/*
 * VP9 compatible video decoder
 *
 * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
 * Copyright (C) 2013 Clément Bœsch <u pkh me>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "get_bits.h"
#include "hwaccel.h"
#include "internal.h"
#include "profiles.h"
#include "thread.h"
#include "videodsp.h"
#include "vp56.h"
#include "vp9.h"
#include "vp9data.h"
#include "vp9dec.h"
#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"

#define VP9_SYNCCODE 0x498342
#if HAVE_THREADS
static void vp9_free_entries(AVCodecContext *avctx)
{
    VP9Context *s = avctx->priv_data;

    if (avctx->active_thread_type & FF_THREAD_SLICE) {
        pthread_mutex_destroy(&s->progress_mutex);
        pthread_cond_destroy(&s->progress_cond);
        av_freep(&s->entries);
    }
}

static int vp9_alloc_entries(AVCodecContext *avctx, int n)
{
    VP9Context *s = avctx->priv_data;
    int i;

    if (avctx->active_thread_type & FF_THREAD_SLICE) {
        if (s->entries)
            av_freep(&s->entries);

        s->entries = av_malloc_array(n, sizeof(atomic_int));
        if (!s->entries)
            return AVERROR(ENOMEM);

        for (i = 0; i < n; i++)
            atomic_init(&s->entries[i], 0);

        pthread_mutex_init(&s->progress_mutex, NULL);
        pthread_cond_init(&s->progress_cond, NULL);
    }
    return 0;
}

static void vp9_report_tile_progress(VP9Context *s, int field, int n)
{
    pthread_mutex_lock(&s->progress_mutex);
    atomic_fetch_add_explicit(&s->entries[field], n, memory_order_release);
    pthread_cond_signal(&s->progress_cond);
    pthread_mutex_unlock(&s->progress_mutex);
}

static void vp9_await_tile_progress(VP9Context *s, int field, int n)
{
    if (atomic_load_explicit(&s->entries[field], memory_order_acquire) >= n)
        return;

    pthread_mutex_lock(&s->progress_mutex);
    while (atomic_load_explicit(&s->entries[field], memory_order_relaxed) != n)
        pthread_cond_wait(&s->progress_cond, &s->progress_mutex);
    pthread_mutex_unlock(&s->progress_mutex);
}
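
/* Note: vp9_report_tile_progress() and vp9_await_tile_progress() form a
 * release/acquire pair: the release-ordered add publishes a finished
 * superblock row, and the acquire load in the fast path above synchronizes
 * with it, so a thread that observes the counter also observes that row's
 * reconstructed pixels. The relaxed re-load under the mutex is safe because
 * the mutex itself provides the ordering there. */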
#else
static void vp9_free_entries(AVCodecContext *avctx) {}
static int vp9_alloc_entries(AVCodecContext *avctx, int n) { return 0; }
#endif
static void vp9_frame_unref(AVCodecContext *avctx, VP9Frame *f)
{
    ff_thread_release_buffer(avctx, &f->tf);
    av_buffer_unref(&f->extradata);
    av_buffer_unref(&f->hwaccel_priv_buf);
    f->segmentation_map = NULL;
    f->hwaccel_picture_private = NULL;
}

static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
{
    VP9Context *s = avctx->priv_data;
    int ret, sz;

    ret = ff_thread_get_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0)
        return ret;

    sz = 64 * s->sb_cols * s->sb_rows;
    f->extradata = av_buffer_allocz(sz * (1 + sizeof(VP9mvrefPair)));
    if (!f->extradata)
        goto fail;

    f->segmentation_map = f->extradata->data;
    f->mv = (VP9mvrefPair *) (f->extradata->data + sz);

    if (avctx->hwaccel) {
        const AVHWAccel *hwaccel = avctx->hwaccel;
        av_assert0(!f->hwaccel_picture_private);
        if (hwaccel->frame_priv_data_size) {
            f->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
            if (!f->hwaccel_priv_buf)
                goto fail;
            f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
        }
    }

    return 0;

fail:
    vp9_frame_unref(avctx, f);
    return AVERROR(ENOMEM);
}

static int vp9_frame_ref(AVCodecContext *avctx, VP9Frame *dst, VP9Frame *src)
{
    int ret;

    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
    if (ret < 0)
        return ret;

    dst->extradata = av_buffer_ref(src->extradata);
    if (!dst->extradata)
        goto fail;

    dst->segmentation_map = src->segmentation_map;
    dst->mv = src->mv;
    dst->uses_2pass = src->uses_2pass;

    if (src->hwaccel_picture_private) {
        dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
        if (!dst->hwaccel_priv_buf)
            goto fail;
        dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
    }

    return 0;

fail:
    vp9_frame_unref(avctx, dst);
    return AVERROR(ENOMEM);
}
static int update_size(AVCodecContext *avctx, int w, int h)
{
#define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
                     CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
                     CONFIG_VP9_NVDEC_HWACCEL + \
                     CONFIG_VP9_VAAPI_HWACCEL)
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
    VP9Context *s = avctx->priv_data;
    uint8_t *p;
    int bytesperpixel = s->bytesperpixel, ret, cols, rows;
    int lflvl_len, i;

    av_assert0(w > 0 && h > 0);

    if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {
        if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
            return ret;

        switch (s->pix_fmt) {
        case AV_PIX_FMT_YUV420P:
        case AV_PIX_FMT_YUV420P10:
#if CONFIG_VP9_DXVA2_HWACCEL
            *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_VP9_D3D11VA_HWACCEL
            *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
            *fmtp++ = AV_PIX_FMT_D3D11;
#endif
#if CONFIG_VP9_NVDEC_HWACCEL
            *fmtp++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
            break;
        case AV_PIX_FMT_YUV420P12:
#if CONFIG_VP9_NVDEC_HWACCEL
            *fmtp++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
            break;
        }

        *fmtp++ = s->pix_fmt;
        *fmtp = AV_PIX_FMT_NONE;

        ret = ff_thread_get_format(avctx, pix_fmts);
        if (ret < 0)
            return ret;

        avctx->pix_fmt = ret;
        s->gf_fmt = s->pix_fmt;
        s->w = w;
        s->h = h;
    }

    cols = (w + 7) >> 3;
    rows = (h + 7) >> 3;

    if (s->intra_pred_data[0] && cols == s->cols && rows == s->rows && s->pix_fmt == s->last_fmt)
        return 0;

    s->last_fmt = s->pix_fmt;
    s->sb_cols  = (w + 63) >> 6;
    s->sb_rows  = (h + 63) >> 6;
    s->cols     = (w + 7) >> 3;
    s->rows     = (h + 7) >> 3;
    lflvl_len   = avctx->active_thread_type == FF_THREAD_SLICE ? s->sb_rows : 1;

#define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
    av_freep(&s->intra_pred_data[0]);
    // FIXME we slightly over-allocate here for subsampled chroma, but a little
    // bit of padding shouldn't affect performance...
    p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
                                lflvl_len * sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
    if (!p)
        return AVERROR(ENOMEM);
    assign(s->intra_pred_data[0],  uint8_t *,    64 * bytesperpixel);
    assign(s->intra_pred_data[1],  uint8_t *,    64 * bytesperpixel);
    assign(s->intra_pred_data[2],  uint8_t *,    64 * bytesperpixel);
    assign(s->above_y_nnz_ctx,     uint8_t *,    16);
    assign(s->above_mode_ctx,      uint8_t *,    16);
    assign(s->above_mv_ctx,        VP56mv(*)[2], 16);
    assign(s->above_uv_nnz_ctx[0], uint8_t *,    16);
    assign(s->above_uv_nnz_ctx[1], uint8_t *,    16);
    assign(s->above_partition_ctx, uint8_t *,     8);
    assign(s->above_skip_ctx,      uint8_t *,     8);
    assign(s->above_txfm_ctx,      uint8_t *,     8);
    assign(s->above_segpred_ctx,   uint8_t *,     8);
    assign(s->above_intra_ctx,     uint8_t *,     8);
    assign(s->above_comp_ctx,      uint8_t *,     8);
    assign(s->above_ref_ctx,       uint8_t *,     8);
    assign(s->above_filter_ctx,    uint8_t *,     8);
    assign(s->lflvl,               VP9Filter *,   lflvl_len);
#undef assign

    if (s->td) {
        for (i = 0; i < s->active_tile_cols; i++) {
            av_freep(&s->td[i].b_base);
            av_freep(&s->td[i].block_base);
        }
    }

    if (s->s.h.bpp != s->last_bpp) {
        ff_vp9dsp_init(&s->dsp, s->s.h.bpp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
        ff_videodsp_init(&s->vdsp, s->s.h.bpp);
        s->last_bpp = s->s.h.bpp;
    }

    return 0;
}
static int update_block_buffers(AVCodecContext *avctx)
{
    int i;
    VP9Context *s = avctx->priv_data;
    int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;
    VP9TileData *td = &s->td[0];

    if (td->b_base && td->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
        return 0;

    av_free(td->b_base);
    av_free(td->block_base);
    chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
    chroma_eobs   = 16 * 16 >> (s->ss_h + s->ss_v);
    if (s->s.frames[CUR_FRAME].uses_2pass) {
        int sbs = s->sb_cols * s->sb_rows;

        td->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
        td->block_base = av_mallocz(((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
                                     16 * 16 + 2 * chroma_eobs) * sbs);
        if (!td->b_base || !td->block_base)
            return AVERROR(ENOMEM);
        td->uvblock_base[0] = td->block_base + sbs * 64 * 64 * bytesperpixel;
        td->uvblock_base[1] = td->uvblock_base[0] + sbs * chroma_blocks * bytesperpixel;
        td->eob_base = (uint8_t *) (td->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
        td->uveob_base[0] = td->eob_base + 16 * 16 * sbs;
        td->uveob_base[1] = td->uveob_base[0] + chroma_eobs * sbs;
    } else {
        for (i = 1; i < s->active_tile_cols; i++) {
            if (s->td[i].b_base && s->td[i].block_base) {
                av_free(s->td[i].b_base);
                av_free(s->td[i].block_base);
            }
        }
        for (i = 0; i < s->active_tile_cols; i++) {
            s->td[i].b_base = av_malloc(sizeof(VP9Block));
            s->td[i].block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
                                             16 * 16 + 2 * chroma_eobs);
            if (!s->td[i].b_base || !s->td[i].block_base)
                return AVERROR(ENOMEM);
            s->td[i].uvblock_base[0] = s->td[i].block_base + 64 * 64 * bytesperpixel;
            s->td[i].uvblock_base[1] = s->td[i].uvblock_base[0] + chroma_blocks * bytesperpixel;
            s->td[i].eob_base = (uint8_t *) (s->td[i].uvblock_base[1] + chroma_blocks * bytesperpixel);
            s->td[i].uveob_base[0] = s->td[i].eob_base + 16 * 16;
            s->td[i].uveob_base[1] = s->td[i].uveob_base[0] + chroma_eobs;
        }
    }
    s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;

    return 0;
}
// The sign bit is at the end, not the start, of a bit sequence
static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
{
    int v = get_bits(gb, n);
    return get_bits1(gb) ? -v : v;
}
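
/* For example, with n = 4 the bit pattern 0101 followed by sign bit 1
 * decodes as -5, while the same magnitude followed by sign bit 0 gives
 * +5: magnitude first, sign last. */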
static av_always_inline int inv_recenter_nonneg(int v, int m)
{
    if (v > 2 * m)
        return v;
    if (v & 1)
        return m - ((v + 1) >> 1);
    return m + (v >> 1);
}
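
/* Small codes stay near the center m: for m = 10, v = 0..4 maps to
 * 10, 9, 11, 8, 12 (odd v steps below m, even v above it), and any
 * v > 2 * m is taken literally. */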
// differential forward probability updates
static int update_prob(VP56RangeCoder *c, int p)
{
    static const uint8_t inv_map_table[255] = {
          7,  20,  33,  46,  59,  72,  85,  98, 111, 124, 137, 150, 163, 176,
        189, 202, 215, 228, 241, 254,   1,   2,   3,   4,   5,   6,   8,   9,
         10,  11,  12,  13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,
         25,  26,  27,  28,  29,  30,  31,  32,  34,  35,  36,  37,  38,  39,
         40,  41,  42,  43,  44,  45,  47,  48,  49,  50,  51,  52,  53,  54,
         55,  56,  57,  58,  60,  61,  62,  63,  64,  65,  66,  67,  68,  69,
         70,  71,  73,  74,  75,  76,  77,  78,  79,  80,  81,  82,  83,  84,
         86,  87,  88,  89,  90,  91,  92,  93,  94,  95,  96,  97,  99, 100,
        101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
        116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
        131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
        146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
        161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
        177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
        192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
        207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
        222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
        237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
        252, 253, 253,
    };
    int d;

    /* This code is trying to do a differential probability update. For a
     * current probability A in the range [1, 255], the difference to a new
     * probability of any value can be expressed differentially as 1-A, 255-A
     * where some part of this (absolute range) exists both in positive as
     * well as the negative part, whereas another part only exists in one
     * half. We're trying to code this shared part differentially, i.e.
     * times two where the value of the lowest bit specifies the sign, and
     * the single part is then coded on top of this. This absolute difference
     * then again has a value of [0, 254], but a bigger value in this range
     * indicates that we're further away from the original value A, so we
     * can code this as a VLC code, since higher values are increasingly
     * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
     * updates vs. the 'fine, exact' updates further down the range, which
     * adds one extra dimension to this differential update model. */
    if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 4) + 0;
    } else if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 4) + 16;
    } else if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 5) + 32;
    } else {
        d = vp8_rac_get_uint(c, 7);
        if (d >= 65)
            d = (d << 1) - 65 + vp8_rac_get(c);
        d += 64;
        av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
    }

    return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
                    255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
}
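
/* The branch ladder above is a simple VLC: prefix 0 gives d in [0, 16),
 * prefix 10 gives [16, 32), prefix 110 gives [32, 64), and prefix 111
 * reads 7 more bits (extended by one bit once d >= 65) for the rest, so
 * small, likely deltas cost the fewest bits. */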
static int read_colorspace_details(AVCodecContext *avctx)
{
    static const enum AVColorSpace colorspaces[8] = {
        AVCOL_SPC_UNSPECIFIED, AVCOL_SPC_BT470BG, AVCOL_SPC_BT709, AVCOL_SPC_SMPTE170M,
        AVCOL_SPC_SMPTE240M, AVCOL_SPC_BT2020_NCL, AVCOL_SPC_RESERVED, AVCOL_SPC_RGB,
    };
    VP9Context *s = avctx->priv_data;
    int bits = avctx->profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12

    s->bpp_index = bits;
    s->s.h.bpp = 8 + bits * 2;
    s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
    avctx->colorspace = colorspaces[get_bits(&s->gb, 3)];
    if (avctx->colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
        static const enum AVPixelFormat pix_fmt_rgb[3] = {
            AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12
        };
        s->ss_h = s->ss_v = 0;
        avctx->color_range = AVCOL_RANGE_JPEG;
        s->pix_fmt = pix_fmt_rgb[bits];
        if (avctx->profile & 1) {
            if (get_bits1(&s->gb)) {
                av_log(avctx, AV_LOG_ERROR, "Reserved bit set in RGB\n");
                return AVERROR_INVALIDDATA;
            }
        } else {
            av_log(avctx, AV_LOG_ERROR, "RGB not supported in profile %d\n",
                   avctx->profile);
            return AVERROR_INVALIDDATA;
        }
    } else {
        static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
            { { AV_PIX_FMT_YUV444P,   AV_PIX_FMT_YUV422P },
              { AV_PIX_FMT_YUV440P,   AV_PIX_FMT_YUV420P } },
            { { AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10 },
              { AV_PIX_FMT_YUV440P10, AV_PIX_FMT_YUV420P10 } },
            { { AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12 },
              { AV_PIX_FMT_YUV440P12, AV_PIX_FMT_YUV420P12 } }
        };
        avctx->color_range = get_bits1(&s->gb) ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
        if (avctx->profile & 1) {
            s->ss_h = get_bits1(&s->gb);
            s->ss_v = get_bits1(&s->gb);
            s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
            if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
                av_log(avctx, AV_LOG_ERROR, "YUV 4:2:0 not supported in profile %d\n",
                       avctx->profile);
                return AVERROR_INVALIDDATA;
            } else if (get_bits1(&s->gb)) {
                av_log(avctx, AV_LOG_ERROR, "Profile %d color details reserved bit set\n",
                       avctx->profile);
                return AVERROR_INVALIDDATA;
            }
        } else {
            s->ss_h = s->ss_v = 1;
            s->pix_fmt = pix_fmt_for_ss[bits][1][1];
        }
    }

    return 0;
}
static int decode_frame_header(AVCodecContext *avctx,
                               const uint8_t *data, int size, int *ref)
{
    VP9Context *s = avctx->priv_data;
    int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
    int last_invisible;
    const uint8_t *data2;

    /* general header */
    if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
        return ret;
    }
    if (get_bits(&s->gb, 2) != 0x2) { // frame marker
        av_log(avctx, AV_LOG_ERROR, "Invalid frame marker\n");
        return AVERROR_INVALIDDATA;
    }
    avctx->profile  = get_bits1(&s->gb);
    avctx->profile |= get_bits1(&s->gb) << 1;
    if (avctx->profile == 3)
        avctx->profile += get_bits1(&s->gb);
    if (avctx->profile > 3) {
        av_log(avctx, AV_LOG_ERROR, "Profile %d is not yet supported\n", avctx->profile);
        return AVERROR_INVALIDDATA;
    }
    s->s.h.profile = avctx->profile;
    if (get_bits1(&s->gb)) {
        *ref = get_bits(&s->gb, 3);
        return 0;
    }

    s->last_keyframe = s->s.h.keyframe;
    s->s.h.keyframe  = !get_bits1(&s->gb);
    last_invisible   = s->s.h.invisible;
    s->s.h.invisible = !get_bits1(&s->gb);
    s->s.h.errorres  = get_bits1(&s->gb);
    s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;

    if (s->s.h.keyframe) {
        if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
            av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
            return AVERROR_INVALIDDATA;
        }
        if ((ret = read_colorspace_details(avctx)) < 0)
            return ret;
        // for profile 1, here follows the subsampling bits
        s->s.h.refreshrefmask = 0xff;
        w = get_bits(&s->gb, 16) + 1;
        h = get_bits(&s->gb, 16) + 1;
        if (get_bits1(&s->gb)) // display size
            skip_bits(&s->gb, 32);
    } else {
        s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
        s->s.h.resetctx  = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
        if (s->s.h.intraonly) {
            if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
                av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
                return AVERROR_INVALIDDATA;
            }
            if (avctx->profile >= 1) {
                if ((ret = read_colorspace_details(avctx)) < 0)
                    return ret;
            } else {
                s->ss_h = s->ss_v = 1;
                s->s.h.bpp = 8;
                s->bpp_index = 0;
                s->bytesperpixel = 1;
                s->pix_fmt = AV_PIX_FMT_YUV420P;
                avctx->colorspace = AVCOL_SPC_BT470BG;
                avctx->color_range = AVCOL_RANGE_MPEG;
            }
            s->s.h.refreshrefmask = get_bits(&s->gb, 8);
            w = get_bits(&s->gb, 16) + 1;
            h = get_bits(&s->gb, 16) + 1;
            if (get_bits1(&s->gb)) // display size
                skip_bits(&s->gb, 32);
        } else {
            s->s.h.refreshrefmask = get_bits(&s->gb, 8);
            s->s.h.refidx[0]      = get_bits(&s->gb, 3);
            s->s.h.signbias[0]    = get_bits1(&s->gb) && !s->s.h.errorres;
            s->s.h.refidx[1]      = get_bits(&s->gb, 3);
            s->s.h.signbias[1]    = get_bits1(&s->gb) && !s->s.h.errorres;
            s->s.h.refidx[2]      = get_bits(&s->gb, 3);
            s->s.h.signbias[2]    = get_bits1(&s->gb) && !s->s.h.errorres;
            if (!s->s.refs[s->s.h.refidx[0]].f->buf[0] ||
                !s->s.refs[s->s.h.refidx[1]].f->buf[0] ||
                !s->s.refs[s->s.h.refidx[2]].f->buf[0]) {
                av_log(avctx, AV_LOG_ERROR, "Not all references are available\n");
                return AVERROR_INVALIDDATA;
            }
            if (get_bits1(&s->gb)) {
                w = s->s.refs[s->s.h.refidx[0]].f->width;
                h = s->s.refs[s->s.h.refidx[0]].f->height;
            } else if (get_bits1(&s->gb)) {
                w = s->s.refs[s->s.h.refidx[1]].f->width;
                h = s->s.refs[s->s.h.refidx[1]].f->height;
            } else if (get_bits1(&s->gb)) {
                w = s->s.refs[s->s.h.refidx[2]].f->width;
                h = s->s.refs[s->s.h.refidx[2]].f->height;
            } else {
                w = get_bits(&s->gb, 16) + 1;
                h = get_bits(&s->gb, 16) + 1;
            }
            // Note that in this code, "CUR_FRAME" is actually before we
            // have formally allocated a frame, and thus actually represents
            // the _last_ frame
            s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f->width == w &&
                                         s->s.frames[CUR_FRAME].tf.f->height == h;
            if (get_bits1(&s->gb)) // display size
                skip_bits(&s->gb, 32);
            s->s.h.highprecisionmvs = get_bits1(&s->gb);
            s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
                                                    get_bits(&s->gb, 2);
            s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] ||
                                    s->s.h.signbias[0] != s->s.h.signbias[2];
            if (s->s.h.allowcompinter) {
                if (s->s.h.signbias[0] == s->s.h.signbias[1]) {
                    s->s.h.fixcompref    = 2;
                    s->s.h.varcompref[0] = 0;
                    s->s.h.varcompref[1] = 1;
                } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) {
                    s->s.h.fixcompref    = 1;
                    s->s.h.varcompref[0] = 0;
                    s->s.h.varcompref[1] = 2;
                } else {
                    s->s.h.fixcompref    = 0;
                    s->s.h.varcompref[0] = 1;
                    s->s.h.varcompref[1] = 2;
                }
            }
        }
    }
    s->s.h.refreshctx   = s->s.h.errorres ? 0 : get_bits1(&s->gb);
    s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb);
    s->s.h.framectxid   = c = get_bits(&s->gb, 2);
    if (s->s.h.keyframe || s->s.h.intraonly)
        s->s.h.framectxid = 0; // BUG: libvpx ignores this field in keyframes

    /* loopfilter header data */
    if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) {
        // reset loopfilter defaults
        s->s.h.lf_delta.ref[0]  = 1;
        s->s.h.lf_delta.ref[1]  = 0;
        s->s.h.lf_delta.ref[2]  = -1;
        s->s.h.lf_delta.ref[3]  = -1;
        s->s.h.lf_delta.mode[0] = 0;
        s->s.h.lf_delta.mode[1] = 0;
        memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat));
    }
    s->s.h.filter.level = get_bits(&s->gb, 6);
    sharp = get_bits(&s->gb, 3);
    // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
    // the old cache values since they are still valid
    if (s->s.h.filter.sharpness != sharp) {
        for (i = 1; i <= 63; i++) {
            int limit = i;

            if (sharp > 0) {
                limit >>= (sharp + 3) >> 2;
                limit = FFMIN(limit, 9 - sharp);
            }
            limit = FFMAX(limit, 1);

            s->filter_lut.lim_lut[i] = limit;
            s->filter_lut.mblim_lut[i] = 2 * (i + 2) + limit;
        }
    }
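    /* Worked example: with sharpness 4 and i = 40, limit becomes
     * 40 >> ((4 + 3) >> 2) = 20, then FFMIN(20, 9 - 4) = 5, so higher
     * sharpness clamps the loopfilter limits progressively harder. */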
    s->s.h.filter.sharpness = sharp;
    if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) {
        if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) {
            for (i = 0; i < 4; i++)
                if (get_bits1(&s->gb))
                    s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
            for (i = 0; i < 2; i++)
                if (get_bits1(&s->gb))
                    s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
        }
    }

    /* quantization header data */
    s->s.h.yac_qi      = get_bits(&s->gb, 8);
    s->s.h.ydc_qdelta  = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
    s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
    s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
    s->s.h.lossless    = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 &&
                         s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0;
    if (s->s.h.lossless)
        avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;

    /* segmentation header info */
    if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) {
        if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) {
            for (i = 0; i < 7; i++)
                s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
                                              get_bits(&s->gb, 8) : 255;
            if ((s->s.h.segmentation.temporal = get_bits1(&s->gb)))
                for (i = 0; i < 3; i++)
                    s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
                                                       get_bits(&s->gb, 8) : 255;
        }

        if (get_bits1(&s->gb)) {
            s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
            for (i = 0; i < 8; i++) {
                if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
                    s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
                if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
                    s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
                if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
                    s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
                s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
            }
        }
    }
    // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
    for (i = 0; i < (s->s.h.segmentation.enabled ? 8 : 1); i++) {
        int qyac, qydc, quvac, quvdc, lflvl, sh;

        if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) {
            if (s->s.h.segmentation.absolute_vals)
                qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8);
            else
                qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8);
        } else {
            qyac = s->s.h.yac_qi;
        }
        qydc  = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8);
        quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8);
        quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8);
        qyac  = av_clip_uintp2(qyac, 8);

        s->s.h.segmentation.feat[i].qmul[0][0] = ff_vp9_dc_qlookup[s->bpp_index][qydc];
        s->s.h.segmentation.feat[i].qmul[0][1] = ff_vp9_ac_qlookup[s->bpp_index][qyac];
        s->s.h.segmentation.feat[i].qmul[1][0] = ff_vp9_dc_qlookup[s->bpp_index][quvdc];
        s->s.h.segmentation.feat[i].qmul[1][1] = ff_vp9_ac_qlookup[s->bpp_index][quvac];

        sh = s->s.h.filter.level >= 32;
        if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) {
            if (s->s.h.segmentation.absolute_vals)
                lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6);
            else
                lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6);
        } else {
            lflvl = s->s.h.filter.level;
        }
        if (s->s.h.lf_delta.enabled) {
            s->s.h.segmentation.feat[i].lflvl[0][0] =
            s->s.h.segmentation.feat[i].lflvl[0][1] =
                av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
            for (j = 1; j < 4; j++) {
                s->s.h.segmentation.feat[i].lflvl[j][0] =
                    av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
                                             s->s.h.lf_delta.mode[0]) * (1 << sh)), 6);
                s->s.h.segmentation.feat[i].lflvl[j][1] =
                    av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
                                             s->s.h.lf_delta.mode[1]) * (1 << sh)), 6);
            }
        } else {
            memset(s->s.h.segmentation.feat[i].lflvl, lflvl,
                   sizeof(s->s.h.segmentation.feat[i].lflvl));
        }
    }
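    /* Worked example: with filter.level = 36 (so sh = 1) and
     * lf_delta.ref[0] = 1, the per-segment level for ref 0 becomes
     * av_clip_uintp2(36 + 1 * 2, 6) = 38: the deltas count double once
     * the base level reaches 32. */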
    /* tiling info */
    if ((ret = update_size(avctx, w, h)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d @ %d\n",
               w, h, s->pix_fmt);
        return ret;
    }
    for (s->s.h.tiling.log2_tile_cols = 0;
         s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols);
         s->s.h.tiling.log2_tile_cols++) ;
    for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
    max = FFMAX(0, max - 1);
    while (max > s->s.h.tiling.log2_tile_cols) {
        if (get_bits1(&s->gb))
            s->s.h.tiling.log2_tile_cols++;
        else
            break;
    }
    s->s.h.tiling.log2_tile_rows = decode012(&s->gb);
    s->s.h.tiling.tile_rows      = 1 << s->s.h.tiling.log2_tile_rows;
    if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols)) {
        int n_range_coders;
        VP56RangeCoder *rc;

        if (s->td) {
            for (i = 0; i < s->active_tile_cols; i++) {
                av_free(s->td[i].b_base);
                av_free(s->td[i].block_base);
            }
            av_free(s->td);
        }

        s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols;
        vp9_free_entries(avctx);
        s->active_tile_cols = avctx->active_thread_type == FF_THREAD_SLICE ?
                              s->s.h.tiling.tile_cols : 1;
        vp9_alloc_entries(avctx, s->sb_rows);
        if (avctx->active_thread_type == FF_THREAD_SLICE) {
            n_range_coders = 4; // max_tile_rows
        } else {
            n_range_coders = s->s.h.tiling.tile_cols;
        }
        s->td = av_mallocz_array(s->active_tile_cols, sizeof(VP9TileData) +
                                 n_range_coders * sizeof(VP56RangeCoder));
        if (!s->td)
            return AVERROR(ENOMEM);
        rc = (VP56RangeCoder *) &s->td[s->active_tile_cols];
        for (i = 0; i < s->active_tile_cols; i++) {
            s->td[i].s = s;
            s->td[i].c_b = rc;
            rc += n_range_coders;
        }
    }
    /* check reference frames */
    if (!s->s.h.keyframe && !s->s.h.intraonly) {
        for (i = 0; i < 3; i++) {
            AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
            int refw = ref->width, refh = ref->height;

            if (ref->format != avctx->pix_fmt) {
                av_log(avctx, AV_LOG_ERROR,
                       "Ref pixfmt (%s) did not match current frame (%s)",
                       av_get_pix_fmt_name(ref->format),
                       av_get_pix_fmt_name(avctx->pix_fmt));
                return AVERROR_INVALIDDATA;
            } else if (refw == w && refh == h) {
                s->mvscale[i][0] = s->mvscale[i][1] = 0;
            } else {
                if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
                    av_log(avctx, AV_LOG_ERROR,
                           "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
                           refw, refh, w, h);
                    return AVERROR_INVALIDDATA;
                }
                s->mvscale[i][0] = (refw << 14) / w;
                s->mvscale[i][1] = (refh << 14) / h;
                s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
                s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
            }
        }
    }

    if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) {
        s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
                           s->prob_ctx[3].p = ff_vp9_default_probs;
        memcpy(s->prob_ctx[0].coef, ff_vp9_default_coef_probs,
               sizeof(ff_vp9_default_coef_probs));
        memcpy(s->prob_ctx[1].coef, ff_vp9_default_coef_probs,
               sizeof(ff_vp9_default_coef_probs));
        memcpy(s->prob_ctx[2].coef, ff_vp9_default_coef_probs,
               sizeof(ff_vp9_default_coef_probs));
        memcpy(s->prob_ctx[3].coef, ff_vp9_default_coef_probs,
               sizeof(ff_vp9_default_coef_probs));
    } else if (s->s.h.intraonly && s->s.h.resetctx == 2) {
        s->prob_ctx[c].p = ff_vp9_default_probs;
        memcpy(s->prob_ctx[c].coef, ff_vp9_default_coef_probs,
               sizeof(ff_vp9_default_coef_probs));
    }

    // next 16 bits is size of the rest of the header (arith-coded)
    s->s.h.compressed_header_size = size2 = get_bits(&s->gb, 16);
    s->s.h.uncompressed_header_size = (get_bits_count(&s->gb) + 7) / 8;

    data2 = align_get_bits(&s->gb);
    if (size2 > size - (data2 - data)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid compressed header size\n");
        return AVERROR_INVALIDDATA;
    }
    ret = ff_vp56_init_range_decoder(&s->c, data2, size2);
    if (ret < 0)
        return ret;

    if (vp56_rac_get_prob_branchy(&s->c, 128)) { // marker bit
        av_log(avctx, AV_LOG_ERROR, "Marker bit was set\n");
        return AVERROR_INVALIDDATA;
    }

    for (i = 0; i < s->active_tile_cols; i++) {
        if (s->s.h.keyframe || s->s.h.intraonly) {
            memset(s->td[i].counts.coef, 0, sizeof(s->td[0].counts.coef));
            memset(s->td[i].counts.eob,  0, sizeof(s->td[0].counts.eob));
        } else {
            memset(&s->td[i].counts, 0, sizeof(s->td[0].counts));
        }
    }

    /* FIXME is it faster to not copy here, but do it down in the fw updates
     * as explicit copies if the fw update is missing (and skip the copy upon
     * fw update)? */
    s->prob.p = s->prob_ctx[c].p;

    // txfm updates
    if (s->s.h.lossless) {
        s->s.h.txfmmode = TX_4X4;
    } else {
        s->s.h.txfmmode = vp8_rac_get_uint(&s->c, 2);
        if (s->s.h.txfmmode == 3)
            s->s.h.txfmmode += vp8_rac_get(&s->c);

        if (s->s.h.txfmmode == TX_SWITCHABLE) {
            for (i = 0; i < 2; i++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
            for (i = 0; i < 2; i++)
                for (j = 0; j < 2; j++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.tx16p[i][j] =
                            update_prob(&s->c, s->prob.p.tx16p[i][j]);
            for (i = 0; i < 2; i++)
                for (j = 0; j < 3; j++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.tx32p[i][j] =
                            update_prob(&s->c, s->prob.p.tx32p[i][j]);
        }
    }

    // coef updates
    for (i = 0; i < 4; i++) {
        uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
        if (vp8_rac_get(&s->c)) {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 2; k++)
                    for (l = 0; l < 6; l++)
                        for (m = 0; m < 6; m++) {
                            uint8_t *p = s->prob.coef[i][j][k][l][m];
                            uint8_t *r = ref[j][k][l][m];
                            if (m >= 3 && l == 0) // dc only has 3 pt
                                break;
                            for (n = 0; n < 3; n++) {
                                if (vp56_rac_get_prob_branchy(&s->c, 252))
                                    p[n] = update_prob(&s->c, r[n]);
                                else
                                    p[n] = r[n];
                            }
                            memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
                        }
        } else {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 2; k++)
                    for (l = 0; l < 6; l++)
                        for (m = 0; m < 6; m++) {
                            uint8_t *p = s->prob.coef[i][j][k][l][m];
                            uint8_t *r = ref[j][k][l][m];
                            if (m > 3 && l == 0) // dc only has 3 pt
                                break;
                            memcpy(p, r, 3);
                            memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
                        }
        }
        if (s->s.h.txfmmode == i)
            break;
    }

    // mode updates
    for (i = 0; i < 3; i++)
        if (vp56_rac_get_prob_branchy(&s->c, 252))
            s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
    if (!s->s.h.keyframe && !s->s.h.intraonly) {
        for (i = 0; i < 7; i++)
            for (j = 0; j < 3; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_mode[i][j] =
                        update_prob(&s->c, s->prob.p.mv_mode[i][j]);

        if (s->s.h.filtermode == FILTER_SWITCHABLE)
            for (i = 0; i < 4; i++)
                for (j = 0; j < 2; j++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.filter[i][j] =
                            update_prob(&s->c, s->prob.p.filter[i][j]);

        for (i = 0; i < 4; i++)
            if (vp56_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);

        if (s->s.h.allowcompinter) {
            s->s.h.comppredmode = vp8_rac_get(&s->c);
            if (s->s.h.comppredmode)
                s->s.h.comppredmode += vp8_rac_get(&s->c);
            if (s->s.h.comppredmode == PRED_SWITCHABLE)
                for (i = 0; i < 5; i++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.comp[i] =
                            update_prob(&s->c, s->prob.p.comp[i]);
        } else {
            s->s.h.comppredmode = PRED_SINGLEREF;
        }

        if (s->s.h.comppredmode != PRED_COMPREF) {
            for (i = 0; i < 5; i++) {
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.single_ref[i][0] =
                        update_prob(&s->c, s->prob.p.single_ref[i][0]);
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.single_ref[i][1] =
                        update_prob(&s->c, s->prob.p.single_ref[i][1]);
            }
        }

        if (s->s.h.comppredmode != PRED_SINGLEREF) {
            for (i = 0; i < 5; i++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.comp_ref[i] =
                        update_prob(&s->c, s->prob.p.comp_ref[i]);
        }

        for (i = 0; i < 4; i++)
            for (j = 0; j < 9; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.y_mode[i][j] =
                        update_prob(&s->c, s->prob.p.y_mode[i][j]);

        for (i = 0; i < 4; i++)
            for (j = 0; j < 4; j++)
                for (k = 0; k < 3; k++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.partition[3 - i][j][k] =
                            update_prob(&s->c,
                                        s->prob.p.partition[3 - i][j][k]);

        // mv fields don't use the update_prob subexp model for some reason
        for (i = 0; i < 3; i++)
            if (vp56_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.mv_joint[i] = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
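
        /* (vp8_rac_get_uint(&s->c, 7) << 1) | 1 maps the raw 7-bit value v
         * to the odd probability 2 * v + 1, covering [1, 255] in steps of
         * two, so a zero probability can never be coded. */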
        for (i = 0; i < 2; i++) {
            if (vp56_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.mv_comp[i].sign =
                    (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

            for (j = 0; j < 10; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].classes[j] =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

            if (vp56_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.mv_comp[i].class0 =
                    (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

            for (j = 0; j < 10; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].bits[j] =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
        }

        for (i = 0; i < 2; i++) {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 3; k++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.mv_comp[i].class0_fp[j][k] =
                            (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

            for (j = 0; j < 3; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].fp[j] =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
        }

        if (s->s.h.highprecisionmvs) {
            for (i = 0; i < 2; i++) {
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].class0_hp =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].hp =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
            }
        }
    }

    return (data2 - data) + size2;
}
static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl,
                      ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
{
    const VP9Context *s = td->s;
    int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
            (((td->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
    const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? ff_vp9_default_kf_partition_probs[bl][c] :
                                                             s->prob.p.partition[bl][c];
    enum BlockPartition bp;
    ptrdiff_t hbs = 4 >> bl;
    AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
    ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
    int bytesperpixel = s->bytesperpixel;

    if (bl == BL_8X8) {
        bp = vp8_rac_get_tree(td->c, ff_vp9_partition_tree, p);
        ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
    } else if (col + hbs < s->cols) { // FIXME why not <=?
        if (row + hbs < s->rows) { // FIXME why not <=?
            bp = vp8_rac_get_tree(td->c, ff_vp9_partition_tree, p);
            switch (bp) {
            case PARTITION_NONE:
                ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
                break;
            case PARTITION_H:
                ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
                yoff  += hbs * 8 * y_stride;
                uvoff += hbs * 8 * uv_stride >> s->ss_v;
                ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
                break;
            case PARTITION_V:
                ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
                yoff  += hbs * 8 * bytesperpixel;
                uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
                ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
                break;
            case PARTITION_SPLIT:
                decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
                decode_sb(td, row, col + hbs, lflvl,
                          yoff + 8 * hbs * bytesperpixel,
                          uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
                yoff  += hbs * 8 * y_stride;
                uvoff += hbs * 8 * uv_stride >> s->ss_v;
                decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
                decode_sb(td, row + hbs, col + hbs, lflvl,
                          yoff + 8 * hbs * bytesperpixel,
                          uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
                break;
            default:
                av_assert0(0);
            }
        } else if (vp56_rac_get_prob_branchy(td->c, p[1])) {
            bp = PARTITION_SPLIT;
            decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
            decode_sb(td, row, col + hbs, lflvl,
                      yoff + 8 * hbs * bytesperpixel,
                      uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
        } else {
            bp = PARTITION_H;
            ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
        }
    } else if (row + hbs < s->rows) { // FIXME why not <=?
        if (vp56_rac_get_prob_branchy(td->c, p[2])) {
            bp = PARTITION_SPLIT;
            decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
            yoff  += hbs * 8 * y_stride;
            uvoff += hbs * 8 * uv_stride >> s->ss_v;
            decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
        } else {
            bp = PARTITION_V;
            ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
        }
    } else {
        bp = PARTITION_SPLIT;
        decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
    }
    td->counts.partition[bl][c][bp]++;
}
static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl,
                          ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
{
    const VP9Context *s = td->s;
    VP9Block *b = td->b;
    ptrdiff_t hbs = 4 >> bl;
    AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
    ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
    int bytesperpixel = s->bytesperpixel;

    if (bl == BL_8X8) {
        av_assert2(b->bl == BL_8X8);
        ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
    } else if (td->b->bl == bl) {
        ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
        if (b->bp == PARTITION_H && row + hbs < s->rows) {
            yoff  += hbs * 8 * y_stride;
            uvoff += hbs * 8 * uv_stride >> s->ss_v;
            ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
        } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
            yoff  += hbs * 8 * bytesperpixel;
            uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
            ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
        }
    } else {
        decode_sb_mem(td, row, col, lflvl, yoff, uvoff, bl + 1);
        if (col + hbs < s->cols) { // FIXME why not <=?
            if (row + hbs < s->rows) {
                decode_sb_mem(td, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
                              uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
                yoff  += hbs * 8 * y_stride;
                uvoff += hbs * 8 * uv_stride >> s->ss_v;
                decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
                decode_sb_mem(td, row + hbs, col + hbs, lflvl,
                              yoff + 8 * hbs * bytesperpixel,
                              uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
            } else {
                yoff  += hbs * 8 * bytesperpixel;
                uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
                decode_sb_mem(td, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
            }
        } else if (row + hbs < s->rows) {
            yoff  += hbs * 8 * y_stride;
            uvoff += hbs * 8 * uv_stride >> s->ss_v;
            decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
        }
    }
}
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
{
    int sb_start = ( idx      * n) >> log2_n;
    int sb_end   = ((idx + 1) * n) >> log2_n;
    *start = FFMIN(sb_start, n) << 3;
    *end   = FFMIN(sb_end,   n) << 3;
}
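
/* For example, splitting n = 10 superblock columns into two tiles
 * (log2_n = 1) gives tile 0 the sb range [0, 5) and tile 1 the range
 * [5, 10); the << 3 converts superblock units into 8x8-block units. */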
static void free_buffers(VP9Context *s)
{
    int i;

    av_freep(&s->intra_pred_data[0]);
    for (i = 0; i < s->active_tile_cols; i++) {
        av_freep(&s->td[i].b_base);
        av_freep(&s->td[i].block_base);
    }
}

static av_cold int vp9_decode_free(AVCodecContext *avctx)
{
    VP9Context *s = avctx->priv_data;
    int i;

    for (i = 0; i < 3; i++) {
        if (s->s.frames[i].tf.f->buf[0])
            vp9_frame_unref(avctx, &s->s.frames[i]);
        av_frame_free(&s->s.frames[i].tf.f);
    }
    for (i = 0; i < 8; i++) {
        if (s->s.refs[i].f->buf[0])
            ff_thread_release_buffer(avctx, &s->s.refs[i]);
        av_frame_free(&s->s.refs[i].f);
        if (s->next_refs[i].f->buf[0])
            ff_thread_release_buffer(avctx, &s->next_refs[i]);
        av_frame_free(&s->next_refs[i].f);
    }

    free_buffers(s);
    vp9_free_entries(avctx);
    av_freep(&s->td);
    return 0;
}
static int decode_tiles(AVCodecContext *avctx,
                        const uint8_t *data, int size)
{
    VP9Context *s = avctx->priv_data;
    VP9TileData *td = &s->td[0];
    int row, col, tile_row, tile_col, ret;
    int bytesperpixel;
    int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    AVFrame *f;
    ptrdiff_t yoff, uvoff, ls_y, ls_uv;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv = f->linesize[1];
    bytesperpixel = s->bytesperpixel;

    yoff = uvoff = 0;
    for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
        set_tile_offset(&tile_row_start, &tile_row_end,
                        tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);

        for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
            int64_t tile_size;

            if (tile_col == s->s.h.tiling.tile_cols - 1 &&
                tile_row == s->s.h.tiling.tile_rows - 1) {
                tile_size = size;
            } else {
                tile_size = AV_RB32(data);
                data += 4;
                size -= 4;
            }
            if (tile_size > size) {
                ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
                return AVERROR_INVALIDDATA;
            }
            ret = ff_vp56_init_range_decoder(&td->c_b[tile_col], data, tile_size);
            if (ret < 0)
                return ret;
            if (vp56_rac_get_prob_branchy(&td->c_b[tile_col], 128)) { // marker bit
                ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
                return AVERROR_INVALIDDATA;
            }
            data += tile_size;
            size -= tile_size;
        }

        for (row = tile_row_start; row < tile_row_end;
             row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
            VP9Filter *lflvl_ptr = s->lflvl;
            ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;

            for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
                set_tile_offset(&tile_col_start, &tile_col_end,
                                tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols);
                td->tile_col_start = tile_col_start;
                if (s->pass != 2) {
                    memset(td->left_partition_ctx, 0, 8);
                    memset(td->left_skip_ctx, 0, 8);
                    if (s->s.h.keyframe || s->s.h.intraonly) {
                        memset(td->left_mode_ctx, DC_PRED, 16);
                    } else {
                        memset(td->left_mode_ctx, NEARESTMV, 8);
                    }
                    memset(td->left_y_nnz_ctx, 0, 16);
                    memset(td->left_uv_nnz_ctx, 0, 32);
                    memset(td->left_segpred_ctx, 0, 8);

                    td->c = &td->c_b[tile_col];
                }

                for (col = tile_col_start;
                     col < tile_col_end;
                     col += 8, yoff2 += 64 * bytesperpixel,
                     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                    // FIXME integrate with lf code (i.e. zero after each
                    // use, similar to invtxfm coefficients, or similar)
                    if (s->pass != 1) {
                        memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                    }

                    if (s->pass == 2) {
                        decode_sb_mem(td, row, col, lflvl_ptr,
                                      yoff2, uvoff2, BL_64X64);
                    } else {
                        if (vpX_rac_is_end(td->c)) {
                            return AVERROR_INVALIDDATA;
                        }
                        decode_sb(td, row, col, lflvl_ptr,
                                  yoff2, uvoff2, BL_64X64);
                    }
                }
            }

            if (s->pass == 1)
                continue;

            // backup pre-loopfilter reconstruction data for intra
            // prediction of next row of sb64s
            if (row + 8 < s->rows) {
                memcpy(s->intra_pred_data[0],
                       f->data[0] + yoff + 63 * ls_y,
                       8 * s->cols * bytesperpixel);
                memcpy(s->intra_pred_data[1],
                       f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * s->cols * bytesperpixel >> s->ss_h);
                memcpy(s->intra_pred_data[2],
                       f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * s->cols * bytesperpixel >> s->ss_h);
            }

            // loopfilter one row
            if (s->s.h.filter.level) {
                yoff2 = yoff;
                uvoff2 = uvoff;
                lflvl_ptr = s->lflvl;
                for (col = 0; col < s->cols;
                     col += 8, yoff2 += 64 * bytesperpixel,
                     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                    ff_vp9_loopfilter_sb(avctx, lflvl_ptr, row, col,
                                         yoff2, uvoff2);
                }
            }

            // FIXME maybe we can make this more finegrained by running the
            // loopfilter per-block instead of after each sbrow
            // In fact that would also make intra pred left preparation easier?
            ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, row >> 3, 0);
        }
    }
    return 0;
}
#if HAVE_THREADS
static av_always_inline
int decode_tiles_mt(AVCodecContext *avctx, void *tdata, int jobnr,
                    int threadnr)
{
    VP9Context *s = avctx->priv_data;
    VP9TileData *td = &s->td[jobnr];
    ptrdiff_t uvoff, yoff, ls_y, ls_uv;
    int bytesperpixel = s->bytesperpixel, row, col, tile_row;
    unsigned tile_cols_len;
    int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    VP9Filter *lflvl_ptr_base;
    AVFrame *f;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv = f->linesize[1];

    set_tile_offset(&tile_col_start, &tile_col_end,
                    jobnr, s->s.h.tiling.log2_tile_cols, s->sb_cols);
    td->tile_col_start = tile_col_start;
    uvoff = (64 * bytesperpixel >> s->ss_h) * (tile_col_start >> 3);
    yoff = (64 * bytesperpixel) * (tile_col_start >> 3);
    lflvl_ptr_base = s->lflvl + (tile_col_start >> 3);

    for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
        set_tile_offset(&tile_row_start, &tile_row_end,
                        tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);

        td->c = &td->c_b[tile_row];
        for (row = tile_row_start; row < tile_row_end;
             row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
            ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
            VP9Filter *lflvl_ptr = lflvl_ptr_base + s->sb_cols * (row >> 3);

            memset(td->left_partition_ctx, 0, 8);
            memset(td->left_skip_ctx, 0, 8);
            if (s->s.h.keyframe || s->s.h.intraonly) {
                memset(td->left_mode_ctx, DC_PRED, 16);
            } else {
                memset(td->left_mode_ctx, NEARESTMV, 8);
            }
            memset(td->left_y_nnz_ctx, 0, 16);
            memset(td->left_uv_nnz_ctx, 0, 32);
            memset(td->left_segpred_ctx, 0, 8);

            for (col = tile_col_start;
                 col < tile_col_end;
                 col += 8, yoff2 += 64 * bytesperpixel,
                 uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                // FIXME integrate with lf code (i.e. zero after each
                // use, similar to invtxfm coefficients, or similar)
                memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                decode_sb(td, row, col, lflvl_ptr,
                          yoff2, uvoff2, BL_64X64);
            }

            // backup pre-loopfilter reconstruction data for intra
            // prediction of next row of sb64s
            tile_cols_len = tile_col_end - tile_col_start;
            if (row + 8 < s->rows) {
                memcpy(s->intra_pred_data[0] + (tile_col_start * 8 * bytesperpixel),
                       f->data[0] + yoff + 63 * ls_y,
                       8 * tile_cols_len * bytesperpixel);
                memcpy(s->intra_pred_data[1] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
                       f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * tile_cols_len * bytesperpixel >> s->ss_h);
                memcpy(s->intra_pred_data[2] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
                       f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * tile_cols_len * bytesperpixel >> s->ss_h);
            }

            vp9_report_tile_progress(s, row >> 3, 1);
        }
    }
    return 0;
}

static av_always_inline
int loopfilter_proc(AVCodecContext *avctx)
{
    VP9Context *s = avctx->priv_data;
    ptrdiff_t uvoff, yoff, ls_y, ls_uv;
    VP9Filter *lflvl_ptr;
    int bytesperpixel = s->bytesperpixel, col, i;
    AVFrame *f;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv = f->linesize[1];

    for (i = 0; i < s->sb_rows; i++) {
        vp9_await_tile_progress(s, i, s->s.h.tiling.tile_cols);

        if (s->s.h.filter.level) {
            yoff = (ls_y * 64) * i;
            uvoff = (ls_uv * 64 >> s->ss_v) * i;
            lflvl_ptr = s->lflvl + s->sb_cols * i;
            for (col = 0; col < s->cols;
                 col += 8, yoff += 64 * bytesperpixel,
                 uvoff += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                ff_vp9_loopfilter_sb(avctx, lflvl_ptr, i << 3, col,
                                     yoff, uvoff);
            }
        }
    }
    return 0;
}
#endif
  1326. static int vp9_decode_frame(AVCodecContext *avctx, void *frame,
  1327. int *got_frame, AVPacket *pkt)
  1328. {
  1329. const uint8_t *data = pkt->data;
  1330. int size = pkt->size;
  1331. VP9Context *s = avctx->priv_data;
  1332. int ret, i, j, ref;
  1333. int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map &&
  1334. (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map);
  1335. AVFrame *f;
  1336. if ((ret = decode_frame_header(avctx, data, size, &ref)) < 0) {
  1337. return ret;
  1338. } else if (ret == 0) {
  1339. if (!s->s.refs[ref].f->buf[0]) {
  1340. av_log(avctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
  1341. return AVERROR_INVALIDDATA;
  1342. }
  1343. if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
  1344. return ret;
  1345. ((AVFrame *)frame)->pts = pkt->pts;
  1346. #if FF_API_PKT_PTS
  1347. FF_DISABLE_DEPRECATION_WARNINGS
  1348. ((AVFrame *)frame)->pkt_pts = pkt->pts;
  1349. FF_ENABLE_DEPRECATION_WARNINGS
  1350. #endif
  1351. ((AVFrame *)frame)->pkt_dts = pkt->dts;
  1352. for (i = 0; i < 8; i++) {
  1353. if (s->next_refs[i].f->buf[0])
  1354. ff_thread_release_buffer(avctx, &s->next_refs[i]);
  1355. if (s->s.refs[i].f->buf[0] &&
  1356. (ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i])) < 0)
  1357. return ret;
  1358. }
  1359. *got_frame = 1;
  1360. return pkt->size;
  1361. }
    data += ret;
    size -= ret;

    // REF_FRAME_SEGMAP keeps the frame whose segmentation map may be reused;
    // REF_FRAME_MVPAIR keeps the frame whose motion vectors seed temporal MV
    // prediction; recycle the outgoing CUR_FRAME into those slots
    if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly) {
        if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0])
            vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
        if (!s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
            (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_SEGMAP], &s->s.frames[CUR_FRAME])) < 0)
            return ret;
    }
    if (s->s.frames[REF_FRAME_MVPAIR].tf.f->buf[0])
        vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_MVPAIR]);
    if (!s->s.h.intraonly && !s->s.h.keyframe && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
        (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_MVPAIR], &s->s.frames[CUR_FRAME])) < 0)
        return ret;
    if (s->s.frames[CUR_FRAME].tf.f->buf[0])
        vp9_frame_unref(avctx, &s->s.frames[CUR_FRAME]);
    if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0)
        return ret;
    f = s->s.frames[CUR_FRAME].tf.f;
    f->key_frame = s->s.h.keyframe;
    f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;

    // the segmentation map can only be reused if the frame dimensions match
    if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0] &&
        (s->s.frames[REF_FRAME_MVPAIR].tf.f->width  != s->s.frames[CUR_FRAME].tf.f->width ||
         s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
        vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
    }

    // ref frame setup: build next_refs, the reference set as it will look
    // once this frame has been decoded
    for (i = 0; i < 8; i++) {
        if (s->next_refs[i].f->buf[0])
            ff_thread_release_buffer(avctx, &s->next_refs[i]);
        if (s->s.h.refreshrefmask & (1 << i)) {
            ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.frames[CUR_FRAME].tf);
        } else if (s->s.refs[i].f->buf[0]) {
            ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i]);
        }
        if (ret < 0)
            return ret;
    }
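
    // with a hwaccel active, hand the whole packet to the hardware and skip
    // the software tile decode entirely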
    if (avctx->hwaccel) {
        ret = avctx->hwaccel->start_frame(avctx, NULL, 0);
        if (ret < 0)
            return ret;
        ret = avctx->hwaccel->decode_slice(avctx, pkt->data, pkt->size);
        if (ret < 0)
            return ret;
        ret = avctx->hwaccel->end_frame(avctx);
        if (ret < 0)
            return ret;
        goto finish;
    }

    // main tile decode loop; first reset the above-edge prediction contexts
    // for the new frame
    memset(s->above_partition_ctx, 0, s->cols);
    memset(s->above_skip_ctx, 0, s->cols);
    if (s->s.h.keyframe || s->s.h.intraonly) {
        memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
    } else {
        memset(s->above_mode_ctx, NEARESTMV, s->cols);
    }
    memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
    memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h);
    memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h);
    memset(s->above_segpred_ctx, 0, s->cols);

    // two-pass decoding is used with frame threading when backward context
    // adaptation is required (refreshctx && !parallelmode)
    s->pass = s->s.frames[CUR_FRAME].uses_2pass =
        avctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode;
    if ((ret = update_block_buffers(avctx)) < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to allocate block buffers\n");
        return ret;
    }
    if (s->s.h.refreshctx && s->s.h.parallelmode) {
        int j, k, l, m;

        // in parallelmode no backward adaptation happens after decoding, so
        // the frame context can be saved (and dependent frame threads
        // unblocked) before any tile has been decoded
        for (i = 0; i < 4; i++) {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 2; k++)
                    for (l = 0; l < 6; l++)
                        for (m = 0; m < 6; m++)
                            memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m],
                                   s->prob.coef[i][j][k][l][m], 3);
            if (s->s.h.txfmmode == i)
                break;
        }
        s->prob_ctx[s->s.h.framectxid].p = s->prob.p;
        ff_thread_finish_setup(avctx);
    } else if (!s->s.h.refreshctx) {
        ff_thread_finish_setup(avctx);
    }

#if HAVE_THREADS
    if (avctx->active_thread_type & FF_THREAD_SLICE) {
        for (i = 0; i < s->sb_rows; i++)
            atomic_store(&s->entries[i], 0);
    }
#endif
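
    // one decode pass normally; two with frame threading when backward
    // probability adaptation is needed (pass 1 parses the bitstream and
    // adapts the context, pass 2 reconstructs, so the next frame thread can
    // start after pass 1)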
    do {
        for (i = 0; i < s->active_tile_cols; i++) {
            s->td[i].b = s->td[i].b_base;
            s->td[i].block = s->td[i].block_base;
            s->td[i].uvblock[0] = s->td[i].uvblock_base[0];
            s->td[i].uvblock[1] = s->td[i].uvblock_base[1];
            s->td[i].eob = s->td[i].eob_base;
            s->td[i].uveob[0] = s->td[i].uveob_base[0];
            s->td[i].uveob[1] = s->td[i].uveob_base[1];
        }

#if HAVE_THREADS
        if (avctx->active_thread_type == FF_THREAD_SLICE) {
            int tile_row, tile_col;

            av_assert1(!s->pass);
            for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
                for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
                    int64_t tile_size;

                    // every tile except the last is prefixed with its byte
                    // size as a 32-bit big-endian integer
                    if (tile_col == s->s.h.tiling.tile_cols - 1 &&
                        tile_row == s->s.h.tiling.tile_rows - 1) {
                        tile_size = size;
                    } else {
                        tile_size = AV_RB32(data);
                        data += 4;
                        size -= 4;
                    }
                    if (tile_size > size)
                        return AVERROR_INVALIDDATA;
                    ret = ff_vp56_init_range_decoder(&s->td[tile_col].c_b[tile_row], data, tile_size);
                    if (ret < 0)
                        return ret;
                    if (vp56_rac_get_prob_branchy(&s->td[tile_col].c_b[tile_row], 128)) // marker bit
                        return AVERROR_INVALIDDATA;
                    data += tile_size;
                    size -= tile_size;
                }
            }

            ff_slice_thread_execute_with_mainfunc(avctx, decode_tiles_mt,
                                                  loopfilter_proc, s->td, NULL,
                                                  s->s.h.tiling.tile_cols);
        } else
#endif
        {
            ret = decode_tiles(avctx, data, size);
            if (ret < 0) {
                ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
                return ret;
            }
        }

        // Sum all counts fields into td[0].counts for tile threading
        if (avctx->active_thread_type == FF_THREAD_SLICE)
            for (i = 1; i < s->s.h.tiling.tile_cols; i++)
                for (j = 0; j < sizeof(s->td[i].counts) / sizeof(unsigned); j++)
                    ((unsigned *)&s->td[0].counts)[j] += ((unsigned *)&s->td[i].counts)[j];

        if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) {
            ff_vp9_adapt_probs(s);
            ff_thread_finish_setup(avctx);
        }
    } while (s->pass++ == 1);
    ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);

finish:
    // ref frame setup: promote the prepared next_refs into the active slots
    for (i = 0; i < 8; i++) {
        if (s->s.refs[i].f->buf[0])
            ff_thread_release_buffer(avctx, &s->s.refs[i]);
        if (s->next_refs[i].f->buf[0] &&
            (ret = ff_thread_ref_frame(&s->s.refs[i], &s->next_refs[i])) < 0)
            return ret;
    }

    // invisible (e.g. alt-ref) frames update the references but are not
    // returned to the caller
    if (!s->s.h.invisible) {
        if ((ret = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0)
            return ret;
        *got_frame = 1;
    }
    return pkt->size;
}
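
/* Drop all decoder-internal frame state, e.g. when seeking. */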
static void vp9_decode_flush(AVCodecContext *avctx)
{
    VP9Context *s = avctx->priv_data;
    int i;

    for (i = 0; i < 3; i++)
        vp9_frame_unref(avctx, &s->s.frames[i]);
    for (i = 0; i < 8; i++)
        ff_thread_release_buffer(avctx, &s->s.refs[i]);
}

static int init_frames(AVCodecContext *avctx)
{
    VP9Context *s = avctx->priv_data;
    int i;

    for (i = 0; i < 3; i++) {
        s->s.frames[i].tf.f = av_frame_alloc();
        if (!s->s.frames[i].tf.f) {
            vp9_decode_free(avctx);
            av_log(avctx, AV_LOG_ERROR, "Failed to allocate frame buffer %d\n", i);
            return AVERROR(ENOMEM);
        }
    }
    for (i = 0; i < 8; i++) {
        s->s.refs[i].f = av_frame_alloc();
        s->next_refs[i].f = av_frame_alloc();
        if (!s->s.refs[i].f || !s->next_refs[i].f) {
            vp9_decode_free(avctx);
            av_log(avctx, AV_LOG_ERROR, "Failed to allocate frame buffer %d\n", i);
            return AVERROR(ENOMEM);
        }
    }
    return 0;
}

static av_cold int vp9_decode_init(AVCodecContext *avctx)
{
    VP9Context *s = avctx->priv_data;

    avctx->internal->allocate_progress = 1;
    s->last_bpp = 0;
    s->s.h.filter.sharpness = -1;

    return init_frames(avctx);
}
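
/*
 * Frame-threading support: each worker thread gets its own set of frame
 * buffers, and after every frame the header state and reference frames are
 * carried over into the next thread's context.
 */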
#if HAVE_THREADS
static av_cold int vp9_decode_init_thread_copy(AVCodecContext *avctx)
{
    return init_frames(avctx);
}

static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    int i, ret;
    VP9Context *s = dst->priv_data, *ssrc = src->priv_data;

    for (i = 0; i < 3; i++) {
        if (s->s.frames[i].tf.f->buf[0])
            vp9_frame_unref(dst, &s->s.frames[i]);
        if (ssrc->s.frames[i].tf.f->buf[0]) {
            if ((ret = vp9_frame_ref(dst, &s->s.frames[i], &ssrc->s.frames[i])) < 0)
                return ret;
        }
    }
    for (i = 0; i < 8; i++) {
        if (s->s.refs[i].f->buf[0])
            ff_thread_release_buffer(dst, &s->s.refs[i]);
        if (ssrc->next_refs[i].f->buf[0]) {
            if ((ret = ff_thread_ref_frame(&s->s.refs[i], &ssrc->next_refs[i])) < 0)
                return ret;
        }
    }

    s->s.h.invisible = ssrc->s.h.invisible;
    s->s.h.keyframe = ssrc->s.h.keyframe;
    s->s.h.intraonly = ssrc->s.h.intraonly;
    s->ss_v = ssrc->ss_v;
    s->ss_h = ssrc->ss_h;
    s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
    s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
    s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
    s->bytesperpixel = ssrc->bytesperpixel;
    s->gf_fmt = ssrc->gf_fmt;
    s->w = ssrc->w;
    s->h = ssrc->h;
    s->s.h.bpp = ssrc->s.h.bpp;
    s->bpp_index = ssrc->bpp_index;
    s->pix_fmt = ssrc->pix_fmt;
    memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
    memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta));
    memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
           sizeof(s->s.h.segmentation.feat));

    return 0;
}
#endif
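
/*
 * The vp9_superframe_split bitstream filter is applied automatically, so the
 * decoder always receives one frame per packet.
 */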
AVCodec ff_vp9_decoder = {
    .name                  = "vp9",
    .long_name             = NULL_IF_CONFIG_SMALL("Google VP9"),
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = AV_CODEC_ID_VP9,
    .priv_data_size        = sizeof(VP9Context),
    .init                  = vp9_decode_init,
    .close                 = vp9_decode_free,
    .decode                = vp9_decode_frame,
    .capabilities          = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS,
    .caps_internal         = FF_CODEC_CAP_SLICE_THREAD_HAS_MF,
    .flush                 = vp9_decode_flush,
    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(vp9_decode_init_thread_copy),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(vp9_decode_update_thread_context),
    .profiles              = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
    .bsfs                  = "vp9_superframe_split",
    .hw_configs            = (const AVCodecHWConfigInternal*[]) {
#if CONFIG_VP9_DXVA2_HWACCEL
                               HWACCEL_DXVA2(vp9),
#endif
#if CONFIG_VP9_D3D11VA_HWACCEL
                               HWACCEL_D3D11VA(vp9),
#endif
#if CONFIG_VP9_D3D11VA2_HWACCEL
                               HWACCEL_D3D11VA2(vp9),
#endif
#if CONFIG_VP9_NVDEC_HWACCEL
                               HWACCEL_NVDEC(vp9),
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
                               HWACCEL_VAAPI(vp9),
#endif
                               NULL
                           },
};