/*
 * DXVA2 HEVC HW acceleration.
 *
 * copyright (c) 2014 - 2015 Hendrik Leppkes
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avassert.h"

#include "hevc_data.h"
#include "hevcdec.h"

// The headers above may include w32threads.h, which uses the original
// _WIN32_WINNT define, while dxva2_internal.h redefines it to target a
// potentially newer version.
#include "dxva2_internal.h"

#define MAX_SLICES 256
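
/* Per-picture private data shared between start_frame, decode_slice and
 * end_frame: the DXVA picture parameters, the quantization matrices, the
 * short slice descriptors and a pointer to the coalesced slice bitstream. */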
struct hevc_dxva2_picture_context {
    DXVA_PicParams_HEVC   pp;
    DXVA_Qmatrix_HEVC     qm;
    unsigned              slice_count;
    DXVA_Slice_HEVC_Short slice_short[MAX_SLICES];
    const uint8_t         *bitstream;
    unsigned              bitstream_size;
};
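
/* Pack a 7-bit surface index and a 1-bit flag into a DXVA_PicEntry_HEVC. */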
static void fill_picture_entry(DXVA_PicEntry_HEVC *pic,
                               unsigned index, unsigned flag)
{
    av_assert0((index & 0x7f) == index && (flag & 0x01) == flag);
    pic->bPicEntry = index | (flag << 7);
}
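
/* Return the position of the given surface index in RefPicList, or 0xff if
 * the surface is not referenced by the current picture. */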
static int get_refpic_index(const DXVA_PicParams_HEVC *pp, int surface_index)
{
    int i;
    for (i = 0; i < FF_ARRAY_ELEMS(pp->RefPicList); i++) {
        if ((pp->RefPicList[i].bPicEntry & 0x7f) == surface_index)
            return i;
    }
    return 0xff;
}
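
/* Translate the active SPS/PPS/slice-header state into the
 * DXVA_PicParams_HEVC structure consumed by the hardware decoder. */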
static void fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *ctx, const HEVCContext *h,
                                    DXVA_PicParams_HEVC *pp)
{
    const HEVCFrame *current_picture = h->ref;
    const HEVCSPS *sps = h->ps.sps;
    const HEVCPPS *pps = h->ps.pps;
    int i, j;

    memset(pp, 0, sizeof(*pp));

    pp->PicWidthInMinCbsY  = sps->min_cb_width;
    pp->PicHeightInMinCbsY = sps->min_cb_height;

    pp->wFormatAndSequenceInfoFlags = (sps->chroma_format_idc          <<  0) |
                                      (sps->separate_colour_plane_flag <<  2) |
                                      ((sps->bit_depth - 8)            <<  3) |
                                      ((sps->bit_depth - 8)            <<  6) |
                                      ((sps->log2_max_poc_lsb - 4)     <<  9) |
                                      (0                               << 13) |
                                      (0                               << 14) |
                                      (0                               << 15);

    fill_picture_entry(&pp->CurrPic, ff_dxva2_get_surface_index(avctx, ctx, current_picture->frame), 0);

    pp->sps_max_dec_pic_buffering_minus1         = sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering - 1;
    pp->log2_min_luma_coding_block_size_minus3   = sps->log2_min_cb_size - 3;
    pp->log2_diff_max_min_luma_coding_block_size = sps->log2_diff_max_min_coding_block_size;
    pp->log2_min_transform_block_size_minus2     = sps->log2_min_tb_size - 2;
    pp->log2_diff_max_min_transform_block_size   = sps->log2_max_trafo_size - sps->log2_min_tb_size;
    pp->max_transform_hierarchy_depth_inter      = sps->max_transform_hierarchy_depth_inter;
    pp->max_transform_hierarchy_depth_intra      = sps->max_transform_hierarchy_depth_intra;
    pp->num_short_term_ref_pic_sets              = sps->nb_st_rps;
    pp->num_long_term_ref_pics_sps               = sps->num_long_term_ref_pics_sps;

    pp->num_ref_idx_l0_default_active_minus1     = pps->num_ref_idx_l0_default_active - 1;
    pp->num_ref_idx_l1_default_active_minus1     = pps->num_ref_idx_l1_default_active - 1;
    pp->init_qp_minus26                          = pps->pic_init_qp_minus26;

    if (h->sh.short_term_ref_pic_set_sps_flag == 0 && h->sh.short_term_rps) {
        pp->ucNumDeltaPocsOfRefRpsIdx      = h->sh.short_term_rps->rps_idx_num_delta_pocs;
        pp->wNumBitsForShortTermRPSInSlice = h->sh.short_term_ref_pic_set_size;
    }

    pp->dwCodingParamToolFlags = (sps->scaling_list_enable_flag                                                 <<  0) |
                                 (sps->amp_enabled_flag                                                         <<  1) |
                                 (sps->sao_enabled                                                              <<  2) |
                                 (sps->pcm_enabled_flag                                                         <<  3) |
                                 ((sps->pcm_enabled_flag ? (sps->pcm.bit_depth - 1) : 0)                        <<  4) |
                                 ((sps->pcm_enabled_flag ? (sps->pcm.bit_depth_chroma - 1) : 0)                 <<  8) |
                                 ((sps->pcm_enabled_flag ? (sps->pcm.log2_min_pcm_cb_size - 3) : 0)             << 12) |
                                 ((sps->pcm_enabled_flag ? (sps->pcm.log2_max_pcm_cb_size - sps->pcm.log2_min_pcm_cb_size) : 0) << 14) |
                                 (sps->pcm.loop_filter_disable_flag                                             << 16) |
                                 (sps->long_term_ref_pics_present_flag                                          << 17) |
                                 (sps->sps_temporal_mvp_enabled_flag                                            << 18) |
                                 (sps->sps_strong_intra_smoothing_enable_flag                                   << 19) |
                                 (pps->dependent_slice_segments_enabled_flag                                    << 20) |
                                 (pps->output_flag_present_flag                                                 << 21) |
                                 (pps->num_extra_slice_header_bits                                              << 22) |
                                 (pps->sign_data_hiding_flag                                                    << 25) |
                                 (pps->cabac_init_present_flag                                                  << 26) |
                                 (0                                                                             << 27);

    pp->dwCodingSettingPicturePropertyFlags = (pps->constrained_intra_pred_flag                                  <<  0) |
                                              (pps->transform_skip_enabled_flag                                  <<  1) |
                                              (pps->cu_qp_delta_enabled_flag                                     <<  2) |
                                              (pps->pic_slice_level_chroma_qp_offsets_present_flag               <<  3) |
                                              (pps->weighted_pred_flag                                           <<  4) |
                                              (pps->weighted_bipred_flag                                         <<  5) |
                                              (pps->transquant_bypass_enable_flag                                <<  6) |
                                              (pps->tiles_enabled_flag                                           <<  7) |
                                              (pps->entropy_coding_sync_enabled_flag                             <<  8) |
                                              (pps->uniform_spacing_flag                                         <<  9) |
                                              ((pps->tiles_enabled_flag ? pps->loop_filter_across_tiles_enabled_flag : 0) << 10) |
                                              (pps->seq_loop_filter_across_slices_enabled_flag                   << 11) |
                                              (pps->deblocking_filter_override_enabled_flag                      << 12) |
                                              (pps->disable_dbf                                                  << 13) |
                                              (pps->lists_modification_present_flag                              << 14) |
                                              (pps->slice_header_extension_present_flag                          << 15) |
                                              (IS_IRAP(h)                                                        << 16) |
                                              (IS_IDR(h)                                                         << 17) |
                                              /* IntraPicFlag */
                                              (IS_IRAP(h)                                                        << 18) |
                                              (0                                                                 << 19);

    pp->pps_cb_qp_offset = pps->cb_qp_offset;
    pp->pps_cr_qp_offset = pps->cr_qp_offset;
    if (pps->tiles_enabled_flag) {
        pp->num_tile_columns_minus1 = pps->num_tile_columns - 1;
        pp->num_tile_rows_minus1    = pps->num_tile_rows - 1;

        if (!pps->uniform_spacing_flag) {
            for (i = 0; i < pps->num_tile_columns; i++)
                pp->column_width_minus1[i] = pps->column_width[i] - 1;

            for (i = 0; i < pps->num_tile_rows; i++)
                pp->row_height_minus1[i] = pps->row_height[i] - 1;
        }
    }

    pp->diff_cu_qp_delta_depth           = pps->diff_cu_qp_delta_depth;
    pp->pps_beta_offset_div2             = pps->beta_offset / 2;
    pp->pps_tc_offset_div2               = pps->tc_offset / 2;
    pp->log2_parallel_merge_level_minus2 = pps->log2_parallel_merge_level - 2;
    pp->CurrPicOrderCntVal               = h->poc;

    // fill RefPicList from the DPB
    for (i = 0, j = 0; i < FF_ARRAY_ELEMS(pp->RefPicList); i++) {
        const HEVCFrame *frame = NULL;
        while (!frame && j < FF_ARRAY_ELEMS(h->DPB)) {
            if (&h->DPB[j] != current_picture && (h->DPB[j].flags & (HEVC_FRAME_FLAG_LONG_REF | HEVC_FRAME_FLAG_SHORT_REF)))
                frame = &h->DPB[j];
            j++;
        }

        if (frame) {
            fill_picture_entry(&pp->RefPicList[i], ff_dxva2_get_surface_index(avctx, ctx, frame->frame), !!(frame->flags & HEVC_FRAME_FLAG_LONG_REF));
            pp->PicOrderCntValList[i] = frame->poc;
        } else {
            pp->RefPicList[i].bPicEntry = 0xff;
            pp->PicOrderCntValList[i]   = 0;
        }
    }

#define DO_REF_LIST(ref_idx, ref_list) { \
        const RefPicList *rpl = &h->rps[ref_idx]; \
        for (i = 0, j = 0; i < FF_ARRAY_ELEMS(pp->ref_list); i++) { \
            const HEVCFrame *frame = NULL; \
            while (!frame && j < rpl->nb_refs) \
                frame = rpl->ref[j++]; \
            if (frame) \
                pp->ref_list[i] = get_refpic_index(pp, ff_dxva2_get_surface_index(avctx, ctx, frame->frame)); \
            else \
                pp->ref_list[i] = 0xff; \
        } \
    }

    // Fill short term and long term lists
    DO_REF_LIST(ST_CURR_BEF, RefPicSetStCurrBefore);
    DO_REF_LIST(ST_CURR_AFT, RefPicSetStCurrAfter);
    DO_REF_LIST(LT_CURR, RefPicSetLtCurr);

    pp->StatusReportFeedbackNumber = 1 + DXVA_CONTEXT_REPORT_ID(avctx, ctx)++;
}
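
/* Copy the active scaling lists (from the PPS when present, otherwise the
 * SPS) into the DXVA quantization matrix buffer, reordering the entries with
 * the HEVC diagonal scan tables. */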
static void fill_scaling_lists(AVDXVAContext *ctx, const HEVCContext *h, DXVA_Qmatrix_HEVC *qm)
{
    unsigned i, j, pos;
    const ScalingList *sl = h->ps.pps->scaling_list_data_present_flag ?
                            &h->ps.pps->scaling_list : &h->ps.sps->scaling_list;

    memset(qm, 0, sizeof(*qm));
    for (i = 0; i < 6; i++) {
        for (j = 0; j < 16; j++) {
            pos = 4 * ff_hevc_diag_scan4x4_y[j] + ff_hevc_diag_scan4x4_x[j];
            qm->ucScalingLists0[i][j] = sl->sl[0][i][pos];
        }

        for (j = 0; j < 64; j++) {
            pos = 8 * ff_hevc_diag_scan8x8_y[j] + ff_hevc_diag_scan8x8_x[j];
            qm->ucScalingLists1[i][j] = sl->sl[1][i][pos];
            qm->ucScalingLists2[i][j] = sl->sl[2][i][pos];

            if (i < 2)
                qm->ucScalingLists3[i][j] = sl->sl[3][i * 3][pos];
        }

        qm->ucScalingListDCCoefSizeID2[i] = sl->sl_dc[0][i];
        if (i < 2)
            qm->ucScalingListDCCoefSizeID3[i] = sl->sl_dc[1][i * 3];
    }
}
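
/* Initialize a short slice descriptor with the slice's offset into the
 * bitstream and its size in bytes. */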
static void fill_slice_short(DXVA_Slice_HEVC_Short *slice,
                             unsigned position, unsigned size)
{
    memset(slice, 0, sizeof(*slice));
    slice->BSNALunitDataLocation = position;
    slice->SliceBytesInBuffer    = size;
    slice->wBadSliceChopping     = 0;
}
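
/* Copy each slice into the decoder's bitstream buffer as Annex B data
 * (start code followed by the slice NAL unit), pad the buffer to a 128-byte
 * boundary, then commit the slice control buffer. */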
static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
                                             DECODER_BUFFER_DESC *bs,
                                             DECODER_BUFFER_DESC *sc)
{
    const HEVCContext *h = avctx->priv_data;
    AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
    const HEVCFrame *current_picture = h->ref;
    struct hevc_dxva2_picture_context *ctx_pic = current_picture->hwaccel_picture_private;
    DXVA_Slice_HEVC_Short *slice = NULL;
    void     *dxva_data_ptr;
    uint8_t  *dxva_data, *current, *end;
    unsigned dxva_size;
    void     *slice_data;
    unsigned slice_size;
    unsigned padding;
    unsigned i;
    unsigned type;

    /* Create an annex B bitstream buffer with only slice NAL and finalize slice */
#if CONFIG_D3D11VA
    if (ff_dxva2_is_d3d11(avctx)) {
        type = D3D11_VIDEO_DECODER_BUFFER_BITSTREAM;
        if (FAILED(ID3D11VideoContext_GetDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context,
                                                       D3D11VA_CONTEXT(ctx)->decoder,
                                                       type,
                                                       &dxva_size, &dxva_data_ptr)))
            return -1;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        type = DXVA2_BitStreamDateBufferType;
        if (FAILED(IDirectXVideoDecoder_GetBuffer(DXVA2_CONTEXT(ctx)->decoder,
                                                  type,
                                                  &dxva_data_ptr, &dxva_size)))
            return -1;
    }
#endif

    dxva_data = dxva_data_ptr;
    current   = dxva_data;
    end       = dxva_data + dxva_size;

    for (i = 0; i < ctx_pic->slice_count; i++) {
        static const uint8_t start_code[] = { 0, 0, 1 };
        static const unsigned start_code_size = sizeof(start_code);
        unsigned position, size;

        slice = &ctx_pic->slice_short[i];

        position = slice->BSNALunitDataLocation;
        size     = slice->SliceBytesInBuffer;
        if (start_code_size + size > end - current) {
            av_log(avctx, AV_LOG_ERROR, "Failed to build bitstream");
            break;
        }

        slice->BSNALunitDataLocation = current - dxva_data;
        slice->SliceBytesInBuffer    = start_code_size + size;

        memcpy(current, start_code, start_code_size);
        current += start_code_size;

        memcpy(current, &ctx_pic->bitstream[position], size);
        current += size;
    }
    padding = FFMIN(128 - ((current - dxva_data) & 127), end - current);
    if (slice && padding > 0) {
        memset(current, 0, padding);
        current += padding;

        slice->SliceBytesInBuffer += padding;
    }
#if CONFIG_D3D11VA
    if (ff_dxva2_is_d3d11(avctx))
        if (FAILED(ID3D11VideoContext_ReleaseDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context, D3D11VA_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD)
        if (FAILED(IDirectXVideoDecoder_ReleaseBuffer(DXVA2_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif
    if (i < ctx_pic->slice_count)
        return -1;

#if CONFIG_D3D11VA
    if (ff_dxva2_is_d3d11(avctx)) {
        D3D11_VIDEO_DECODER_BUFFER_DESC *dsc11 = bs;
        memset(dsc11, 0, sizeof(*dsc11));
        dsc11->BufferType     = type;
        dsc11->DataSize       = current - dxva_data;
        dsc11->NumMBsInBuffer = 0;

        type = D3D11_VIDEO_DECODER_BUFFER_SLICE_CONTROL;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        DXVA2_DecodeBufferDesc *dsc2 = bs;
        memset(dsc2, 0, sizeof(*dsc2));
        dsc2->CompressedBufferType = type;
        dsc2->DataSize             = current - dxva_data;
        dsc2->NumMBsInBuffer       = 0;

        type = DXVA2_SliceControlBufferType;
    }
#endif

    slice_data = ctx_pic->slice_short;
    slice_size = ctx_pic->slice_count * sizeof(*ctx_pic->slice_short);

    av_assert0(((current - dxva_data) & 127) == 0);
    return ff_dxva2_commit_buffer(avctx, ctx, sc,
                                  type,
                                  slice_data, slice_size, 0);
}
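
/* Called once per picture before any slice is decoded: fill the picture
 * parameters and scaling lists and reset the slice/bitstream state. */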
static int dxva2_hevc_start_frame(AVCodecContext *avctx,
                                  av_unused const uint8_t *buffer,
                                  av_unused uint32_t size)
{
    const HEVCContext *h = avctx->priv_data;
    AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
    struct hevc_dxva2_picture_context *ctx_pic = h->ref->hwaccel_picture_private;

    if (!DXVA_CONTEXT_VALID(avctx, ctx))
        return -1;
    av_assert0(ctx_pic);

    /* Fill up DXVA_PicParams_HEVC */
    fill_picture_parameters(avctx, ctx, h, &ctx_pic->pp);

    /* Fill up DXVA_Qmatrix_HEVC */
    fill_scaling_lists(ctx, h, &ctx_pic->qm);

    ctx_pic->slice_count    = 0;
    ctx_pic->bitstream_size = 0;
    ctx_pic->bitstream      = NULL;
    return 0;
}
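
/* Record one slice NAL unit; the data is only copied into the decoder's
 * bitstream buffer later, in commit_bitstream_and_slice_buffer(). */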
static int dxva2_hevc_decode_slice(AVCodecContext *avctx,
                                   const uint8_t *buffer,
                                   uint32_t size)
{
    const HEVCContext *h = avctx->priv_data;
    const HEVCFrame *current_picture = h->ref;
    struct hevc_dxva2_picture_context *ctx_pic = current_picture->hwaccel_picture_private;
    unsigned position;

    if (ctx_pic->slice_count >= MAX_SLICES)
        return -1;

    if (!ctx_pic->bitstream)
        ctx_pic->bitstream = buffer;
    ctx_pic->bitstream_size += size;

    position = buffer - ctx_pic->bitstream;
    fill_slice_short(&ctx_pic->slice_short[ctx_pic->slice_count], position, size);
    ctx_pic->slice_count++;

    return 0;
}
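
/* Submit the accumulated picture parameters, scaling lists (only when
 * scaling lists are enabled) and slices to the hardware decoder. */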
static int dxva2_hevc_end_frame(AVCodecContext *avctx)
{
    HEVCContext *h = avctx->priv_data;
    struct hevc_dxva2_picture_context *ctx_pic = h->ref->hwaccel_picture_private;
    int scale = ctx_pic->pp.dwCodingParamToolFlags & 1;
    int ret;

    if (ctx_pic->slice_count <= 0 || ctx_pic->bitstream_size <= 0)
        return -1;

    ret = ff_dxva2_common_end_frame(avctx, h->ref->frame,
                                    &ctx_pic->pp, sizeof(ctx_pic->pp),
                                    scale ? &ctx_pic->qm : NULL, scale ? sizeof(ctx_pic->qm) : 0,
                                    commit_bitstream_and_slice_buffer);
    return ret;
}

#if CONFIG_HEVC_DXVA2_HWACCEL
AVHWAccel ff_hevc_dxva2_hwaccel = {
    .name           = "hevc_dxva2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_HEVC,
    .pix_fmt        = AV_PIX_FMT_DXVA2_VLD,
    .init           = ff_dxva2_decode_init,
    .uninit         = ff_dxva2_decode_uninit,
    .start_frame    = dxva2_hevc_start_frame,
    .decode_slice   = dxva2_hevc_decode_slice,
    .end_frame      = dxva2_hevc_end_frame,
    .frame_priv_data_size = sizeof(struct hevc_dxva2_picture_context),
    .priv_data_size = sizeof(FFDXVASharedContext),
};
#endif

#if CONFIG_HEVC_D3D11VA_HWACCEL
AVHWAccel ff_hevc_d3d11va_hwaccel = {
    .name           = "hevc_d3d11va",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_HEVC,
    .pix_fmt        = AV_PIX_FMT_D3D11VA_VLD,
    .init           = ff_dxva2_decode_init,
    .uninit         = ff_dxva2_decode_uninit,
    .start_frame    = dxva2_hevc_start_frame,
    .decode_slice   = dxva2_hevc_decode_slice,
    .end_frame      = dxva2_hevc_end_frame,
    .frame_priv_data_size = sizeof(struct hevc_dxva2_picture_context),
    .priv_data_size = sizeof(FFDXVASharedContext),
};
#endif

#if CONFIG_HEVC_D3D11VA2_HWACCEL
AVHWAccel ff_hevc_d3d11va2_hwaccel = {
    .name           = "hevc_d3d11va2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_HEVC,
    .pix_fmt        = AV_PIX_FMT_D3D11,
    .init           = ff_dxva2_decode_init,
    .uninit         = ff_dxva2_decode_uninit,
    .start_frame    = dxva2_hevc_start_frame,
    .decode_slice   = dxva2_hevc_decode_slice,
    .end_frame      = dxva2_hevc_end_frame,
    .frame_priv_data_size = sizeof(struct hevc_dxva2_picture_context),
    .priv_data_size = sizeof(FFDXVASharedContext),
};
#endif