/*
 * DXVA2 WMV3/VC-1 HW acceleration.
 *
 * copyright (c) 2010 Laurent Aimar
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <assert.h>

#include "mpegutils.h"
#include "vc1.h"
#include "vc1data.h"

// The headers above may include w32threads.h, which uses the original
// _WIN32_WINNT define, while dxva2_internal.h redefines it to target a
// potentially newer version.
#include "dxva2_internal.h"

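/* Per-picture private data attached by the hwaccel: the DXVA picture
 * parameters, a single slice descriptor and a pointer to the compressed
 * bitstream of the current frame. */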
struct dxva2_picture_context {
    DXVA_PictureParameters pp;
    DXVA_SliceInfo         si;

    const uint8_t          *bitstream;
    unsigned               bitstream_size;
};

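/* Translate the decoder state of the current VC-1/WMV3 picture into the
 * DXVA_PictureParameters structure: reference surface indices, picture
 * type, profile-dependent dimensions and the packed bit fields. */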
static void fill_picture_parameters(AVCodecContext *avctx,
                                    AVDXVAContext *ctx, const VC1Context *v,
                                    DXVA_PictureParameters *pp)
{
    const MpegEncContext *s = &v->s;
    const Picture *current_picture = s->current_picture_ptr;

    memset(pp, 0, sizeof(*pp));
    pp->wDecodedPictureIndex    =
    pp->wDeblockedPictureIndex  = ff_dxva2_get_surface_index(avctx, ctx, current_picture->f);
    if (s->pict_type != AV_PICTURE_TYPE_I && !v->bi_type)
        pp->wForwardRefPictureIndex = ff_dxva2_get_surface_index(avctx, ctx, s->last_picture.f);
    else
        pp->wForwardRefPictureIndex = 0xffff;
    if (s->pict_type == AV_PICTURE_TYPE_B && !v->bi_type)
        pp->wBackwardRefPictureIndex = ff_dxva2_get_surface_index(avctx, ctx, s->next_picture.f);
    else
        pp->wBackwardRefPictureIndex = 0xffff;
    if (v->profile == PROFILE_ADVANCED) {
        /* It is the cropped width/height -1 of the frame */
        pp->wPicWidthInMBminus1  = avctx->width  - 1;
        pp->wPicHeightInMBminus1 = avctx->height - 1;
    } else {
        /* It is the coded width/height in macroblock -1 of the frame */
        pp->wPicWidthInMBminus1  = s->mb_width  - 1;
        pp->wPicHeightInMBminus1 = s->mb_height - 1;
    }
    pp->bMacroblockWidthMinus1  = 15;
    pp->bMacroblockHeightMinus1 = 15;
    pp->bBlockWidthMinus1       = 7;
    pp->bBlockHeightMinus1      = 7;
    pp->bBPPminus1              = 7;
    if (s->picture_structure & PICT_TOP_FIELD)
        pp->bPicStructure      |= 0x01;
    if (s->picture_structure & PICT_BOTTOM_FIELD)
        pp->bPicStructure      |= 0x02;
    pp->bSecondField            = v->interlace && v->fcm != ILACE_FIELD && !s->first_field;
    pp->bPicIntra               = s->pict_type == AV_PICTURE_TYPE_I || v->bi_type;
    pp->bPicBackwardPrediction  = s->pict_type == AV_PICTURE_TYPE_B && !v->bi_type;
    pp->bBidirectionalAveragingMode = (1 << 7) |
                                      ((DXVA_CONTEXT_CFG_INTRARESID(avctx, ctx) != 0) << 6) |
                                      ((DXVA_CONTEXT_CFG_RESIDACCEL(avctx, ctx) != 0) << 5) |
                                      ((v->lumscale != 32 || v->lumshift != 0)        << 4) |
                                      ((v->profile == PROFILE_ADVANCED)               << 3);
    pp->bMVprecisionAndChromaRelation = ((v->mv_mode == MV_PMODE_1MV_HPEL_BILIN) << 3) |
                                        (1                                       << 2) |
                                        (0                                       << 1) |
                                        (!s->quarter_sample                          );
    pp->bChromaFormat           = v->chromaformat;
    DXVA_CONTEXT_REPORT_ID(avctx, ctx)++;
    if (DXVA_CONTEXT_REPORT_ID(avctx, ctx) >= (1 << 16))
        DXVA_CONTEXT_REPORT_ID(avctx, ctx) = 1;
    pp->bPicScanFixed           = DXVA_CONTEXT_REPORT_ID(avctx, ctx) >> 8;
    pp->bPicScanMethod          = DXVA_CONTEXT_REPORT_ID(avctx, ctx) & 0xff;
    pp->bPicReadbackRequests    = 0;
    pp->bRcontrol               = v->rnd;
    pp->bPicSpatialResid8       = (v->panscanflag  << 7) |
                                  (v->refdist_flag << 6) |
                                  (s->loop_filter  << 5) |
                                  (v->fastuvmc     << 4) |
                                  (v->extended_mv  << 3) |
                                  (v->dquant       << 1) |
                                  (v->vstransform      );
    pp->bPicOverflowBlocks      = (v->quantizer_mode << 6) |
                                  (v->multires       << 5) |
                                  (v->resync_marker  << 4) |
                                  (v->rangered       << 3) |
                                  (s->max_b_frames       );
    pp->bPicExtrapolation       = (!v->interlace || v->fcm == PROGRESSIVE) ? 1 : 2;
    pp->bPicDeblocked           = ((!pp->bPicBackwardPrediction && v->overlap)        << 6) |
                                  ((v->profile != PROFILE_ADVANCED && v->rangeredfrm) << 5) |
                                  (s->loop_filter                                     << 1);
    pp->bPicDeblockConfined     = (v->postprocflag << 7) |
                                  (v->broadcast    << 6) |
                                  (v->interlace    << 5) |
                                  (v->tfcntrflag   << 4) |
                                  (v->finterpflag  << 3) |
                                  ((s->pict_type != AV_PICTURE_TYPE_B) << 2) |
                                  (v->psf          << 1) |
                                  (v->extended_dmv     );
    if (s->pict_type != AV_PICTURE_TYPE_I)
        pp->bPic4MVallowed      = v->mv_mode == MV_PMODE_MIXED_MV ||
                                  (v->mv_mode == MV_PMODE_INTENSITY_COMP &&
                                   v->mv_mode2 == MV_PMODE_MIXED_MV);
    if (v->profile == PROFILE_ADVANCED)
        pp->bPicOBMC            = (v->range_mapy_flag  << 7) |
                                  (v->range_mapy       << 4) |
                                  (v->range_mapuv_flag << 3) |
                                  (v->range_mapuv          );
    pp->bPicBinPB               = 0;
    pp->bMV_RPS                 = 0;
    pp->bReservedBits           = 0;
    if (s->picture_structure == PICT_FRAME) {
        pp->wBitstreamFcodes      = v->lumscale;
        pp->wBitstreamPCEelements = v->lumshift;
    } else {
        /* Syntax: (top_field_param << 8) | bottom_field_param */
        pp->wBitstreamFcodes      = (v->lumscale << 8) | v->lumscale;
        pp->wBitstreamPCEelements = (v->lumshift << 8) | v->lumshift;
    }
    pp->bBitstreamConcealmentNeed   = 0;
    pp->bBitstreamConcealmentMethod = 0;
}

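/* Describe the single slice that covers the whole picture; position and
 * size are byte offsets/lengths within the recorded bitstream. */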
static void fill_slice(AVCodecContext *avctx, DXVA_SliceInfo *slice,
                       unsigned position, unsigned size)
{
    const VC1Context *v = avctx->priv_data;
    const MpegEncContext *s = &v->s;

    memset(slice, 0, sizeof(*slice));
    slice->wHorizontalPosition = 0;
    slice->wVerticalPosition   = s->mb_y;
    slice->dwSliceBitsInBuffer = 8 * size;
    slice->dwSliceDataLocation = position;
    slice->bStartCodeBitOffset = 0;
    slice->bReservedBits       = 0;
    slice->wMBbitOffset        = get_bits_count(&s->gb);
    slice->wNumberMBsInSlice   = s->mb_width * s->mb_height; /* XXX We assume 1 slice */
    slice->wQuantizerScaleCode = v->pq;
    slice->wBadSliceChopping   = 0;
}

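/* Copy the frame bitstream into the accelerator's bitstream buffer,
 * prefixing a VC-1 frame start code when decoding VC-1 (not WMV3) and
 * zero-padding to a 128-byte boundary, then commit the matching
 * slice-control buffer. */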
static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
                                             DECODER_BUFFER_DESC *bs,
                                             DECODER_BUFFER_DESC *sc)
{
    const VC1Context *v = avctx->priv_data;
    AVDXVAContext *ctx = avctx->hwaccel_context;
    const MpegEncContext *s = &v->s;
    struct dxva2_picture_context *ctx_pic = s->current_picture_ptr->hwaccel_picture_private;

    DXVA_SliceInfo *slice = &ctx_pic->si;

    static const uint8_t start_code[] = { 0, 0, 1, 0x0d };
    const unsigned start_code_size = avctx->codec_id == AV_CODEC_ID_VC1 ? sizeof(start_code) : 0;
    const unsigned slice_size = slice->dwSliceBitsInBuffer / 8;
    const unsigned padding = 128 - ((start_code_size + slice_size) & 127);
    const unsigned data_size = start_code_size + slice_size + padding;

    void     *dxva_data_ptr;
    uint8_t  *dxva_data;
    unsigned dxva_size;
    int      result;
    unsigned type;

#if CONFIG_D3D11VA
    if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD) {
        type = D3D11_VIDEO_DECODER_BUFFER_BITSTREAM;
        if (FAILED(ID3D11VideoContext_GetDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context,
                                                       D3D11VA_CONTEXT(ctx)->decoder,
                                                       type,
                                                       &dxva_size, &dxva_data_ptr)))
            return -1;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        type = DXVA2_BitStreamDateBufferType;
        if (FAILED(IDirectXVideoDecoder_GetBuffer(DXVA2_CONTEXT(ctx)->decoder,
                                                  type,
                                                  &dxva_data_ptr, &dxva_size)))
            return -1;
    }
#endif

    dxva_data = dxva_data_ptr;
    result = data_size <= dxva_size ? 0 : -1;
    if (!result) {
        if (start_code_size > 0)
            memcpy(dxva_data, start_code, start_code_size);
        memcpy(dxva_data + start_code_size,
               ctx_pic->bitstream + slice->dwSliceDataLocation, slice_size);
        if (padding > 0)
            memset(dxva_data + start_code_size + slice_size, 0, padding);
        slice->dwSliceBitsInBuffer = 8 * data_size;
    }

#if CONFIG_D3D11VA
    if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD)
        if (FAILED(ID3D11VideoContext_ReleaseDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context, D3D11VA_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD)
        if (FAILED(IDirectXVideoDecoder_ReleaseBuffer(DXVA2_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif
    if (result)
        return result;

#if CONFIG_D3D11VA
    if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD) {
        D3D11_VIDEO_DECODER_BUFFER_DESC *dsc11 = bs;
        memset(dsc11, 0, sizeof(*dsc11));
        dsc11->BufferType     = type;
        dsc11->DataSize       = data_size;
        dsc11->NumMBsInBuffer = s->mb_width * s->mb_height;

        type = D3D11_VIDEO_DECODER_BUFFER_SLICE_CONTROL;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        DXVA2_DecodeBufferDesc *dsc2 = bs;
        memset(dsc2, 0, sizeof(*dsc2));
        dsc2->CompressedBufferType = type;
        dsc2->DataSize             = data_size;
        dsc2->NumMBsInBuffer       = s->mb_width * s->mb_height;

        type = DXVA2_SliceControlBufferType;
    }
#endif
    assert((data_size & 127) == 0);
    return ff_dxva2_commit_buffer(avctx, ctx, sc,
                                  type,
                                  slice, sizeof(*slice), s->mb_width * s->mb_height);
}

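/* Called at the start of each frame: check that the hwaccel context is
 * usable and fill the picture parameters; the bitstream is recorded later
 * by the decode_slice callback. */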
static int dxva2_vc1_start_frame(AVCodecContext *avctx,
                                 av_unused const uint8_t *buffer,
                                 av_unused uint32_t size)
{
    const VC1Context *v = avctx->priv_data;
    AVDXVAContext *ctx = avctx->hwaccel_context;
    struct dxva2_picture_context *ctx_pic = v->s.current_picture_ptr->hwaccel_picture_private;

    if (DXVA_CONTEXT_DECODER(avctx, ctx) == NULL ||
        DXVA_CONTEXT_CFG(avctx, ctx)     == NULL ||
        DXVA_CONTEXT_COUNT(avctx, ctx)   <= 0)
        return -1;
    assert(ctx_pic);

    fill_picture_parameters(avctx, ctx, v, &ctx_pic->pp);

    ctx_pic->bitstream_size = 0;
    ctx_pic->bitstream      = NULL;
    return 0;
}

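/* Called for each slice. Only one slice per frame is supported: the data
 * pointer is recorded after stripping an optional frame start code, and a
 * second call for the same frame fails. */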
static int dxva2_vc1_decode_slice(AVCodecContext *avctx,
                                  const uint8_t *buffer,
                                  uint32_t size)
{
    const VC1Context *v = avctx->priv_data;
    const Picture *current_picture = v->s.current_picture_ptr;
    struct dxva2_picture_context *ctx_pic = current_picture->hwaccel_picture_private;

    if (ctx_pic->bitstream_size > 0)
        return -1;

    if (avctx->codec_id == AV_CODEC_ID_VC1 &&
        size >= 4 && IS_MARKER(AV_RB32(buffer))) {
        buffer += 4;
        size   -= 4;
    }

    ctx_pic->bitstream_size = size;
    ctx_pic->bitstream      = buffer;

    fill_slice(avctx, &ctx_pic->si, 0, size);
    return 0;
}

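/* Called once the frame is complete: hand the picture parameters, slice
 * info and bitstream over to the accelerator. */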
static int dxva2_vc1_end_frame(AVCodecContext *avctx)
{
    VC1Context *v = avctx->priv_data;
    struct dxva2_picture_context *ctx_pic = v->s.current_picture_ptr->hwaccel_picture_private;
    int ret;

    if (ctx_pic->bitstream_size <= 0)
        return -1;

    ret = ff_dxva2_common_end_frame(avctx, v->s.current_picture_ptr->f,
                                    &ctx_pic->pp, sizeof(ctx_pic->pp),
                                    NULL, 0,
                                    commit_bitstream_and_slice_buffer);
    if (!ret)
        ff_mpeg_draw_horiz_band(&v->s, 0, avctx->height);
    return ret;
}

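/* Hardware accelerator descriptors: WMV3 and VC-1, each over DXVA2 and
 * D3D11VA. All variants share the same callbacks. */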
#if CONFIG_WMV3_DXVA2_HWACCEL
AVHWAccel ff_wmv3_dxva2_hwaccel = {
    .name                 = "wmv3_dxva2",
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_WMV3,
    .pix_fmt              = AV_PIX_FMT_DXVA2_VLD,
    .start_frame          = dxva2_vc1_start_frame,
    .decode_slice         = dxva2_vc1_decode_slice,
    .end_frame            = dxva2_vc1_end_frame,
    .frame_priv_data_size = sizeof(struct dxva2_picture_context),
};
#endif

#if CONFIG_VC1_DXVA2_HWACCEL
AVHWAccel ff_vc1_dxva2_hwaccel = {
    .name                 = "vc1_dxva2",
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_VC1,
    .pix_fmt              = AV_PIX_FMT_DXVA2_VLD,
    .start_frame          = dxva2_vc1_start_frame,
    .decode_slice         = dxva2_vc1_decode_slice,
    .end_frame            = dxva2_vc1_end_frame,
    .frame_priv_data_size = sizeof(struct dxva2_picture_context),
};
#endif

#if CONFIG_WMV3_D3D11VA_HWACCEL
AVHWAccel ff_wmv3_d3d11va_hwaccel = {
    .name                 = "wmv3_d3d11va",
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_WMV3,
    .pix_fmt              = AV_PIX_FMT_D3D11VA_VLD,
    .start_frame          = dxva2_vc1_start_frame,
    .decode_slice         = dxva2_vc1_decode_slice,
    .end_frame            = dxva2_vc1_end_frame,
    .frame_priv_data_size = sizeof(struct dxva2_picture_context),
};
#endif

#if CONFIG_VC1_D3D11VA_HWACCEL
AVHWAccel ff_vc1_d3d11va_hwaccel = {
    .name                 = "vc1_d3d11va",
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_VC1,
    .pix_fmt              = AV_PIX_FMT_D3D11VA_VLD,
    .start_frame          = dxva2_vc1_start_frame,
    .decode_slice         = dxva2_vc1_decode_slice,
    .end_frame            = dxva2_vc1_end_frame,
    .frame_priv_data_size = sizeof(struct dxva2_picture_context),
};
#endif