/*
 * DXVA2 WMV3/VC-1 HW acceleration.
 *
 * copyright (c) 2010 Laurent Aimar
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <assert.h>

#include "dxva2_internal.h"
#include "mpegutils.h"
#include "vc1.h"
#include "vc1data.h"

struct dxva2_picture_context {
    DXVA_PictureParameters pp;
    DXVA_SliceInfo         si;

    const uint8_t          *bitstream;
    unsigned               bitstream_size;
};

static void fill_picture_parameters(AVCodecContext *avctx,
                                    AVDXVAContext *ctx, const VC1Context *v,
                                    DXVA_PictureParameters *pp)
{
    const MpegEncContext *s = &v->s;
    const Picture *current_picture = s->current_picture_ptr;

    memset(pp, 0, sizeof(*pp));
    pp->wDecodedPictureIndex    =
    pp->wDeblockedPictureIndex  = ff_dxva2_get_surface_index(avctx, ctx, current_picture->f);
    if (s->pict_type != AV_PICTURE_TYPE_I && !v->bi_type)
        pp->wForwardRefPictureIndex = ff_dxva2_get_surface_index(avctx, ctx, s->last_picture.f);
    else
        pp->wForwardRefPictureIndex = 0xffff;
    if (s->pict_type == AV_PICTURE_TYPE_B && !v->bi_type)
        pp->wBackwardRefPictureIndex = ff_dxva2_get_surface_index(avctx, ctx, s->next_picture.f);
    else
        pp->wBackwardRefPictureIndex = 0xffff;
    if (v->profile == PROFILE_ADVANCED) {
        /* Cropped frame width/height, minus 1 */
        pp->wPicWidthInMBminus1  = avctx->width  - 1;
        pp->wPicHeightInMBminus1 = avctx->height - 1;
    } else {
        /* Coded frame width/height in macroblocks, minus 1 */
        pp->wPicWidthInMBminus1  = s->mb_width  - 1;
        pp->wPicHeightInMBminus1 = s->mb_height - 1;
    }
    pp->bMacroblockWidthMinus1  = 15;
    pp->bMacroblockHeightMinus1 = 15;
    pp->bBlockWidthMinus1       = 7;
    pp->bBlockHeightMinus1      = 7;
    pp->bBPPminus1              = 7;
    if (s->picture_structure & PICT_TOP_FIELD)
        pp->bPicStructure      |= 0x01;
    if (s->picture_structure & PICT_BOTTOM_FIELD)
        pp->bPicStructure      |= 0x02;
    pp->bSecondField            = v->interlace && v->fcm != ILACE_FIELD && !s->first_field;
    pp->bPicIntra               = s->pict_type == AV_PICTURE_TYPE_I || v->bi_type;
    pp->bPicBackwardPrediction  = s->pict_type == AV_PICTURE_TYPE_B && !v->bi_type;
    pp->bBidirectionalAveragingMode = (1                                                   << 7) |
                                      ((DXVA_CONTEXT_CFG_INTRARESID(avctx, ctx) != 0)      << 6) |
                                      ((DXVA_CONTEXT_CFG_RESIDACCEL(avctx, ctx) != 0)      << 5) |
                                      ((v->lumscale != 32 || v->lumshift != 0)             << 4) |
                                      ((v->profile == PROFILE_ADVANCED)                    << 3);
    pp->bMVprecisionAndChromaRelation = ((v->mv_mode == MV_PMODE_1MV_HPEL_BILIN) << 3) |
                                        (1                                       << 2) |
                                        (0                                       << 1) |
                                        (!s->quarter_sample                          );
    pp->bChromaFormat           = v->chromaformat;
    DXVA_CONTEXT_REPORT_ID(avctx, ctx)++;
    if (DXVA_CONTEXT_REPORT_ID(avctx, ctx) >= (1 << 16))
        DXVA_CONTEXT_REPORT_ID(avctx, ctx) = 1;
    pp->bPicScanFixed           = DXVA_CONTEXT_REPORT_ID(avctx, ctx) >> 8;
    pp->bPicScanMethod          = DXVA_CONTEXT_REPORT_ID(avctx, ctx) & 0xff;
    pp->bPicReadbackRequests    = 0;
    pp->bRcontrol               = v->rnd;
    pp->bPicSpatialResid8       = (v->panscanflag  << 7) |
                                  (v->refdist_flag << 6) |
                                  (s->loop_filter  << 5) |
                                  (v->fastuvmc     << 4) |
                                  (v->extended_mv  << 3) |
                                  (v->dquant       << 1) |
                                  (v->vstransform      );
    pp->bPicOverflowBlocks      = (v->quantizer_mode << 6) |
                                  (v->multires       << 5) |
                                  (v->resync_marker  << 4) |
                                  (v->rangered       << 3) |
                                  (s->max_b_frames       );
    pp->bPicExtrapolation       = (!v->interlace || v->fcm == PROGRESSIVE) ? 1 : 2;
    pp->bPicDeblocked           = ((!pp->bPicBackwardPrediction && v->overlap)        << 6) |
                                  ((v->profile != PROFILE_ADVANCED && v->rangeredfrm) << 5) |
                                  (s->loop_filter                                     << 1);
    pp->bPicDeblockConfined     = (v->postprocflag << 7) |
                                  (v->broadcast    << 6) |
                                  (v->interlace    << 5) |
                                  (v->tfcntrflag   << 4) |
                                  (v->finterpflag  << 3) |
                                  ((s->pict_type != AV_PICTURE_TYPE_B) << 2) |
                                  (v->psf          << 1) |
                                  (v->extended_dmv     );
    if (s->pict_type != AV_PICTURE_TYPE_I)
        pp->bPic4MVallowed      = v->mv_mode == MV_PMODE_MIXED_MV ||
                                  (v->mv_mode == MV_PMODE_INTENSITY_COMP &&
                                   v->mv_mode2 == MV_PMODE_MIXED_MV);
    if (v->profile == PROFILE_ADVANCED)
        pp->bPicOBMC            = (v->range_mapy_flag  << 7) |
                                  (v->range_mapy       << 4) |
                                  (v->range_mapuv_flag << 3) |
                                  (v->range_mapuv          );
    pp->bPicBinPB               = 0;
    pp->bMV_RPS                 = 0;
    pp->bReservedBits           = 0;
    if (s->picture_structure == PICT_FRAME) {
        pp->wBitstreamFcodes      = v->lumscale;
        pp->wBitstreamPCEelements = v->lumshift;
    } else {
        /* Syntax: (top_field_param << 8) | bottom_field_param */
        pp->wBitstreamFcodes      = (v->lumscale << 8) | v->lumscale;
        pp->wBitstreamPCEelements = (v->lumshift << 8) | v->lumshift;
    }
    pp->bBitstreamConcealmentNeed   = 0;
    pp->bBitstreamConcealmentMethod = 0;
}

static void fill_slice(AVCodecContext *avctx, DXVA_SliceInfo *slice,
                       unsigned position, unsigned size)
{
    const VC1Context *v = avctx->priv_data;
    const MpegEncContext *s = &v->s;

    memset(slice, 0, sizeof(*slice));
    slice->wHorizontalPosition = 0;
    slice->wVerticalPosition   = s->mb_y;
    slice->dwSliceBitsInBuffer = 8 * size;
    slice->dwSliceDataLocation = position;
    slice->bStartCodeBitOffset = 0;
    slice->bReservedBits       = 0;
    slice->wMBbitOffset        = get_bits_count(&s->gb);
    slice->wNumberMBsInSlice   = s->mb_width * s->mb_height; /* XXX We assume 1 slice */
    slice->wQuantizerScaleCode = v->pq;
    slice->wBadSliceChopping   = 0;
}

static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
                                             DECODER_BUFFER_DESC *bs,
                                             DECODER_BUFFER_DESC *sc)
{
    const VC1Context *v = avctx->priv_data;
    AVDXVAContext *ctx = avctx->hwaccel_context;
    const MpegEncContext *s = &v->s;
    struct dxva2_picture_context *ctx_pic = s->current_picture_ptr->hwaccel_picture_private;

    DXVA_SliceInfo *slice = &ctx_pic->si;

    static const uint8_t start_code[] = { 0, 0, 1, 0x0d };
    const unsigned start_code_size = avctx->codec_id == AV_CODEC_ID_VC1 ? sizeof(start_code) : 0;
    const unsigned slice_size = slice->dwSliceBitsInBuffer / 8;
    const unsigned padding = 128 - ((start_code_size + slice_size) & 127);
    const unsigned data_size = start_code_size + slice_size + padding;

    void     *dxva_data_ptr;
    uint8_t  *dxva_data;
    unsigned dxva_size;
    int result;
    unsigned type;

#if CONFIG_D3D11VA
    if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD) {
        type = D3D11_VIDEO_DECODER_BUFFER_BITSTREAM;
        if (FAILED(ID3D11VideoContext_GetDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context,
                                                       D3D11VA_CONTEXT(ctx)->decoder,
                                                       type,
                                                       &dxva_size, &dxva_data_ptr)))
            return -1;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        type = DXVA2_BitStreamDateBufferType;
        if (FAILED(IDirectXVideoDecoder_GetBuffer(DXVA2_CONTEXT(ctx)->decoder,
                                                  type,
                                                  &dxva_data_ptr, &dxva_size)))
            return -1;
    }
#endif

    dxva_data = dxva_data_ptr;
    result = data_size <= dxva_size ? 0 : -1;
    if (!result) {
        if (start_code_size > 0)
            memcpy(dxva_data, start_code, start_code_size);
        memcpy(dxva_data + start_code_size,
               ctx_pic->bitstream + slice->dwSliceDataLocation, slice_size);
        if (padding > 0)
            memset(dxva_data + start_code_size + slice_size, 0, padding);
        slice->dwSliceBitsInBuffer = 8 * data_size;
    }

#if CONFIG_D3D11VA
    if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD)
        if (FAILED(ID3D11VideoContext_ReleaseDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context, D3D11VA_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD)
        if (FAILED(IDirectXVideoDecoder_ReleaseBuffer(DXVA2_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif
    if (result)
        return result;

#if CONFIG_D3D11VA
    if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD) {
        D3D11_VIDEO_DECODER_BUFFER_DESC *dsc11 = bs;
        memset(dsc11, 0, sizeof(*dsc11));
        dsc11->BufferType     = type;
        dsc11->DataSize       = data_size;
        dsc11->NumMBsInBuffer = s->mb_width * s->mb_height;

        type = D3D11_VIDEO_DECODER_BUFFER_SLICE_CONTROL;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        DXVA2_DecodeBufferDesc *dsc2 = bs;
        memset(dsc2, 0, sizeof(*dsc2));
        dsc2->CompressedBufferType = type;
        dsc2->DataSize             = data_size;
        dsc2->NumMBsInBuffer       = s->mb_width * s->mb_height;

        type = DXVA2_SliceControlBufferType;
    }
#endif

    assert((data_size & 127) == 0);
    return ff_dxva2_commit_buffer(avctx, ctx, sc,
                                  type,
                                  slice, sizeof(*slice), s->mb_width * s->mb_height);
}

static int dxva2_vc1_start_frame(AVCodecContext *avctx,
                                 av_unused const uint8_t *buffer,
                                 av_unused uint32_t size)
{
    const VC1Context *v = avctx->priv_data;
    AVDXVAContext *ctx = avctx->hwaccel_context;
    struct dxva2_picture_context *ctx_pic = v->s.current_picture_ptr->hwaccel_picture_private;

    if (DXVA_CONTEXT_DECODER(avctx, ctx) == NULL ||
        DXVA_CONTEXT_CFG(avctx, ctx)     == NULL ||
        DXVA_CONTEXT_COUNT(avctx, ctx)   <= 0)
        return -1;
    assert(ctx_pic);

    fill_picture_parameters(avctx, ctx, v, &ctx_pic->pp);

    ctx_pic->bitstream_size = 0;
    ctx_pic->bitstream      = NULL;
    return 0;
}

static int dxva2_vc1_decode_slice(AVCodecContext *avctx,
                                  const uint8_t *buffer,
                                  uint32_t size)
{
    const VC1Context *v = avctx->priv_data;
    const Picture *current_picture = v->s.current_picture_ptr;
    struct dxva2_picture_context *ctx_pic = current_picture->hwaccel_picture_private;

    if (ctx_pic->bitstream_size > 0)
        return -1;

    if (avctx->codec_id == AV_CODEC_ID_VC1 &&
        size >= 4 && IS_MARKER(AV_RB32(buffer))) {
        buffer += 4;
        size   -= 4;
    }

    ctx_pic->bitstream_size = size;
    ctx_pic->bitstream      = buffer;

    fill_slice(avctx, &ctx_pic->si, 0, size);
    return 0;
}

static int dxva2_vc1_end_frame(AVCodecContext *avctx)
{
    VC1Context *v = avctx->priv_data;
    struct dxva2_picture_context *ctx_pic = v->s.current_picture_ptr->hwaccel_picture_private;
    int ret;

    if (ctx_pic->bitstream_size <= 0)
        return -1;

    ret = ff_dxva2_common_end_frame(avctx, v->s.current_picture_ptr->f,
                                    &ctx_pic->pp, sizeof(ctx_pic->pp),
                                    NULL, 0,
                                    commit_bitstream_and_slice_buffer);
    if (!ret)
        ff_mpeg_draw_horiz_band(&v->s, 0, avctx->height);
    return ret;
}

#if CONFIG_WMV3_DXVA2_HWACCEL
AVHWAccel ff_wmv3_dxva2_hwaccel = {
    .name                 = "wmv3_dxva2",
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_WMV3,
    .pix_fmt              = AV_PIX_FMT_DXVA2_VLD,
    .start_frame          = dxva2_vc1_start_frame,
    .decode_slice         = dxva2_vc1_decode_slice,
    .end_frame            = dxva2_vc1_end_frame,
    .frame_priv_data_size = sizeof(struct dxva2_picture_context),
};
#endif

#if CONFIG_VC1_DXVA2_HWACCEL
AVHWAccel ff_vc1_dxva2_hwaccel = {
    .name                 = "vc1_dxva2",
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_VC1,
    .pix_fmt              = AV_PIX_FMT_DXVA2_VLD,
    .start_frame          = dxva2_vc1_start_frame,
    .decode_slice         = dxva2_vc1_decode_slice,
    .end_frame            = dxva2_vc1_end_frame,
    .frame_priv_data_size = sizeof(struct dxva2_picture_context),
};
#endif

#if CONFIG_WMV3_D3D11VA_HWACCEL
AVHWAccel ff_wmv3_d3d11va_hwaccel = {
    .name                 = "wmv3_d3d11va",
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_WMV3,
    .pix_fmt              = AV_PIX_FMT_D3D11VA_VLD,
    .start_frame          = dxva2_vc1_start_frame,
    .decode_slice         = dxva2_vc1_decode_slice,
    .end_frame            = dxva2_vc1_end_frame,
    .frame_priv_data_size = sizeof(struct dxva2_picture_context),
};
#endif

#if CONFIG_VC1_D3D11VA_HWACCEL
AVHWAccel ff_vc1_d3d11va_hwaccel = {
    .name                 = "vc1_d3d11va",
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_VC1,
    .pix_fmt              = AV_PIX_FMT_D3D11VA_VLD,
    .start_frame          = dxva2_vc1_start_frame,
    .decode_slice         = dxva2_vc1_decode_slice,
    .end_frame            = dxva2_vc1_end_frame,
    .frame_priv_data_size = sizeof(struct dxva2_picture_context),
};
#endif
  335. #endif