/*
 * MPEG-2 HW acceleration.
 *
 * copyright (c) 2010 Laurent Aimar
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/log.h"
#include "mpegutils.h"
#include "mpegvideo.h"

// The headers above may include w32threads.h, which uses the original
// _WIN32_WINNT define, while dxva2_internal.h redefines it to target a
// potentially newer version.
#include "dxva2_internal.h"

#define MAX_SLICES 1024
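
/* Per-picture private data: the DXVA picture parameters and quantization
 * matrices, the slice descriptors collected while decoding the picture, and
 * a pointer to (plus total size of) the picture's slice bitstream data. */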
struct dxva2_picture_context {
    DXVA_PictureParameters pp;
    DXVA_QmatrixData       qm;
    unsigned               slice_count;
    DXVA_SliceInfo         slice[MAX_SLICES];
    const uint8_t          *bitstream;
    unsigned               bitstream_size;
};
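
/* Translate the state of the current picture into DXVA_PictureParameters:
 * surface indices of the decoded and reference pictures, macroblock geometry,
 * picture coding type flags, and the packed f_codes and picture coding
 * extension bits expected by the MPEG-2 accelerator. */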
static void fill_picture_parameters(AVCodecContext *avctx,
                                    AVDXVAContext *ctx,
                                    const struct MpegEncContext *s,
                                    DXVA_PictureParameters *pp)
{
    const Picture *current_picture = s->current_picture_ptr;
    int is_field = s->picture_structure != PICT_FRAME;

    memset(pp, 0, sizeof(*pp));
    pp->wDecodedPictureIndex         = ff_dxva2_get_surface_index(avctx, ctx, current_picture->f);
    pp->wDeblockedPictureIndex       = 0;
    if (s->pict_type != AV_PICTURE_TYPE_I)
        pp->wForwardRefPictureIndex  = ff_dxva2_get_surface_index(avctx, ctx, s->last_picture.f);
    else
        pp->wForwardRefPictureIndex  = 0xffff;
    if (s->pict_type == AV_PICTURE_TYPE_B)
        pp->wBackwardRefPictureIndex = ff_dxva2_get_surface_index(avctx, ctx, s->next_picture.f);
    else
        pp->wBackwardRefPictureIndex = 0xffff;
    pp->wPicWidthInMBminus1          = s->mb_width - 1;
    pp->wPicHeightInMBminus1         = (s->mb_height >> is_field) - 1;
    pp->bMacroblockWidthMinus1       = 15;
    pp->bMacroblockHeightMinus1      = 15;
    pp->bBlockWidthMinus1            = 7;
    pp->bBlockHeightMinus1           = 7;
    pp->bBPPminus1                   = 7;
    pp->bPicStructure                = s->picture_structure;
    pp->bSecondField                 = is_field && !s->first_field;
    pp->bPicIntra                    = s->pict_type == AV_PICTURE_TYPE_I;
    pp->bPicBackwardPrediction       = s->pict_type == AV_PICTURE_TYPE_B;
    pp->bBidirectionalAveragingMode  = 0;
    pp->bMVprecisionAndChromaRelation= 0; /* FIXME */
    pp->bChromaFormat                = s->chroma_format;
    pp->bPicScanFixed                = 1;
    pp->bPicScanMethod               = s->alternate_scan ? 1 : 0;
    pp->bPicReadbackRequests         = 0;
    pp->bRcontrol                    = 0;
    pp->bPicSpatialResid8            = 0;
    pp->bPicOverflowBlocks           = 0;
    pp->bPicExtrapolation            = 0;
    pp->bPicDeblocked                = 0;
    pp->bPicDeblockConfined          = 0;
    pp->bPic4MVallowed               = 0;
    pp->bPicOBMC                     = 0;
    pp->bPicBinPB                    = 0;
    pp->bMV_RPS                      = 0;
    pp->bReservedBits                = 0;
    pp->wBitstreamFcodes             = (s->mpeg_f_code[0][0] << 12) |
                                       (s->mpeg_f_code[0][1] <<  8) |
                                       (s->mpeg_f_code[1][0] <<  4) |
                                       (s->mpeg_f_code[1][1]      );
    pp->wBitstreamPCEelements        = (s->intra_dc_precision         << 14) |
                                       (s->picture_structure          << 12) |
                                       (s->top_field_first            << 11) |
                                       (s->frame_pred_frame_dct       << 10) |
                                       (s->concealment_motion_vectors <<  9) |
                                       (s->q_scale_type               <<  8) |
                                       (s->intra_vlc_format           <<  7) |
                                       (s->alternate_scan             <<  6) |
                                       (s->repeat_first_field         <<  5) |
                                       (s->chroma_420_type            <<  4) |
                                       (s->progressive_frame          <<  3);
    pp->bBitstreamConcealmentNeed    = 0;
    pp->bBitstreamConcealmentMethod  = 0;
}
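
/* Export the four quantization matrices (intra/inter, luma/chroma) in
 * bitstream (zigzag) order, undoing the IDCT permutation applied to the
 * matrices stored in MpegEncContext. */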
static void fill_quantization_matrices(AVCodecContext *avctx,
                                       AVDXVAContext *ctx,
                                       const struct MpegEncContext *s,
                                       DXVA_QmatrixData *qm)
{
    int i;
    for (i = 0; i < 4; i++)
        qm->bNewQmatrix[i] = 1;
    for (i = 0; i < 64; i++) {
        int n = s->idsp.idct_permutation[ff_zigzag_direct[i]];
        qm->Qmatrix[0][i] = s->intra_matrix[n];
        qm->Qmatrix[1][i] = s->inter_matrix[n];
        qm->Qmatrix[2][i] = s->chroma_intra_matrix[n];
        qm->Qmatrix[3][i] = s->chroma_inter_matrix[n];
    }
}
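
/* Fill one DXVA_SliceInfo entry: the slice's macroblock position, its location
 * and length in the bitstream, the quantizer scale code parsed from the slice
 * header, and the bit offset of the first macroblock.  wNumberMBsInSlice
 * temporarily holds the index of the slice's first macroblock; it is turned
 * into a real count in commit_bitstream_and_slice_buffer(). */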
static void fill_slice(AVCodecContext *avctx,
                       const struct MpegEncContext *s,
                       DXVA_SliceInfo *slice,
                       unsigned position,
                       const uint8_t *buffer, unsigned size)
{
    int is_field = s->picture_structure != PICT_FRAME;
    GetBitContext gb;

    memset(slice, 0, sizeof(*slice));
    slice->wHorizontalPosition = s->mb_x;
    slice->wVerticalPosition   = s->mb_y >> is_field;
    slice->dwSliceBitsInBuffer = 8 * size;
    slice->dwSliceDataLocation = position;
    slice->bStartCodeBitOffset = 0;
    slice->bReservedBits       = 0;
    /* XXX We store the index of the first MB and it will be fixed later */
    slice->wNumberMBsInSlice   = (s->mb_y >> is_field) * s->mb_width + s->mb_x;
    slice->wBadSliceChopping   = 0;

    init_get_bits(&gb, &buffer[4], 8 * (size - 4));

    slice->wQuantizerScaleCode = get_bits(&gb, 5);
    skip_1stop_8data_bits(&gb);

    slice->wMBbitOffset        = 4 * 8 + get_bits_count(&gb);
}
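
/* Copy the collected slice data into the accelerator's bitstream buffer,
 * patch each slice's data location and macroblock count, then commit the
 * bitstream and slice-control buffers to the D3D11VA or DXVA2 decoder. */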
static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
                                             DECODER_BUFFER_DESC *bs,
                                             DECODER_BUFFER_DESC *sc)
{
    const struct MpegEncContext *s = avctx->priv_data;
    AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
    struct dxva2_picture_context *ctx_pic =
        s->current_picture_ptr->hwaccel_picture_private;
    const int is_field = s->picture_structure != PICT_FRAME;
    const unsigned mb_count = s->mb_width * (s->mb_height >> is_field);
    void     *dxva_data_ptr;
    uint8_t  *dxva_data, *current, *end;
    unsigned dxva_size;
    unsigned i;
    unsigned type;

#if CONFIG_D3D11VA
    if (ff_dxva2_is_d3d11(avctx)) {
        type = D3D11_VIDEO_DECODER_BUFFER_BITSTREAM;
        if (FAILED(ID3D11VideoContext_GetDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context,
                                                       D3D11VA_CONTEXT(ctx)->decoder,
                                                       type,
                                                       &dxva_size, &dxva_data_ptr)))
            return -1;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        type = DXVA2_BitStreamDateBufferType;
        if (FAILED(IDirectXVideoDecoder_GetBuffer(DXVA2_CONTEXT(ctx)->decoder,
                                                  type,
                                                  &dxva_data_ptr, &dxva_size)))
            return -1;
    }
#endif

    dxva_data = dxva_data_ptr;
    current   = dxva_data;
    end       = dxva_data + dxva_size;

    for (i = 0; i < ctx_pic->slice_count; i++) {
        DXVA_SliceInfo *slice = &ctx_pic->slice[i];
        unsigned position = slice->dwSliceDataLocation;
        unsigned size     = slice->dwSliceBitsInBuffer / 8;
        if (size > end - current) {
            av_log(avctx, AV_LOG_ERROR, "Failed to build bitstream");
            break;
        }
        slice->dwSliceDataLocation = current - dxva_data;

        if (i < ctx_pic->slice_count - 1)
            slice->wNumberMBsInSlice =
                slice[1].wNumberMBsInSlice - slice[0].wNumberMBsInSlice;
        else
            slice->wNumberMBsInSlice =
                mb_count - slice[0].wNumberMBsInSlice;

        memcpy(current, &ctx_pic->bitstream[position], size);
        current += size;
    }
#if CONFIG_D3D11VA
    if (ff_dxva2_is_d3d11(avctx))
        if (FAILED(ID3D11VideoContext_ReleaseDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context, D3D11VA_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD)
        if (FAILED(IDirectXVideoDecoder_ReleaseBuffer(DXVA2_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif
    if (i < ctx_pic->slice_count)
        return -1;

#if CONFIG_D3D11VA
    if (ff_dxva2_is_d3d11(avctx)) {
        D3D11_VIDEO_DECODER_BUFFER_DESC *dsc11 = bs;
        memset(dsc11, 0, sizeof(*dsc11));
        dsc11->BufferType     = type;
        dsc11->DataSize       = current - dxva_data;
        dsc11->NumMBsInBuffer = mb_count;

        type = D3D11_VIDEO_DECODER_BUFFER_SLICE_CONTROL;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        DXVA2_DecodeBufferDesc *dsc2 = bs;
        memset(dsc2, 0, sizeof(*dsc2));
        dsc2->CompressedBufferType = type;
        dsc2->DataSize             = current - dxva_data;
        dsc2->NumMBsInBuffer       = mb_count;

        type = DXVA2_SliceControlBufferType;
    }
#endif

    return ff_dxva2_commit_buffer(avctx, ctx, sc,
                                  type,
                                  ctx_pic->slice,
                                  ctx_pic->slice_count * sizeof(*ctx_pic->slice),
                                  mb_count);
}
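
/* Begin a new picture: fill the picture parameters and quantization matrices
 * and reset the per-picture slice and bitstream bookkeeping. */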
static int dxva2_mpeg2_start_frame(AVCodecContext *avctx,
                                   av_unused const uint8_t *buffer,
                                   av_unused uint32_t size)
{
    const struct MpegEncContext *s = avctx->priv_data;
    AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
    struct dxva2_picture_context *ctx_pic =
        s->current_picture_ptr->hwaccel_picture_private;

    if (!DXVA_CONTEXT_VALID(avctx, ctx))
        return -1;
    assert(ctx_pic);

    fill_picture_parameters(avctx, ctx, s, &ctx_pic->pp);
    fill_quantization_matrices(avctx, ctx, s, &ctx_pic->qm);

    ctx_pic->slice_count    = 0;
    ctx_pic->bitstream_size = 0;
    ctx_pic->bitstream      = NULL;
    return 0;
}
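
/* Record one slice: remember where the picture's bitstream starts, accumulate
 * its size, and append a DXVA_SliceInfo entry for the slice. */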
static int dxva2_mpeg2_decode_slice(AVCodecContext *avctx,
                                    const uint8_t *buffer, uint32_t size)
{
    const struct MpegEncContext *s = avctx->priv_data;
    struct dxva2_picture_context *ctx_pic =
        s->current_picture_ptr->hwaccel_picture_private;
    unsigned position;

    if (ctx_pic->slice_count >= MAX_SLICES) {
        avpriv_request_sample(avctx, "%d slices in dxva2",
                              ctx_pic->slice_count);
        return -1;
    }
    if (!ctx_pic->bitstream)
        ctx_pic->bitstream = buffer;
    ctx_pic->bitstream_size += size;

    position = buffer - ctx_pic->bitstream;
    fill_slice(avctx, s, &ctx_pic->slice[ctx_pic->slice_count++], position,
               buffer, size);
    return 0;
}
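
/* Submit the accumulated picture to the accelerator and, on success, report
 * the completed frame through ff_mpeg_draw_horiz_band(). */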
static int dxva2_mpeg2_end_frame(AVCodecContext *avctx)
{
    struct MpegEncContext *s = avctx->priv_data;
    struct dxva2_picture_context *ctx_pic =
        s->current_picture_ptr->hwaccel_picture_private;
    int ret;

    if (ctx_pic->slice_count <= 0 || ctx_pic->bitstream_size <= 0)
        return -1;
    ret = ff_dxva2_common_end_frame(avctx, s->current_picture_ptr->f,
                                    &ctx_pic->pp, sizeof(ctx_pic->pp),
                                    &ctx_pic->qm, sizeof(ctx_pic->qm),
                                    commit_bitstream_and_slice_buffer);
    if (!ret)
        ff_mpeg_draw_horiz_band(s, 0, avctx->height);
    return ret;
}
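
/* Hardware accelerator descriptors for DXVA2, D3D11VA (legacy) and D3D11,
 * all sharing the same per-frame callbacks and differing only in name and
 * pixel format. */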
#if CONFIG_MPEG2_DXVA2_HWACCEL
AVHWAccel ff_mpeg2_dxva2_hwaccel = {
    .name           = "mpeg2_dxva2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG2VIDEO,
    .pix_fmt        = AV_PIX_FMT_DXVA2_VLD,
    .init           = ff_dxva2_decode_init,
    .uninit         = ff_dxva2_decode_uninit,
    .start_frame    = dxva2_mpeg2_start_frame,
    .decode_slice   = dxva2_mpeg2_decode_slice,
    .end_frame      = dxva2_mpeg2_end_frame,
    .frame_params   = ff_dxva2_common_frame_params,
    .frame_priv_data_size = sizeof(struct dxva2_picture_context),
    .priv_data_size = sizeof(FFDXVASharedContext),
};
#endif

#if CONFIG_MPEG2_D3D11VA_HWACCEL
AVHWAccel ff_mpeg2_d3d11va_hwaccel = {
    .name           = "mpeg2_d3d11va",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG2VIDEO,
    .pix_fmt        = AV_PIX_FMT_D3D11VA_VLD,
    .init           = ff_dxva2_decode_init,
    .uninit         = ff_dxva2_decode_uninit,
    .start_frame    = dxva2_mpeg2_start_frame,
    .decode_slice   = dxva2_mpeg2_decode_slice,
    .end_frame      = dxva2_mpeg2_end_frame,
    .frame_params   = ff_dxva2_common_frame_params,
    .frame_priv_data_size = sizeof(struct dxva2_picture_context),
    .priv_data_size = sizeof(FFDXVASharedContext),
};
#endif

#if CONFIG_MPEG2_D3D11VA2_HWACCEL
AVHWAccel ff_mpeg2_d3d11va2_hwaccel = {
    .name           = "mpeg2_d3d11va2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG2VIDEO,
    .pix_fmt        = AV_PIX_FMT_D3D11,
    .init           = ff_dxva2_decode_init,
    .uninit         = ff_dxva2_decode_uninit,
    .start_frame    = dxva2_mpeg2_start_frame,
    .decode_slice   = dxva2_mpeg2_decode_slice,
    .end_frame      = dxva2_mpeg2_end_frame,
    .frame_params   = ff_dxva2_common_frame_params,
    .frame_priv_data_size = sizeof(struct dxva2_picture_context),
    .priv_data_size = sizeof(FFDXVASharedContext),
};
#endif