/*
 * MPEG-2 HW acceleration.
 *
 * copyright (c) 2010 Laurent Aimar
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/log.h"

#include "dxva2_internal.h"
#include "mpegutils.h"
#include "mpegvideo.h"

#define MAX_SLICES 1024
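
/* Per-picture private data filled by the hwaccel callbacks and consumed
 * when the frame is submitted to the hardware decoder. */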
struct dxva2_picture_context {
    DXVA_PictureParameters pp;
    DXVA_QmatrixData       qm;
    unsigned               slice_count;
    DXVA_SliceInfo         slice[MAX_SLICES];

    const uint8_t          *bitstream;
    unsigned               bitstream_size;
};
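
/* Translate the MPEG-2 picture state kept in MpegEncContext into the
 * DXVA_PictureParameters structure passed to the hardware decoder. */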
static void fill_picture_parameters(AVCodecContext *avctx,
                                    AVDXVAContext *ctx,
                                    const struct MpegEncContext *s,
                                    DXVA_PictureParameters *pp)
{
    const Picture *current_picture = s->current_picture_ptr;
    int is_field = s->picture_structure != PICT_FRAME;

    memset(pp, 0, sizeof(*pp));
    pp->wDecodedPictureIndex         = ff_dxva2_get_surface_index(avctx, ctx, current_picture->f);
    pp->wDeblockedPictureIndex       = 0;
    if (s->pict_type != AV_PICTURE_TYPE_I)
        pp->wForwardRefPictureIndex  = ff_dxva2_get_surface_index(avctx, ctx, s->last_picture.f);
    else
        pp->wForwardRefPictureIndex  = 0xffff;
    if (s->pict_type == AV_PICTURE_TYPE_B)
        pp->wBackwardRefPictureIndex = ff_dxva2_get_surface_index(avctx, ctx, s->next_picture.f);
    else
        pp->wBackwardRefPictureIndex = 0xffff;
    pp->wPicWidthInMBminus1          = s->mb_width  - 1;
    pp->wPicHeightInMBminus1         = (s->mb_height >> is_field) - 1;
    pp->bMacroblockWidthMinus1       = 15;
    pp->bMacroblockHeightMinus1      = 15;
    pp->bBlockWidthMinus1            = 7;
    pp->bBlockHeightMinus1           = 7;
    pp->bBPPminus1                   = 7;
    pp->bPicStructure                = s->picture_structure;
    pp->bSecondField                 = is_field && !s->first_field;
    pp->bPicIntra                    = s->pict_type == AV_PICTURE_TYPE_I;
    pp->bPicBackwardPrediction       = s->pict_type == AV_PICTURE_TYPE_B;
    pp->bBidirectionalAveragingMode  = 0;
    pp->bMVprecisionAndChromaRelation = 0; /* FIXME */
    pp->bChromaFormat                = s->chroma_format;
    pp->bPicScanFixed                = 1;
    pp->bPicScanMethod               = s->alternate_scan ? 1 : 0;
    pp->bPicReadbackRequests         = 0;
    pp->bRcontrol                    = 0;
    pp->bPicSpatialResid8            = 0;
    pp->bPicOverflowBlocks           = 0;
    pp->bPicExtrapolation            = 0;
    pp->bPicDeblocked                = 0;
    pp->bPicDeblockConfined          = 0;
    pp->bPic4MVallowed               = 0;
    pp->bPicOBMC                     = 0;
    pp->bPicBinPB                    = 0;
    pp->bMV_RPS                      = 0;
    pp->bReservedBits                = 0;
    pp->wBitstreamFcodes             = (s->mpeg_f_code[0][0] << 12) |
                                       (s->mpeg_f_code[0][1] <<  8) |
                                       (s->mpeg_f_code[1][0] <<  4) |
                                       (s->mpeg_f_code[1][1]      );
    pp->wBitstreamPCEelements        = (s->intra_dc_precision         << 14) |
                                       (s->picture_structure          << 12) |
                                       (s->top_field_first            << 11) |
                                       (s->frame_pred_frame_dct       << 10) |
                                       (s->concealment_motion_vectors <<  9) |
                                       (s->q_scale_type               <<  8) |
                                       (s->intra_vlc_format           <<  7) |
                                       (s->alternate_scan             <<  6) |
                                       (s->repeat_first_field         <<  5) |
                                       (s->chroma_420_type            <<  4) |
                                       (s->progressive_frame          <<  3);
    pp->bBitstreamConcealmentNeed    = 0;
    pp->bBitstreamConcealmentMethod  = 0;
}
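
/* Export the four quantization matrices (luma/chroma, intra/inter),
 * mapping them back from the IDCT permutation into zig-zag order. */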
static void fill_quantization_matrices(AVCodecContext *avctx,
                                       AVDXVAContext *ctx,
                                       const struct MpegEncContext *s,
                                       DXVA_QmatrixData *qm)
{
    int i;
    for (i = 0; i < 4; i++)
        qm->bNewQmatrix[i] = 1;
    for (i = 0; i < 64; i++) {
        int n = s->idsp.idct_permutation[ff_zigzag_direct[i]];
        qm->Qmatrix[0][i] = s->intra_matrix[n];
        qm->Qmatrix[1][i] = s->inter_matrix[n];
        qm->Qmatrix[2][i] = s->chroma_intra_matrix[n];
        qm->Qmatrix[3][i] = s->chroma_inter_matrix[n];
    }
}
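
/* Describe one slice for DXVA: its macroblock position, the byte offset of
 * its data within the bitstream buffer, the quantizer scale code, and the
 * bit offset of the first macroblock after the slice header. */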
static void fill_slice(AVCodecContext *avctx,
                       const struct MpegEncContext *s,
                       DXVA_SliceInfo *slice,
                       unsigned position,
                       const uint8_t *buffer, unsigned size)
{
    int is_field = s->picture_structure != PICT_FRAME;
    GetBitContext gb;

    memset(slice, 0, sizeof(*slice));
    slice->wHorizontalPosition = s->mb_x;
    slice->wVerticalPosition   = s->mb_y >> is_field;
    slice->dwSliceBitsInBuffer = 8 * size;
    slice->dwSliceDataLocation = position;
    slice->bStartCodeBitOffset = 0;
    slice->bReservedBits       = 0;
    /* XXX We store the index of the first MB and it will be fixed later */
    slice->wNumberMBsInSlice   = (s->mb_y >> is_field) * s->mb_width + s->mb_x;
    slice->wBadSliceChopping   = 0;

    init_get_bits(&gb, &buffer[4], 8 * (size - 4));

    slice->wQuantizerScaleCode = get_bits(&gb, 5);
    while (get_bits1(&gb))
        skip_bits(&gb, 8);

    slice->wMBbitOffset        = 4 * 8 + get_bits_count(&gb);
}
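
/* Copy all buffered slices into the hardware bitstream buffer, patch the
 * per-slice data offsets and macroblock counts, fill the bitstream buffer
 * descriptor and commit the slice-control buffer. */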
static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
                                             DECODER_BUFFER_DESC *bs,
                                             DECODER_BUFFER_DESC *sc)
{
    const struct MpegEncContext *s = avctx->priv_data;
    AVDXVAContext *ctx = avctx->hwaccel_context;
    struct dxva2_picture_context *ctx_pic =
        s->current_picture_ptr->hwaccel_picture_private;
    const int is_field = s->picture_structure != PICT_FRAME;
    const unsigned mb_count = s->mb_width * (s->mb_height >> is_field);
    void     *dxva_data_ptr;
    uint8_t  *dxva_data, *current, *end;
    unsigned dxva_size;
    unsigned i;
    unsigned type;

#if CONFIG_D3D11VA
    if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD) {
        type = D3D11_VIDEO_DECODER_BUFFER_BITSTREAM;
        if (FAILED(ID3D11VideoContext_GetDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context,
                                                       D3D11VA_CONTEXT(ctx)->decoder,
                                                       type,
                                                       &dxva_size, &dxva_data_ptr)))
            return -1;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        type = DXVA2_BitStreamDateBufferType;
        if (FAILED(IDirectXVideoDecoder_GetBuffer(DXVA2_CONTEXT(ctx)->decoder,
                                                  type,
                                                  &dxva_data_ptr, &dxva_size)))
            return -1;
    }
#endif

    dxva_data = dxva_data_ptr;
    current   = dxva_data;
    end       = dxva_data + dxva_size;

    for (i = 0; i < ctx_pic->slice_count; i++) {
        DXVA_SliceInfo *slice = &ctx_pic->slice[i];
        unsigned position = slice->dwSliceDataLocation;
        unsigned size     = slice->dwSliceBitsInBuffer / 8;
        if (size > end - current) {
            av_log(avctx, AV_LOG_ERROR, "Failed to build bitstream\n");
            break;
        }
        slice->dwSliceDataLocation = current - dxva_data;

        if (i < ctx_pic->slice_count - 1)
            slice->wNumberMBsInSlice =
                slice[1].wNumberMBsInSlice - slice[0].wNumberMBsInSlice;
        else
            slice->wNumberMBsInSlice =
                mb_count - slice[0].wNumberMBsInSlice;

        memcpy(current, &ctx_pic->bitstream[position], size);
        current += size;
    }
#if CONFIG_D3D11VA
    if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD)
        if (FAILED(ID3D11VideoContext_ReleaseDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context, D3D11VA_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD)
        if (FAILED(IDirectXVideoDecoder_ReleaseBuffer(DXVA2_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif
    if (i < ctx_pic->slice_count)
        return -1;

#if CONFIG_D3D11VA
    if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD) {
        D3D11_VIDEO_DECODER_BUFFER_DESC *dsc11 = bs;
        memset(dsc11, 0, sizeof(*dsc11));
        dsc11->BufferType      = type;
        dsc11->DataSize        = current - dxva_data;
        dsc11->NumMBsInBuffer  = mb_count;

        type = D3D11_VIDEO_DECODER_BUFFER_SLICE_CONTROL;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        DXVA2_DecodeBufferDesc *dsc2 = bs;
        memset(dsc2, 0, sizeof(*dsc2));
        dsc2->CompressedBufferType = type;
        dsc2->DataSize             = current - dxva_data;
        dsc2->NumMBsInBuffer       = mb_count;

        type = DXVA2_SliceControlBufferType;
    }
#endif

    return ff_dxva2_commit_buffer(avctx, ctx, sc,
                                  type,
                                  ctx_pic->slice,
                                  ctx_pic->slice_count * sizeof(*ctx_pic->slice),
                                  mb_count);
}
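
/* Called once per frame before any slice data: validates the hwaccel
 * context and fills the picture parameters and quantization matrices. */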
static int dxva2_mpeg2_start_frame(AVCodecContext *avctx,
                                   av_unused const uint8_t *buffer,
                                   av_unused uint32_t size)
{
    const struct MpegEncContext *s = avctx->priv_data;
    AVDXVAContext *ctx = avctx->hwaccel_context;
    struct dxva2_picture_context *ctx_pic =
        s->current_picture_ptr->hwaccel_picture_private;

    if (DXVA_CONTEXT_DECODER(avctx, ctx) == NULL ||
        DXVA_CONTEXT_CFG(avctx, ctx)     == NULL ||
        DXVA_CONTEXT_COUNT(avctx, ctx)   <= 0)
        return -1;
    assert(ctx_pic);

    fill_picture_parameters(avctx, ctx, s, &ctx_pic->pp);
    fill_quantization_matrices(avctx, ctx, s, &ctx_pic->qm);

    ctx_pic->slice_count    = 0;
    ctx_pic->bitstream_size = 0;
    ctx_pic->bitstream      = NULL;
    return 0;
}
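
/* Called for each slice: remembers the start of the bitstream on the first
 * call, accumulates the total size (slice buffers are expected to be
 * contiguous in memory), and appends a DXVA_SliceInfo entry. */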
static int dxva2_mpeg2_decode_slice(AVCodecContext *avctx,
                                    const uint8_t *buffer, uint32_t size)
{
    const struct MpegEncContext *s = avctx->priv_data;
    struct dxva2_picture_context *ctx_pic =
        s->current_picture_ptr->hwaccel_picture_private;
    unsigned position;

    if (ctx_pic->slice_count >= MAX_SLICES) {
        avpriv_request_sample(avctx, "%d slices in dxva2",
                              ctx_pic->slice_count);
        return -1;
    }

    if (!ctx_pic->bitstream)
        ctx_pic->bitstream = buffer;
    ctx_pic->bitstream_size += size;

    position = buffer - ctx_pic->bitstream;
    fill_slice(avctx, s, &ctx_pic->slice[ctx_pic->slice_count++], position,
               buffer, size);
    return 0;
}
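
/* Called once per frame after all slices: hands the accumulated buffers to
 * the common DXVA2/D3D11VA frame submission helper. */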
static int dxva2_mpeg2_end_frame(AVCodecContext *avctx)
{
    struct MpegEncContext *s = avctx->priv_data;
    struct dxva2_picture_context *ctx_pic =
        s->current_picture_ptr->hwaccel_picture_private;
    int ret;

    if (ctx_pic->slice_count <= 0 || ctx_pic->bitstream_size <= 0)
        return -1;
    ret = ff_dxva2_common_end_frame(avctx, s->current_picture_ptr->f,
                                    &ctx_pic->pp, sizeof(ctx_pic->pp),
                                    &ctx_pic->qm, sizeof(ctx_pic->qm),
                                    commit_bitstream_and_slice_buffer);
    if (!ret)
        ff_mpeg_draw_horiz_band(s, 0, avctx->height);
    return ret;
}

#if CONFIG_MPEG2_DXVA2_HWACCEL
AVHWAccel ff_mpeg2_dxva2_hwaccel = {
    .name           = "mpeg2_dxva2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG2VIDEO,
    .pix_fmt        = AV_PIX_FMT_DXVA2_VLD,
    .start_frame    = dxva2_mpeg2_start_frame,
    .decode_slice   = dxva2_mpeg2_decode_slice,
    .end_frame      = dxva2_mpeg2_end_frame,
    .frame_priv_data_size = sizeof(struct dxva2_picture_context),
};
#endif

#if CONFIG_MPEG2_D3D11VA_HWACCEL
AVHWAccel ff_mpeg2_d3d11va_hwaccel = {
    .name           = "mpeg2_d3d11va",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG2VIDEO,
    .pix_fmt        = AV_PIX_FMT_D3D11VA_VLD,
    .start_frame    = dxva2_mpeg2_start_frame,
    .decode_slice   = dxva2_mpeg2_decode_slice,
    .end_frame      = dxva2_mpeg2_end_frame,
    .frame_priv_data_size = sizeof(struct dxva2_picture_context),
};
#endif