/*
 * Intel MediaSDK QSV codec-independent code
 *
 * copyright (c) 2013 Luca Barbato
 * copyright (c) 2015 Anton Khirnov <anton@khirnov.net>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <string.h>
#include <sys/types.h>

#include <mfx/mfxvideo.h>

#include "libavutil/common.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_qsv.h"
#include "libavutil/mem.h"
#include "libavutil/log.h"
#include "libavutil/pixfmt.h"
#include "libavutil/time.h"

#include "avcodec.h"
#include "internal.h"
#include "qsv.h"
#include "qsv_internal.h"
#include "qsvdec.h"
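
/* Map a software pixel format to the layout the QSV decoder outputs.
 * Only 8-bit 4:2:0 input is supported; anything else is rejected. */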
int ff_qsv_map_pixfmt(enum AVPixelFormat format)
{
    switch (format) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUVJ420P:
        return AV_PIX_FMT_NV12;
    default:
        return AVERROR(ENOSYS);
    }
}
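
/* Pick the MFX session to decode with: a user-supplied session, a session
 * derived from the hw frames context, or a purely internal one. The decoder
 * is closed on the chosen session so a later Init starts from a clean state. */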
static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session,
                            AVBufferRef *hw_frames_ref)
{
    int ret;

    if (session) {
        q->session = session;
    } else if (hw_frames_ref) {
        if (q->internal_session) {
            MFXClose(q->internal_session);
            q->internal_session = NULL;
        }
        av_buffer_unref(&q->frames_ctx.hw_frames_ctx);

        q->frames_ctx.hw_frames_ctx = av_buffer_ref(hw_frames_ref);
        if (!q->frames_ctx.hw_frames_ctx)
            return AVERROR(ENOMEM);

        ret = ff_qsv_init_session_hwcontext(avctx, &q->internal_session,
                                            &q->frames_ctx, q->load_plugins,
                                            q->iopattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY);
        if (ret < 0) {
            av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
            return ret;
        }

        q->session = q->internal_session;
    } else {
        if (!q->internal_session) {
            ret = ff_qsv_init_internal_session(avctx, &q->internal_session,
                                               q->load_plugins);
            if (ret < 0)
                return ret;
        }

        q->session = q->internal_session;
    }

    /* make sure the decoder is uninitialized */
    MFXVideoDECODE_Close(q->session);

    return 0;
}
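
/* Initialize the MFX decoder: allocate the async fifo, derive the IO pattern
 * from the hwaccel/hw_frames contexts, set up the session and call
 * MFXVideoDECODE_Init() with the stream parameters. */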
static int qsv_decode_init(AVCodecContext *avctx, QSVContext *q)
{
    mfxSession session = NULL;
    int iopattern = 0;
    mfxVideoParam param = { { 0 } };
    int ret;

    if (!q->async_fifo) {
        q->async_fifo = av_fifo_alloc((1 + q->async_depth) *
                                      (sizeof(mfxSyncPoint*) + sizeof(QSVFrame*)));
        if (!q->async_fifo)
            return AVERROR(ENOMEM);
    }

    if (avctx->hwaccel_context) {
        AVQSVContext *user_ctx = avctx->hwaccel_context;
        session           = user_ctx->session;
        iopattern         = user_ctx->iopattern;
        q->ext_buffers    = user_ctx->ext_buffers;
        q->nb_ext_buffers = user_ctx->nb_ext_buffers;
    }

    if (avctx->hw_frames_ctx) {
        AVHWFramesContext  *frames_ctx   = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
        AVQSVFramesContext *frames_hwctx = frames_ctx->hwctx;

        if (!iopattern) {
            if (frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME)
                iopattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
            else if (frames_hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
                iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
        }
    }

    if (!iopattern)
        iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
    q->iopattern = iopattern;

    ret = qsv_init_session(avctx, q, session, avctx->hw_frames_ctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing an MFX session\n");
        return ret;
    }

    ret = ff_qsv_codec_id_to_mfx(avctx->codec_id);
    if (ret < 0)
        return ret;

    param.mfx.CodecId      = ret;
    param.mfx.CodecProfile = avctx->profile;
    param.mfx.CodecLevel   = avctx->level;

    param.mfx.FrameInfo.BitDepthLuma   = 8;
    param.mfx.FrameInfo.BitDepthChroma = 8;
    param.mfx.FrameInfo.Shift          = 0;
    param.mfx.FrameInfo.FourCC         = MFX_FOURCC_NV12;
    param.mfx.FrameInfo.Width          = avctx->coded_width;
    param.mfx.FrameInfo.Height         = avctx->coded_height;
    param.mfx.FrameInfo.ChromaFormat   = MFX_CHROMAFORMAT_YUV420;

    param.IOPattern   = q->iopattern;
    param.AsyncDepth  = q->async_depth;
    param.ExtParam    = q->ext_buffers;
    param.NumExtParam = q->nb_ext_buffers;

    ret = MFXVideoDECODE_Init(q->session, &param);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing the MFX video decoder\n");
        return ff_qsv_error(ret);
    }

    return 0;
}
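
/* Attach a usable mfxFrameSurface1 to a QSVFrame: either reuse the surface
 * embedded in an AV_PIX_FMT_QSV frame or describe a freshly allocated
 * system-memory buffer as an NV12 surface. */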
static int alloc_frame(AVCodecContext *avctx, QSVFrame *frame)
{
    int ret;

    ret = ff_get_buffer(avctx, frame->frame, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0)
        return ret;

    if (frame->frame->format == AV_PIX_FMT_QSV) {
        frame->surface = (mfxFrameSurface1*)frame->frame->data[3];
    } else {
        frame->surface_internal.Info.BitDepthLuma   = 8;
        frame->surface_internal.Info.BitDepthChroma = 8;
        frame->surface_internal.Info.FourCC         = MFX_FOURCC_NV12;
        frame->surface_internal.Info.Width          = avctx->coded_width;
        frame->surface_internal.Info.Height         = avctx->coded_height;
        frame->surface_internal.Info.ChromaFormat   = MFX_CHROMAFORMAT_YUV420;

        frame->surface_internal.Data.PitchLow = frame->frame->linesize[0];
        frame->surface_internal.Data.Y        = frame->frame->data[0];
        frame->surface_internal.Data.UV       = frame->frame->data[1];

        frame->surface = &frame->surface_internal;
    }

    return 0;
}
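
/* Release surfaces that the SDK no longer holds locked and that are not
 * queued for output, so their AVFrames can be reused. */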
static void qsv_clear_unused_frames(QSVContext *q)
{
    QSVFrame *cur = q->work_frames;
    while (cur) {
        if (cur->surface && !cur->surface->Data.Locked && !cur->queued) {
            cur->surface = NULL;
            av_frame_unref(cur->frame);
        }
        cur = cur->next;
    }
}
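
/* Return a free input surface for DecodeFrameAsync(), reusing an idle entry
 * from the work_frames list or appending a new one. */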
static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
{
    QSVFrame *frame, **last;
    int ret;

    qsv_clear_unused_frames(q);

    frame = q->work_frames;
    last  = &q->work_frames;
    while (frame) {
        if (!frame->surface) {
            ret = alloc_frame(avctx, frame);
            if (ret < 0)
                return ret;
            *surf = frame->surface;
            return 0;
        }

        last  = &frame->next;
        frame = frame->next;
    }

    frame = av_mallocz(sizeof(*frame));
    if (!frame)
        return AVERROR(ENOMEM);
    frame->frame = av_frame_alloc();
    if (!frame->frame) {
        av_freep(&frame);
        return AVERROR(ENOMEM);
    }
    *last = frame;

    ret = alloc_frame(avctx, frame);
    if (ret < 0)
        return ret;

    *surf = frame->surface;

    return 0;
}
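
/* Find the QSVFrame that owns the surface returned by the decoder. */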
static QSVFrame *find_frame(QSVContext *q, mfxFrameSurface1 *surf)
{
    QSVFrame *cur = q->work_frames;
    while (cur) {
        if (surf == cur->surface)
            return cur;
        cur = cur->next;
    }
    return NULL;
}
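
/* Feed one packet to the decoder and, once the async queue is full or the
 * stream is being drained, sync and output the oldest decoded frame.
 * Returns the number of bitstream bytes consumed. */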
static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
                      AVFrame *frame, int *got_frame,
                      AVPacket *avpkt)
{
    QSVFrame *out_frame;
    mfxFrameSurface1 *insurf;
    mfxFrameSurface1 *outsurf;
    mfxSyncPoint *sync;
    mfxBitstream bs = { { { 0 } } };
    int ret;

    if (avpkt->size) {
        bs.Data       = avpkt->data;
        bs.DataLength = avpkt->size;
        bs.MaxLength  = bs.DataLength;
        bs.TimeStamp  = avpkt->pts;
    }

    sync = av_mallocz(sizeof(*sync));
    if (!sync) {
        av_freep(&sync);
        return AVERROR(ENOMEM);
    }

    do {
        ret = get_surface(avctx, q, &insurf);
        if (ret < 0)
            return ret;

        ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
                                              insurf, &outsurf, sync);
        if (ret == MFX_WRN_DEVICE_BUSY)
            av_usleep(1);

    } while (ret == MFX_WRN_DEVICE_BUSY || ret == MFX_ERR_MORE_SURFACE);

    if (ret != MFX_ERR_NONE &&
        ret != MFX_ERR_MORE_DATA &&
        ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
        ret != MFX_ERR_MORE_SURFACE) {
        av_log(avctx, AV_LOG_ERROR, "Error during QSV decoding.\n");
        av_freep(&sync);
        return ff_qsv_error(ret);
    }

    /* make sure we do not enter an infinite loop if the SDK
     * did not consume any data and did not return anything */
    if (!*sync && !bs.DataOffset) {
        av_log(avctx, AV_LOG_WARNING, "A decode call did not consume any data\n");
        bs.DataOffset = avpkt->size;
    }

    if (*sync) {
        QSVFrame *out_frame = find_frame(q, outsurf);

        if (!out_frame) {
            av_log(avctx, AV_LOG_ERROR,
                   "The returned surface does not correspond to any frame\n");
            av_freep(&sync);
            return AVERROR_BUG;
        }

        out_frame->queued = 1;
        av_fifo_generic_write(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_write(q->async_fifo, &sync,      sizeof(sync),      NULL);
    } else {
        av_freep(&sync);
    }

    if (!av_fifo_space(q->async_fifo) ||
        (!avpkt->size && av_fifo_size(q->async_fifo))) {
        AVFrame *src_frame;

        av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_read(q->async_fifo, &sync,      sizeof(sync),      NULL);
        out_frame->queued = 0;

        do {
            ret = MFXVideoCORE_SyncOperation(q->session, *sync, 1000);
        } while (ret == MFX_WRN_IN_EXECUTION);

        av_freep(&sync);

        src_frame = out_frame->frame;

        ret = av_frame_ref(frame, src_frame);
        if (ret < 0)
            return ret;

        outsurf = out_frame->surface;

#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
        frame->pkt_pts = outsurf->Data.TimeStamp;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        frame->pts = outsurf->Data.TimeStamp;

        frame->repeat_pict =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
        frame->top_field_first =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
        frame->interlaced_frame =
            !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);

        *got_frame = 1;
    }

    return bs.DataOffset;
}
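
/* Free all decoder state: drain the async fifo, release the work frames,
 * the parser, the internal codec context and the MFX session. */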
int ff_qsv_decode_close(QSVContext *q)
{
    QSVFrame *cur = q->work_frames;

    if (q->session)
        MFXVideoDECODE_Close(q->session);

    while (q->async_fifo && av_fifo_size(q->async_fifo)) {
        QSVFrame *out_frame;
        mfxSyncPoint *sync;

        av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_read(q->async_fifo, &sync,      sizeof(sync),      NULL);

        av_freep(&sync);
    }

    while (cur) {
        q->work_frames = cur->next;
        av_frame_free(&cur->frame);
        av_freep(&cur);
        cur = q->work_frames;
    }

    av_fifo_free(q->async_fifo);
    q->async_fifo = NULL;

    av_parser_close(q->parser);
    avcodec_free_context(&q->avctx_internal);

    if (q->internal_session)
        MFXClose(q->internal_session);

    av_buffer_unref(&q->frames_ctx.hw_frames_ctx);
    av_freep(&q->frames_ctx.mids);
    q->frames_ctx.nb_mids = 0;

    return 0;
}
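
/* Parse the incoming packet to detect stream parameter changes,
 * (re)initialize the decoder when needed, then decode the packet. */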
int ff_qsv_process_data(AVCodecContext *avctx, QSVContext *q,
                        AVFrame *frame, int *got_frame, AVPacket *pkt)
{
    uint8_t *dummy_data;
    int dummy_size;
    int ret;

    if (!q->avctx_internal) {
        q->avctx_internal = avcodec_alloc_context3(NULL);
        if (!q->avctx_internal)
            return AVERROR(ENOMEM);

        if (avctx->extradata) {
            q->avctx_internal->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
            if (!q->avctx_internal->extradata)
                return AVERROR(ENOMEM);

            memcpy(q->avctx_internal->extradata, avctx->extradata,
                   avctx->extradata_size);
            q->avctx_internal->extradata_size = avctx->extradata_size;
        }

        q->parser = av_parser_init(avctx->codec_id);
        if (!q->parser)
            return AVERROR(ENOMEM);

        q->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
        q->orig_pix_fmt   = AV_PIX_FMT_NONE;
    }

    if (!pkt->size)
        return qsv_decode(avctx, q, frame, got_frame, pkt);

    /* we assume the packets are already split properly and want
     * just the codec parameters here */
    av_parser_parse2(q->parser, q->avctx_internal,
                     &dummy_data, &dummy_size,
                     pkt->data, pkt->size, pkt->pts, pkt->dts,
                     pkt->pos);

    /* TODO: flush delayed frames on reinit */
    if (q->parser->format       != q->orig_pix_fmt    ||
        q->parser->coded_width  != avctx->coded_width ||
        q->parser->coded_height != avctx->coded_height) {
        enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_QSV,
                                           AV_PIX_FMT_NONE,
                                           AV_PIX_FMT_NONE };
        enum AVPixelFormat qsv_format;

        qsv_format = ff_qsv_map_pixfmt(q->parser->format);
        if (qsv_format < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "Only 8-bit YUV420 streams are supported.\n");
            ret = AVERROR(ENOSYS);
            goto reinit_fail;
        }

        q->orig_pix_fmt     = q->parser->format;
        avctx->pix_fmt      = pix_fmts[1] = qsv_format;
        avctx->width        = q->parser->width;
        avctx->height       = q->parser->height;
        avctx->coded_width  = q->parser->coded_width;
        avctx->coded_height = q->parser->coded_height;
        avctx->level        = q->avctx_internal->level;
        avctx->profile      = q->avctx_internal->profile;

        ret = ff_get_format(avctx, pix_fmts);
        if (ret < 0)
            goto reinit_fail;

        avctx->pix_fmt = ret;

        ret = qsv_decode_init(avctx, q);
        if (ret < 0)
            goto reinit_fail;
    }

    return qsv_decode(avctx, q, frame, got_frame, pkt);

reinit_fail:
    q->orig_pix_fmt = q->parser->format = avctx->pix_fmt = AV_PIX_FMT_NONE;
    return ret;
}
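
/* Reset the detected pixel format so the next packet triggers a full
 * reinitialization of the decoder. */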
void ff_qsv_decode_flush(AVCodecContext *avctx, QSVContext *q)
{
    q->orig_pix_fmt = AV_PIX_FMT_NONE;
}