/*
 * Intel MediaSDK QSV codec-independent code
 *
 * copyright (c) 2013 Luca Barbato
 * copyright (c) 2015 Anton Khirnov <anton@khirnov.net>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>
#include <sys/types.h>

#include <mfx/mfxvideo.h>

#include "libavutil/common.h"
#include "libavutil/mem.h"
#include "libavutil/log.h"
#include "libavutil/pixfmt.h"
#include "libavutil/time.h"

#include "avcodec.h"
#include "internal.h"
#include "qsv.h"
#include "qsv_internal.h"
#include "qsvdec.h"

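/* Map a software pixel format onto the NV12 layout used by the MediaSDK
 * decoder; anything other than 8-bit 4:2:0 is rejected. */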
int ff_qsv_map_pixfmt(enum AVPixelFormat format)
{
    switch (format) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUVJ420P:
        return AV_PIX_FMT_NV12;
    default:
        return AVERROR(ENOSYS);
    }
}

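/* Pick the MFX session to decode with: use the caller-provided session if
 * there is one, otherwise lazily create and reuse an internal session. */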
static int qsv_init_session(AVCodecContext *avctx, QSVContext *q, mfxSession session)
{
    if (!session) {
        if (!q->internal_session) {
            int ret = ff_qsv_init_internal_session(avctx, &q->internal_session,
                                                   q->load_plugins);
            if (ret < 0)
                return ret;
        }

        q->session = q->internal_session;
    } else {
        q->session = session;
    }

    /* make sure the decoder is uninitialized */
    MFXVideoDECODE_Close(q->session);

    return 0;
}

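/* Initialize the MFX decoder: allocate the FIFO that pairs queued frames with
 * their sync points, set up the session, and fill mfxVideoParam from the codec
 * context before calling MFXVideoDECODE_Init(). */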
static int qsv_decode_init(AVCodecContext *avctx, QSVContext *q, mfxSession session)
{
    mfxVideoParam param = { { 0 } };
    int ret;

    if (!q->async_fifo) {
        q->async_fifo = av_fifo_alloc((1 + q->async_depth) *
                                      (sizeof(mfxSyncPoint*) + sizeof(QSVFrame*)));
        if (!q->async_fifo)
            return AVERROR(ENOMEM);
    }

    ret = qsv_init_session(avctx, q, session);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing an MFX session\n");
        return ret;
    }

    ret = ff_qsv_codec_id_to_mfx(avctx->codec_id);
    if (ret < 0)
        return ret;

    param.mfx.CodecId      = ret;
    param.mfx.CodecProfile = avctx->profile;
    param.mfx.CodecLevel   = avctx->level;

    param.mfx.FrameInfo.BitDepthLuma   = 8;
    param.mfx.FrameInfo.BitDepthChroma = 8;
    param.mfx.FrameInfo.Shift          = 0;
    param.mfx.FrameInfo.FourCC         = MFX_FOURCC_NV12;
    param.mfx.FrameInfo.Width          = avctx->coded_width;
    param.mfx.FrameInfo.Height         = avctx->coded_height;
    param.mfx.FrameInfo.ChromaFormat   = MFX_CHROMAFORMAT_YUV420;

    param.IOPattern   = q->iopattern;
    param.AsyncDepth  = q->async_depth;
    param.ExtParam    = q->ext_buffers;
    param.NumExtParam = q->nb_ext_buffers;

    ret = MFXVideoDECODE_Init(q->session, &param);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing the MFX video decoder\n");
        return ff_qsv_error(ret);
    }

    return 0;
}

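/* Attach an mfxFrameSurface1 to a QSVFrame: either reuse the surface carried
 * by an AV_PIX_FMT_QSV hardware frame, or describe the freshly allocated
 * system-memory buffers in surface_internal. */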
static int alloc_frame(AVCodecContext *avctx, QSVFrame *frame)
{
    int ret;

    ret = ff_get_buffer(avctx, frame->frame, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0)
        return ret;

    if (frame->frame->format == AV_PIX_FMT_QSV) {
        frame->surface = (mfxFrameSurface1*)frame->frame->data[3];
    } else {
        frame->surface_internal.Info.BitDepthLuma   = 8;
        frame->surface_internal.Info.BitDepthChroma = 8;
        frame->surface_internal.Info.FourCC         = MFX_FOURCC_NV12;
        frame->surface_internal.Info.Width          = avctx->coded_width;
        frame->surface_internal.Info.Height         = avctx->coded_height;
        frame->surface_internal.Info.ChromaFormat   = MFX_CHROMAFORMAT_YUV420;

        frame->surface_internal.Data.PitchLow = frame->frame->linesize[0];
        frame->surface_internal.Data.Y        = frame->frame->data[0];
        frame->surface_internal.Data.UV       = frame->frame->data[1];

        frame->surface = &frame->surface_internal;
    }

    return 0;
}

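/* Release surfaces that the SDK no longer locks and that are not queued in
 * the async FIFO, so their slots in the work_frames list can be reused. */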
static void qsv_clear_unused_frames(QSVContext *q)
{
    QSVFrame *cur = q->work_frames;
    while (cur) {
        if (cur->surface && !cur->surface->Data.Locked && !cur->queued) {
            cur->surface = NULL;
            av_frame_unref(cur->frame);
        }
        cur = cur->next;
    }
}

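/* Hand the decoder a free input surface: reuse an idle entry from the
 * work_frames list if possible, otherwise append a new QSVFrame to it. */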
static int get_surface(AVCodecContext *avctx, QSVContext *q, mfxFrameSurface1 **surf)
{
    QSVFrame *frame, **last;
    int ret;

    qsv_clear_unused_frames(q);

    frame = q->work_frames;
    last  = &q->work_frames;
    while (frame) {
        if (!frame->surface) {
            ret = alloc_frame(avctx, frame);
            if (ret < 0)
                return ret;
            *surf = frame->surface;
            return 0;
        }

        last  = &frame->next;
        frame = frame->next;
    }

    frame = av_mallocz(sizeof(*frame));
    if (!frame)
        return AVERROR(ENOMEM);
    frame->frame = av_frame_alloc();
    if (!frame->frame) {
        av_freep(&frame);
        return AVERROR(ENOMEM);
    }
    *last = frame;

    ret = alloc_frame(avctx, frame);
    if (ret < 0)
        return ret;

    *surf = frame->surface;

    return 0;
}

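/* Map the output surface returned by DecodeFrameAsync back to the QSVFrame
 * that owns it. */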
static QSVFrame *find_frame(QSVContext *q, mfxFrameSurface1 *surf)
{
    QSVFrame *cur = q->work_frames;
    while (cur) {
        if (surf == cur->surface)
            return cur;
        cur = cur->next;
    }
    return NULL;
}

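/* Feed one packet to MFXVideoDECODE_DecodeFrameAsync(), queue the resulting
 * surface/sync-point pair, and once the FIFO is full (or when flushing with
 * an empty packet) synchronize on the oldest entry and output it as an
 * AVFrame. Returns the number of bytes consumed from the packet. */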
static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
                      AVFrame *frame, int *got_frame,
                      AVPacket *avpkt)
{
    QSVFrame *out_frame;
    mfxFrameSurface1 *insurf;
    mfxFrameSurface1 *outsurf;
    mfxSyncPoint *sync;
    mfxBitstream bs = { { { 0 } } };
    int ret;

    if (avpkt->size) {
        bs.Data       = avpkt->data;
        bs.DataLength = avpkt->size;
        bs.MaxLength  = bs.DataLength;
        bs.TimeStamp  = avpkt->pts;
    }

    sync = av_mallocz(sizeof(*sync));
    if (!sync)
        return AVERROR(ENOMEM);

    do {
        ret = get_surface(avctx, q, &insurf);
        if (ret < 0) {
            av_freep(&sync);
            return ret;
        }

        ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
                                              insurf, &outsurf, sync);
        if (ret == MFX_WRN_DEVICE_BUSY)
            av_usleep(1);
    } while (ret == MFX_WRN_DEVICE_BUSY || ret == MFX_ERR_MORE_SURFACE);

    if (ret != MFX_ERR_NONE &&
        ret != MFX_ERR_MORE_DATA &&
        ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
        ret != MFX_ERR_MORE_SURFACE) {
        av_log(avctx, AV_LOG_ERROR, "Error during QSV decoding.\n");
        av_freep(&sync);
        return ff_qsv_error(ret);
    }

    /* make sure we do not enter an infinite loop if the SDK
     * did not consume any data and did not return anything */
    if (!*sync && !bs.DataOffset) {
        av_log(avctx, AV_LOG_WARNING, "A decode call did not consume any data\n");
        bs.DataOffset = avpkt->size;
    }

    if (*sync) {
        QSVFrame *out_frame = find_frame(q, outsurf);

        if (!out_frame) {
            av_log(avctx, AV_LOG_ERROR,
                   "The returned surface does not correspond to any frame\n");
            av_freep(&sync);
            return AVERROR_BUG;
        }

        out_frame->queued = 1;
        av_fifo_generic_write(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_write(q->async_fifo, &sync,      sizeof(sync),      NULL);
    } else {
        av_freep(&sync);
    }

    if (!av_fifo_space(q->async_fifo) ||
        (!avpkt->size && av_fifo_size(q->async_fifo))) {
        AVFrame *src_frame;

        av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_read(q->async_fifo, &sync,      sizeof(sync),      NULL);
        out_frame->queued = 0;

        do {
            ret = MFXVideoCORE_SyncOperation(q->session, *sync, 1000);
        } while (ret == MFX_WRN_IN_EXECUTION);

        av_freep(&sync);

        src_frame = out_frame->frame;

        ret = av_frame_ref(frame, src_frame);
        if (ret < 0)
            return ret;

        outsurf = out_frame->surface;

        frame->pkt_pts = frame->pts = outsurf->Data.TimeStamp;

        frame->repeat_pict =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
        frame->top_field_first =
            outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
        frame->interlaced_frame =
            !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);

        *got_frame = 1;
    }

    return bs.DataOffset;
}

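/* Free everything owned by the QSV decoder: drain the async FIFO, release the
 * work-frame list, and close the parser and MFX sessions. */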
int ff_qsv_decode_close(QSVContext *q)
{
    QSVFrame *cur = q->work_frames;

    if (q->session)
        MFXVideoDECODE_Close(q->session);

    while (q->async_fifo && av_fifo_size(q->async_fifo)) {
        QSVFrame *out_frame;
        mfxSyncPoint *sync;

        av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
        av_fifo_generic_read(q->async_fifo, &sync,      sizeof(sync),      NULL);

        av_freep(&sync);
    }

    while (cur) {
        q->work_frames = cur->next;
        av_frame_free(&cur->frame);
        av_freep(&cur);
        cur = q->work_frames;
    }

    av_fifo_free(q->async_fifo);
    q->async_fifo = NULL;

    av_parser_close(q->parser);
    avcodec_free_context(&q->avctx_internal);

    if (q->internal_session)
        MFXClose(q->internal_session);

    return 0;
}

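/* Entry point used by the per-codec wrappers: lazily set up a parser and an
 * internal codec context, detect pixel-format or resolution changes from the
 * parsed parameters, (re)initialize the MFX decoder when they change, and
 * then pass the packet on to qsv_decode(). */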
int ff_qsv_process_data(AVCodecContext *avctx, QSVContext *q,
                        AVFrame *frame, int *got_frame, AVPacket *pkt)
{
    uint8_t *dummy_data;
    int dummy_size;
    int ret;

    if (!q->avctx_internal) {
        q->avctx_internal = avcodec_alloc_context3(NULL);
        if (!q->avctx_internal)
            return AVERROR(ENOMEM);

        if (avctx->extradata) {
            q->avctx_internal->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
            if (!q->avctx_internal->extradata)
                return AVERROR(ENOMEM);

            memcpy(q->avctx_internal->extradata, avctx->extradata,
                   avctx->extradata_size);
            q->avctx_internal->extradata_size = avctx->extradata_size;
        }

        q->parser = av_parser_init(avctx->codec_id);
        if (!q->parser)
            return AVERROR(ENOMEM);

        q->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
        q->orig_pix_fmt   = AV_PIX_FMT_NONE;
    }

    if (!pkt->size)
        return qsv_decode(avctx, q, frame, got_frame, pkt);

    /* we assume the packets are already split properly and want
     * just the codec parameters here */
    av_parser_parse2(q->parser, q->avctx_internal,
                     &dummy_data, &dummy_size,
                     pkt->data, pkt->size, pkt->pts, pkt->dts,
                     pkt->pos);

    /* TODO: flush delayed frames on reinit */
    if (q->parser->format       != q->orig_pix_fmt    ||
        q->parser->coded_width  != avctx->coded_width ||
        q->parser->coded_height != avctx->coded_height) {
        mfxSession session = NULL;

        enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_QSV,
                                           AV_PIX_FMT_NONE,
                                           AV_PIX_FMT_NONE };
        enum AVPixelFormat qsv_format;

        qsv_format = ff_qsv_map_pixfmt(q->parser->format);
        if (qsv_format < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "Only 8-bit YUV420 streams are supported.\n");
            ret = AVERROR(ENOSYS);
            goto reinit_fail;
        }

        q->orig_pix_fmt     = q->parser->format;
        avctx->pix_fmt      = pix_fmts[1] = qsv_format;
        avctx->width        = q->parser->width;
        avctx->height       = q->parser->height;
        avctx->coded_width  = q->parser->coded_width;
        avctx->coded_height = q->parser->coded_height;
        avctx->level        = q->avctx_internal->level;
        avctx->profile      = q->avctx_internal->profile;

        ret = ff_get_format(avctx, pix_fmts);
        if (ret < 0)
            goto reinit_fail;

        avctx->pix_fmt = ret;

        if (avctx->hwaccel_context) {
            AVQSVContext *user_ctx = avctx->hwaccel_context;
            session           = user_ctx->session;
            q->iopattern      = user_ctx->iopattern;
            q->ext_buffers    = user_ctx->ext_buffers;
            q->nb_ext_buffers = user_ctx->nb_ext_buffers;
        }

        ret = qsv_decode_init(avctx, q, session);
        if (ret < 0)
            goto reinit_fail;
    }

    return qsv_decode(avctx, q, frame, got_frame, pkt);

reinit_fail:
    q->orig_pix_fmt = q->parser->format = avctx->pix_fmt = AV_PIX_FMT_NONE;
    return ret;
}

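/* Flushing only resets the recorded pixel format, which forces the decoder to
 * be reinitialized on the next packet. */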
void ff_qsv_decode_flush(AVCodecContext *avctx, QSVContext *q)
{
    q->orig_pix_fmt = AV_PIX_FMT_NONE;
}