/*
 * Intel MediaSDK QSV based H.264 decoder
 *
 * copyright (c) 2013 Luca Barbato
 * copyright (c) 2015 Anton Khirnov
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>
#include <string.h>

#include <mfx/mfxvideo.h>

#include "libavutil/common.h"
#include "libavutil/fifo.h"
#include "libavutil/opt.h"

#include "avcodec.h"
#include "internal.h"
#include "qsv_internal.h"
#include "qsvdec.h"
#include "qsv.h"

typedef struct QSVH264Context {
    AVClass *class;
    QSVContext qsv;

    // the internal parser and codec context for parsing the data
    AVCodecParserContext *parser;
    AVCodecContext *avctx_internal;
    enum AVPixelFormat orig_pix_fmt;

    // the filter for converting to Annex B
    AVBitStreamFilterContext *bsf;

    AVFifoBuffer *packet_fifo;

    AVPacket input_ref;
    AVPacket pkt_filtered;
    uint8_t *filtered_data;
} QSVH264Context;
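
/* Drain any packets still queued in the FIFO and release the current
 * filtered buffer together with its source packet. */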
static void qsv_clear_buffers(QSVH264Context *s)
{
    AVPacket pkt;
    while (av_fifo_size(s->packet_fifo) >= sizeof(pkt)) {
        av_fifo_generic_read(s->packet_fifo, &pkt, sizeof(pkt), NULL);
        av_packet_unref(&pkt);
    }

    if (s->filtered_data != s->input_ref.data)
        av_freep(&s->filtered_data);
    s->filtered_data = NULL;
    av_packet_unref(&s->input_ref);
}

static av_cold int qsv_decode_close(AVCodecContext *avctx)
{
    QSVH264Context *s = avctx->priv_data;

    ff_qsv_decode_close(&s->qsv);

    qsv_clear_buffers(s);

    av_fifo_free(s->packet_fifo);

    av_bitstream_filter_close(s->bsf);
    av_parser_close(s->parser);
    avcodec_free_context(&s->avctx_internal);

    return 0;
}
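
/* Allocate the packet FIFO, the h264_mp4toannexb bitstream filter and an
 * internal parser context (seeded with a copy of the caller's extradata).
 * The MFX decoder itself is opened lazily in qsv_process_data() once the
 * stream parameters are known. */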
static av_cold int qsv_decode_init(AVCodecContext *avctx)
{
    QSVH264Context *s = avctx->priv_data;
    int ret;

    s->orig_pix_fmt = AV_PIX_FMT_NONE;

    s->packet_fifo = av_fifo_alloc(sizeof(AVPacket));
    if (!s->packet_fifo) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    s->bsf = av_bitstream_filter_init("h264_mp4toannexb");
    if (!s->bsf) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    s->avctx_internal = avcodec_alloc_context3(NULL);
    if (!s->avctx_internal) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if (avctx->extradata) {
        s->avctx_internal->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!s->avctx_internal->extradata) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        memcpy(s->avctx_internal->extradata, avctx->extradata,
               avctx->extradata_size);
        s->avctx_internal->extradata_size = avctx->extradata_size;
    }

    s->parser = av_parser_init(AV_CODEC_ID_H264);
    if (!s->parser) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    s->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;

    s->qsv.iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;

    return 0;
fail:
    qsv_decode_close(avctx);
    return ret;
}
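
/* Run the parser over the packet to pick up the stream parameters,
 * (re)initialize the QSV decoder whenever the pixel format or coded
 * dimensions change, then hand the packet to the decoder. */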
static int qsv_process_data(AVCodecContext *avctx, AVFrame *frame,
                            int *got_frame, AVPacket *pkt)
{
    QSVH264Context *s = avctx->priv_data;
    uint8_t *dummy_data;
    int dummy_size;
    int ret;

    /* we assume the packets are already split properly and want
     * just the codec parameters here */
    av_parser_parse2(s->parser, s->avctx_internal,
                     &dummy_data, &dummy_size,
                     pkt->data, pkt->size, pkt->pts, pkt->dts,
                     pkt->pos);

    /* TODO: flush delayed frames on reinit */
    if (s->parser->format       != s->orig_pix_fmt    ||
        s->parser->coded_width  != avctx->coded_width ||
        s->parser->coded_height != avctx->coded_height) {
        mfxSession session = NULL;

        enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_QSV,
                                           AV_PIX_FMT_NONE,
                                           AV_PIX_FMT_NONE };
        enum AVPixelFormat qsv_format;

        qsv_format = ff_qsv_map_pixfmt(s->parser->format);
        if (qsv_format < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "Only 8-bit YUV420 streams are supported.\n");
            ret = AVERROR(ENOSYS);
            goto reinit_fail;
        }

        s->orig_pix_fmt     = s->parser->format;
        avctx->pix_fmt      = pix_fmts[1] = qsv_format;
        avctx->width        = s->parser->width;
        avctx->height       = s->parser->height;
        avctx->coded_width  = s->parser->coded_width;
        avctx->coded_height = s->parser->coded_height;
        avctx->level        = s->avctx_internal->level;
        avctx->profile      = s->avctx_internal->profile;

        ret = ff_get_format(avctx, pix_fmts);
        if (ret < 0)
            goto reinit_fail;

        avctx->pix_fmt = ret;

        if (avctx->hwaccel_context) {
            AVQSVContext *user_ctx = avctx->hwaccel_context;
            session               = user_ctx->session;
            s->qsv.iopattern      = user_ctx->iopattern;
            s->qsv.ext_buffers    = user_ctx->ext_buffers;
            s->qsv.nb_ext_buffers = user_ctx->nb_ext_buffers;
        }

        ret = ff_qsv_decode_init(avctx, &s->qsv, session);
        if (ret < 0)
            goto reinit_fail;
    }

    return ff_qsv_decode(avctx, &s->qsv, frame, got_frame, &s->pkt_filtered);

reinit_fail:
    s->orig_pix_fmt = s->parser->format = avctx->pix_fmt = AV_PIX_FMT_NONE;
    return ret;
}
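
/* Buffer the incoming packet, then drain the FIFO: convert each packet to
 * Annex B where needed and feed it to the decoder until a frame is produced
 * or the buffered input runs out. */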
static int qsv_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *avpkt)
{
    QSVH264Context *s = avctx->priv_data;
    AVFrame *frame    = data;
    int ret;

    /* buffer the input packet */
    if (avpkt->size) {
        AVPacket input_ref = { 0 };

        if (av_fifo_space(s->packet_fifo) < sizeof(input_ref)) {
            ret = av_fifo_realloc2(s->packet_fifo,
                                   av_fifo_size(s->packet_fifo) + sizeof(input_ref));
            if (ret < 0)
                return ret;
        }

        ret = av_packet_ref(&input_ref, avpkt);
        if (ret < 0)
            return ret;
        av_fifo_generic_write(s->packet_fifo, &input_ref, sizeof(input_ref), NULL);
    }

    /* process buffered data */
    while (!*got_frame) {
        /* prepare the input data -- convert to Annex B if needed */
        if (s->pkt_filtered.size <= 0) {
            int size;

            /* no more data */
            if (av_fifo_size(s->packet_fifo) < sizeof(AVPacket))
                return avpkt->size ? avpkt->size : ff_qsv_decode(avctx, &s->qsv, frame, got_frame, avpkt);

            if (s->filtered_data != s->input_ref.data)
                av_freep(&s->filtered_data);
            s->filtered_data = NULL;
            av_packet_unref(&s->input_ref);

            av_fifo_generic_read(s->packet_fifo, &s->input_ref, sizeof(s->input_ref), NULL);
            ret = av_bitstream_filter_filter(s->bsf, avctx, NULL,
                                             &s->filtered_data, &size,
                                             s->input_ref.data, s->input_ref.size, 0);
            if (ret < 0) {
                s->filtered_data = s->input_ref.data;
                size             = s->input_ref.size;
            }
            s->pkt_filtered      = s->input_ref;
            s->pkt_filtered.data = s->filtered_data;
            s->pkt_filtered.size = size;
        }

        ret = qsv_process_data(avctx, frame, got_frame, &s->pkt_filtered);
        if (ret < 0)
            return ret;

        s->pkt_filtered.size -= ret;
        s->pkt_filtered.data += ret;
    }

    return avpkt->size;
}
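
/* Called on seek: drop all buffered packets and force a decoder
 * reinitialization on the next packet. */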
static void qsv_decode_flush(AVCodecContext *avctx)
{
    QSVH264Context *s = avctx->priv_data;

    qsv_clear_buffers(s);
    s->orig_pix_fmt = AV_PIX_FMT_NONE;
}
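
/* Stub hwaccel entry: it implements no callbacks and only advertises
 * AV_PIX_FMT_QSV as a hardware pixel format for this decoder. */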
AVHWAccel ff_h264_qsv_hwaccel = {
    .name           = "h264_qsv",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .pix_fmt        = AV_PIX_FMT_QSV,
};

#define OFFSET(x) offsetof(QSVH264Context, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
    { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 0, INT_MAX, VD },

    { NULL },
};

static const AVClass class = {
    .class_name = "h264_qsv",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_h264_qsv_decoder = {
    .name           = "h264_qsv",
    .long_name      = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (Intel Quick Sync Video acceleration)"),
    .priv_data_size = sizeof(QSVH264Context),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .init           = qsv_decode_init,
    .decode         = qsv_decode_frame,
    .flush          = qsv_decode_flush,
    .close          = qsv_decode_close,
    .capabilities   = CODEC_CAP_DELAY,
    .priv_class     = &class,
};