/*
 * Intel MediaSDK QSV based H.264 decoder
 *
 * copyright (c) 2013 Luca Barbato
 * copyright (c) 2015 Anton Khirnov
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>
#include <string.h>

#include <mfx/mfxvideo.h>

#include "libavutil/common.h"
#include "libavutil/fifo.h"
#include "libavutil/opt.h"

#include "avcodec.h"
#include "internal.h"
#include "qsv_internal.h"
#include "qsv.h"
typedef struct QSVH264Context {
    AVClass *class;
    QSVContext qsv;

    // the internal parser and codec context for parsing the data
    AVCodecParserContext *parser;
    AVCodecContext *avctx_internal;
    enum AVPixelFormat orig_pix_fmt;

    // the filter for converting to Annex B
    AVBitStreamFilterContext *bsf;

    // FIFO of buffered input packets waiting to be decoded
    AVFifoBuffer *packet_fifo;

    // the packet currently being consumed and its Annex B-filtered copy
    AVPacket input_ref;
    AVPacket pkt_filtered;
    uint8_t *filtered_data;
} QSVH264Context;
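
/* Drain all packets buffered in the FIFO and drop the packet currently
 * being consumed together with its filtered copy. */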
static void qsv_clear_buffers(QSVH264Context *s)
{
    AVPacket pkt;
    while (av_fifo_size(s->packet_fifo) >= sizeof(pkt)) {
        av_fifo_generic_read(s->packet_fifo, &pkt, sizeof(pkt), NULL);
        av_packet_unref(&pkt);
    }

    if (s->filtered_data != s->input_ref.data)
        av_freep(&s->filtered_data);
    s->filtered_data = NULL;
    av_packet_unref(&s->input_ref);
}
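
/* Tear down the MFX session and free the helper contexts and buffered data. */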
static av_cold int qsv_decode_close(AVCodecContext *avctx)
{
    QSVH264Context *s = avctx->priv_data;

    ff_qsv_close(&s->qsv);

    qsv_clear_buffers(s);

    av_fifo_free(s->packet_fifo);

    av_bitstream_filter_close(s->bsf);
    av_parser_close(s->parser);
    avcodec_free_context(&s->avctx_internal);

    return 0;
}
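
/* Set up the packet FIFO, the mp4-to-Annex B bitstream filter and the
 * internal parser context; the MFX session itself is created lazily in
 * qsv_process_data() once the stream parameters are known. */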
static av_cold int qsv_decode_init(AVCodecContext *avctx)
{
    QSVH264Context *s = avctx->priv_data;
    int ret;

    s->orig_pix_fmt = AV_PIX_FMT_NONE;

    s->packet_fifo = av_fifo_alloc(sizeof(AVPacket));
    if (!s->packet_fifo) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    s->bsf = av_bitstream_filter_init("h264_mp4toannexb");
    if (!s->bsf) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    s->avctx_internal = avcodec_alloc_context3(NULL);
    if (!s->avctx_internal) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if (avctx->extradata) {
        s->avctx_internal->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!s->avctx_internal->extradata) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        memcpy(s->avctx_internal->extradata, avctx->extradata,
               avctx->extradata_size);
        s->avctx_internal->extradata_size = avctx->extradata_size;
    }

    s->parser = av_parser_init(AV_CODEC_ID_H264);
    if (!s->parser) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    s->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;

    s->qsv.iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;

    return 0;
fail:
    qsv_decode_close(avctx);
    return ret;
}
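
/* Parse the packet to extract the stream parameters, (re)initialize the
 * MFX decoder whenever the pixel format or coded dimensions change, then
 * hand the filtered packet to the actual QSV decoding routine. */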
static int qsv_process_data(AVCodecContext *avctx, AVFrame *frame,
                            int *got_frame, AVPacket *pkt)
{
    QSVH264Context *s = avctx->priv_data;
    uint8_t *dummy_data;
    int dummy_size;
    int ret;

    /* we assume the packets are already split properly and want
     * just the codec parameters here */
    av_parser_parse2(s->parser, s->avctx_internal,
                     &dummy_data, &dummy_size,
                     pkt->data, pkt->size, pkt->pts, pkt->dts,
                     pkt->pos);

    /* TODO: flush delayed frames on reinit */
    if (s->parser->format       != s->orig_pix_fmt    ||
        s->parser->coded_width  != avctx->coded_width ||
        s->parser->coded_height != avctx->coded_height) {
        mfxSession session = NULL;

        enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_QSV,
                                           AV_PIX_FMT_NONE,
                                           AV_PIX_FMT_NONE };
        enum AVPixelFormat qsv_format;

        qsv_format = ff_qsv_map_pixfmt(s->parser->format);
        if (qsv_format < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "Only 8-bit YUV420 streams are supported.\n");
            ret = AVERROR(ENOSYS);
            goto reinit_fail;
        }

        s->orig_pix_fmt     = s->parser->format;
        avctx->pix_fmt      = pix_fmts[1] = qsv_format;
        avctx->width        = s->parser->width;
        avctx->height       = s->parser->height;
        avctx->coded_width  = s->parser->coded_width;
        avctx->coded_height = s->parser->coded_height;
        avctx->level        = s->avctx_internal->level;
        avctx->profile      = s->avctx_internal->profile;

        ret = ff_get_format(avctx, pix_fmts);
        if (ret < 0)
            goto reinit_fail;

        avctx->pix_fmt = ret;

        if (avctx->hwaccel_context) {
            AVQSVContext *user_ctx = avctx->hwaccel_context;

            session               = user_ctx->session;
            s->qsv.iopattern      = user_ctx->iopattern;
            s->qsv.ext_buffers    = user_ctx->ext_buffers;
            s->qsv.nb_ext_buffers = user_ctx->nb_ext_buffers;
        }

        ret = ff_qsv_init(avctx, &s->qsv, session);
        if (ret < 0)
            goto reinit_fail;
    }

    return ff_qsv_decode(avctx, &s->qsv, frame, got_frame, &s->pkt_filtered);

reinit_fail:
    s->orig_pix_fmt = s->parser->format = avctx->pix_fmt = AV_PIX_FMT_NONE;
    return ret;
}
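
/* Buffer the incoming packet, convert the buffered data to Annex B and feed
 * it to the decoder until a frame is produced or the input is exhausted. */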
static int qsv_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *avpkt)
{
    QSVH264Context *s = avctx->priv_data;
    AVFrame *frame    = data;
    int ret;

    /* buffer the input packet */
    if (avpkt->size) {
        AVPacket input_ref = { 0 };

        if (av_fifo_space(s->packet_fifo) < sizeof(input_ref)) {
            ret = av_fifo_realloc2(s->packet_fifo,
                                   av_fifo_size(s->packet_fifo) + sizeof(input_ref));
            if (ret < 0)
                return ret;
        }

        ret = av_packet_ref(&input_ref, avpkt);
        if (ret < 0)
            return ret;
        av_fifo_generic_write(s->packet_fifo, &input_ref, sizeof(input_ref), NULL);
    }

    /* process buffered data */
    while (!*got_frame) {
        /* prepare the input data -- convert to Annex B if needed */
        if (s->pkt_filtered.size <= 0) {
            int size;

            /* no more data */
            if (av_fifo_size(s->packet_fifo) < sizeof(AVPacket))
                return avpkt->size ? avpkt->size : ff_qsv_decode(avctx, &s->qsv, frame, got_frame, avpkt);

            if (s->filtered_data != s->input_ref.data)
                av_freep(&s->filtered_data);
            s->filtered_data = NULL;
            av_packet_unref(&s->input_ref);

            av_fifo_generic_read(s->packet_fifo, &s->input_ref, sizeof(s->input_ref), NULL);
            ret = av_bitstream_filter_filter(s->bsf, avctx, NULL,
                                             &s->filtered_data, &size,
                                             s->input_ref.data, s->input_ref.size, 0);
            if (ret < 0) {
                s->filtered_data = s->input_ref.data;
                size             = s->input_ref.size;
            }
            s->pkt_filtered      = s->input_ref;
            s->pkt_filtered.data = s->filtered_data;
            s->pkt_filtered.size = size;
        }

        ret = qsv_process_data(avctx, frame, got_frame, &s->pkt_filtered);
        if (ret < 0)
            return ret;

        s->pkt_filtered.size -= ret;
        s->pkt_filtered.data += ret;
    }

    return avpkt->size;
}
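
/* Discard all buffered data on a flush and force a decoder reinit on the
 * next packet by resetting the remembered pixel format. */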
static void qsv_decode_flush(AVCodecContext *avctx)
{
    QSVH264Context *s = avctx->priv_data;

    qsv_clear_buffers(s);
    s->orig_pix_fmt = AV_PIX_FMT_NONE;
}

AVHWAccel ff_h264_qsv_hwaccel = {
    .name           = "h264_qsv",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .pix_fmt        = AV_PIX_FMT_QSV,
};

#define OFFSET(x) offsetof(QSVH264Context, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
    { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 0, INT_MAX, VD },

    { NULL },
};

static const AVClass class = {
    .class_name = "h264_qsv",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_h264_qsv_decoder = {
    .name           = "h264_qsv",
    .long_name      = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (Intel Quick Sync Video acceleration)"),
    .priv_data_size = sizeof(QSVH264Context),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .init           = qsv_decode_init,
    .decode         = qsv_decode_frame,
    .flush          = qsv_decode_flush,
    .close          = qsv_decode_close,
    .capabilities   = CODEC_CAP_DELAY,
    .priv_class     = &class,
};