/*
 * OpenH264 video encoder
 * Copyright (C) 2014 Martin Storsjo
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <wels/codec_api.h>
#include <wels/codec_ver.h>

#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"

#include "avcodec.h"
#include "internal.h"

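/* Private state for the wrapper: the OpenH264 encoder instance plus the
 * user-visible options declared below. */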
typedef struct SVCContext {
    const AVClass *av_class;
    ISVCEncoder *encoder;
    int slice_mode;
    int loopfilter;
    char *profile;
} SVCContext;

#define OFFSET(x) offsetof(SVCContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
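/* Options exposed through the AVOption system; the slice_mode values map
 * directly onto OpenH264's SliceModeEnum constants. */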
static const AVOption options[] = {
    { "slice_mode", "Slice mode", OFFSET(slice_mode), AV_OPT_TYPE_INT, { .i64 = SM_AUTO_SLICE }, SM_SINGLE_SLICE, SM_RESERVED, VE, "slice_mode" },
        { "fixed", "A fixed number of slices", 0, AV_OPT_TYPE_CONST, { .i64 = SM_FIXEDSLCNUM_SLICE }, 0, 0, VE, "slice_mode" },
        { "rowmb", "One slice per row of macroblocks", 0, AV_OPT_TYPE_CONST, { .i64 = SM_ROWMB_SLICE }, 0, 0, VE, "slice_mode" },
        { "auto", "Automatic number of slices according to number of threads", 0, AV_OPT_TYPE_CONST, { .i64 = SM_AUTO_SLICE }, 0, 0, VE, "slice_mode" },
    { "loopfilter", "Enable loop filter", OFFSET(loopfilter), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, VE },
    { "profile", "Set profile restrictions", OFFSET(profile), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
    { NULL }
};

static const AVClass class = {
    "libopenh264enc", av_default_item_name, options, LIBAVUTIL_VERSION_INT
};

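/* Free the OpenH264 encoder instance, if one was created. */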
static av_cold int svc_encode_close(AVCodecContext *avctx)
{
    SVCContext *s = avctx->priv_data;

    if (s->encoder)
        WelsDestroySVCEncoder(s->encoder);
    return 0;
}

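/* Create and configure an OpenH264 encoder according to the AVCodecContext
 * settings; with global headers, the SPS/PPS are exported as extradata. */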
static av_cold int svc_encode_init(AVCodecContext *avctx)
{
    SVCContext *s = avctx->priv_data;
    SEncParamExt param = { 0 };
    int err = AVERROR_UNKNOWN;

    // Mingw GCC < 4.7 on x86_32 uses an incorrect/buggy ABI for the WelsGetCodecVersion
    // function (for functions returning larger structs), thus skip the check in those
    // configurations.
#if !defined(_WIN32) || !defined(__GNUC__) || !ARCH_X86_32 || AV_GCC_VERSION_AT_LEAST(4, 7)
    OpenH264Version libver = WelsGetCodecVersion();
    if (memcmp(&libver, &g_stCodecVersion, sizeof(libver))) {
        av_log(avctx, AV_LOG_ERROR, "Incorrect library version loaded\n");
        return AVERROR(EINVAL);
    }
#endif

    if (WelsCreateSVCEncoder(&s->encoder)) {
        av_log(avctx, AV_LOG_ERROR, "Unable to create encoder\n");
        return AVERROR_UNKNOWN;
    }

    (*s->encoder)->GetDefaultParams(s->encoder, &param);

    param.fMaxFrameRate = avctx->time_base.den / avctx->time_base.num;
    param.iPicWidth = avctx->width;
    param.iPicHeight = avctx->height;
    param.iTargetBitrate = avctx->bit_rate;
    param.iMaxBitrate = FFMAX(avctx->rc_max_rate, avctx->bit_rate);
    param.iRCMode = RC_QUALITY_MODE;
    param.iTemporalLayerNum = 1;
    param.iSpatialLayerNum = 1;
    param.bEnableDenoise = 0;
    param.bEnableBackgroundDetection = 1;
    param.bEnableAdaptiveQuant = 1;
    param.bEnableFrameSkip = 0;
    param.bEnableLongTermReference = 0;
    param.iLtrMarkPeriod = 30;
    param.uiIntraPeriod = avctx->gop_size;
    param.bEnableSpsPpsIdAddition = 0;
    param.bPrefixNalAddingCtrl = 0;
    param.iLoopFilterDisableIdc = !s->loopfilter;
    param.iEntropyCodingModeFlag = 0;
    param.iMultipleThreadIdc = avctx->thread_count;

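    /* CABAC entropy coding is only enabled when the main profile is
     * requested, either explicitly or via coder_type. */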
    if (s->profile && !strcmp(s->profile, "main"))
        param.iEntropyCodingModeFlag = 1;
    else if (!s->profile && avctx->coder_type == FF_CODER_TYPE_AC)
        param.iEntropyCodingModeFlag = 1;

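    /* A single spatial layer is used; mirror the global dimensions,
     * frame rate and bitrate into it. */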
    param.sSpatialLayers[0].iVideoWidth = param.iPicWidth;
    param.sSpatialLayers[0].iVideoHeight = param.iPicHeight;
    param.sSpatialLayers[0].fFrameRate = param.fMaxFrameRate;
    param.sSpatialLayers[0].iSpatialBitrate = param.iTargetBitrate;
    param.sSpatialLayers[0].iMaxSpatialBitrate = param.iMaxBitrate;

    if (avctx->slices > 1)
        s->slice_mode = SM_FIXEDSLCNUM_SLICE;
    param.sSpatialLayers[0].sSliceCfg.uiSliceMode = s->slice_mode;
    param.sSpatialLayers[0].sSliceCfg.sSliceArgument.uiSliceNum = avctx->slices;

    if ((*s->encoder)->InitializeExt(s->encoder, &param) != cmResultSuccess) {
        av_log(avctx, AV_LOG_ERROR, "Initialize failed\n");
        goto fail;
    }

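    /* With global headers requested, fetch the SPS/PPS NAL units from the
     * encoder and store them as extradata. */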
    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
        SFrameBSInfo fbi = { 0 };
        int i, size = 0;
        (*s->encoder)->EncodeParameterSets(s->encoder, &fbi);
        for (i = 0; i < fbi.sLayerInfo[0].iNalCount; i++)
            size += fbi.sLayerInfo[0].pNalLengthInByte[i];
        avctx->extradata = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!avctx->extradata) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
        avctx->extradata_size = size;
        memcpy(avctx->extradata, fbi.sLayerInfo[0].pBsBuf, size);
    }

    return 0;

fail:
    svc_encode_close(avctx);
    return err;
}

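/* Encode one frame and copy the resulting NAL units into an output packet. */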
static int svc_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                            const AVFrame *frame, int *got_packet)
{
    SVCContext *s = avctx->priv_data;
    SFrameBSInfo fbi = { 0 };
    int i, ret;
    int encoded;
    SSourcePicture sp = { 0 };
    int size = 0, layer, first_layer = 0;
    int layer_size[MAX_LAYER_NUM_OF_FRAME] = { 0 };

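    /* Describe the input AVFrame as an OpenH264 SSourcePicture. */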
    sp.iColorFormat = videoFormatI420;
    for (i = 0; i < 3; i++) {
        sp.iStride[i] = frame->linesize[i];
        sp.pData[i] = frame->data[i];
    }
    sp.iPicWidth = avctx->width;
    sp.iPicHeight = avctx->height;

    encoded = (*s->encoder)->EncodeFrame(s->encoder, &sp, &fbi);
    if (encoded != cmResultSuccess) {
        av_log(avctx, AV_LOG_ERROR, "EncodeFrame failed\n");
        return AVERROR_UNKNOWN;
    }

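    /* A skipped frame produces no output packet. */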
    if (fbi.eFrameType == videoFrameTypeSkip) {
        av_log(avctx, AV_LOG_DEBUG, "frame skipped\n");
        return 0;
    }

    first_layer = 0;
    // Normal frames are returned with one single layer, while IDR
    // frames have two layers, where the first layer contains the SPS/PPS.
    // If using global headers, don't include the SPS/PPS in the returned
    // packet - thus, only return one layer.
    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER)
        first_layer = fbi.iLayerNum - 1;

    for (layer = first_layer; layer < fbi.iLayerNum; layer++) {
        for (i = 0; i < fbi.sLayerInfo[layer].iNalCount; i++)
            layer_size[layer] += fbi.sLayerInfo[layer].pNalLengthInByte[i];
        size += layer_size[layer];
    }
    av_log(avctx, AV_LOG_DEBUG, "%d slices\n", fbi.sLayerInfo[fbi.iLayerNum - 1].iNalCount);

    if ((ret = ff_alloc_packet(avpkt, size))) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
        return ret;
    }

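    /* Concatenate the bitstream of each included layer into the packet. */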
    size = 0;
    for (layer = first_layer; layer < fbi.iLayerNum; layer++) {
        memcpy(avpkt->data + size, fbi.sLayerInfo[layer].pBsBuf, layer_size[layer]);
        size += layer_size[layer];
    }

    avpkt->pts = frame->pts;
    if (fbi.eFrameType == videoFrameTypeIDR)
        avpkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}

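/* Encoder registration with libavcodec. */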
AVCodec ff_libopenh264_encoder = {
    .name           = "libopenh264",
    .long_name      = NULL_IF_CONFIG_SMALL("OpenH264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .priv_data_size = sizeof(SVCContext),
    .init           = svc_encode_init,
    .encode2        = svc_encode_frame,
    .close          = svc_encode_close,
    .capabilities   = CODEC_CAP_AUTO_THREADS,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P,
                                                    AV_PIX_FMT_NONE },
    .priv_class     = &class,
};