/*
 * OpenH264 video encoder
 * Copyright (C) 2014 Martin Storsjo
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <wels/codec_api.h>
#include <wels/codec_ver.h>

#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"

#include "avcodec.h"
#include "internal.h"
#include "libopenh264.h"

#if !OPENH264_VER_AT_LEAST(1, 6)
#define SM_SIZELIMITED_SLICE SM_DYN_SLICE
#endif
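/* Fallback target bitrate (2 Mb/s), applied in svc_encode_init() when the
 * caller leaves AVCodecContext.bit_rate at its unset value of 0. */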
#define TARGET_BITRATE_DEFAULT 2*1000*1000

typedef struct SVCContext {
    const AVClass *av_class;
    ISVCEncoder *encoder;
    int slice_mode;
    int loopfilter;
    int profile;
    int max_nal_size;
    int skip_frames;
    int skipped;
#if FF_API_OPENH264_CABAC
    int cabac;                  // deprecated
#endif
    int coder;

    // rate control mode
    int rc_mode;
} SVCContext;
#define OFFSET(x) offsetof(SVCContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
#define DEPRECATED AV_OPT_FLAG_DEPRECATED
static const AVOption options[] = {
#if FF_API_OPENH264_SLICE_MODE
#if OPENH264_VER_AT_LEAST(1, 6)
    { "slice_mode", "set slice mode, use slices/max_nal_size", OFFSET(slice_mode), AV_OPT_TYPE_INT, { .i64 = SM_FIXEDSLCNUM_SLICE }, SM_SINGLE_SLICE, SM_RESERVED, VE|DEPRECATED, "slice_mode" },
#else
    { "slice_mode", "set slice mode, use slices/max_nal_size", OFFSET(slice_mode), AV_OPT_TYPE_INT, { .i64 = SM_AUTO_SLICE }, SM_SINGLE_SLICE, SM_RESERVED, VE|DEPRECATED, "slice_mode" },
#endif
        { "fixed", "a fixed number of slices", 0, AV_OPT_TYPE_CONST, { .i64 = SM_FIXEDSLCNUM_SLICE }, 0, 0, VE, "slice_mode" },
#if OPENH264_VER_AT_LEAST(1, 6)
        { "dyn", "Size limited (compatibility name)", 0, AV_OPT_TYPE_CONST, { .i64 = SM_SIZELIMITED_SLICE }, 0, 0, VE, "slice_mode" },
        { "sizelimited", "Size limited", 0, AV_OPT_TYPE_CONST, { .i64 = SM_SIZELIMITED_SLICE }, 0, 0, VE, "slice_mode" },
#else
        { "rowmb", "one slice per row of macroblocks", 0, AV_OPT_TYPE_CONST, { .i64 = SM_ROWMB_SLICE }, 0, 0, VE, "slice_mode" },
        { "auto", "automatic number of slices according to number of threads", 0, AV_OPT_TYPE_CONST, { .i64 = SM_AUTO_SLICE }, 0, 0, VE, "slice_mode" },
        { "dyn", "Dynamic slicing", 0, AV_OPT_TYPE_CONST, { .i64 = SM_DYN_SLICE }, 0, 0, VE, "slice_mode" },
#endif
#endif
    { "loopfilter", "enable loop filter", OFFSET(loopfilter), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, VE },
    { "profile", "set profile restrictions", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = FF_PROFILE_UNKNOWN }, FF_PROFILE_UNKNOWN, 0xffff, VE, "profile" },
#define PROFILE(name, value)  name, NULL, 0, AV_OPT_TYPE_CONST, { .i64 = value }, 0, 0, VE, "profile"
        { PROFILE("constrained_baseline", FF_PROFILE_H264_CONSTRAINED_BASELINE) },
        { PROFILE("main",                 FF_PROFILE_H264_MAIN) },
        { PROFILE("high",                 FF_PROFILE_H264_HIGH) },
#undef PROFILE
    { "max_nal_size", "set maximum NAL size in bytes", OFFSET(max_nal_size), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
    { "allow_skip_frames", "allow skipping frames to hit the target bitrate", OFFSET(skip_frames), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
#if FF_API_OPENH264_CABAC
    { "cabac", "Enable cabac (deprecated, use coder)", OFFSET(cabac), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE|DEPRECATED },
#endif
    { "coder", "Coder type", OFFSET(coder), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VE, "coder" },
        { "default",          NULL, 0, AV_OPT_TYPE_CONST, { .i64 = -1 }, INT_MIN, INT_MAX, VE, "coder" },
        { "cavlc",            NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 },  INT_MIN, INT_MAX, VE, "coder" },
        { "cabac",            NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 },  INT_MIN, INT_MAX, VE, "coder" },
        { "vlc",              NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 },  INT_MIN, INT_MAX, VE, "coder" },
        { "ac",               NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 },  INT_MIN, INT_MAX, VE, "coder" },
    { "rc_mode", "Select rate control mode", OFFSET(rc_mode), AV_OPT_TYPE_INT, { .i64 = RC_QUALITY_MODE }, RC_OFF_MODE, RC_TIMESTAMP_MODE, VE, "rc_mode" },
        { "off",       "bit rate control off",                                                 0, AV_OPT_TYPE_CONST, { .i64 = RC_OFF_MODE },         0, 0, VE, "rc_mode" },
        { "quality",   "quality mode",                                                         0, AV_OPT_TYPE_CONST, { .i64 = RC_QUALITY_MODE },     0, 0, VE, "rc_mode" },
        { "bitrate",   "bitrate mode",                                                         0, AV_OPT_TYPE_CONST, { .i64 = RC_BITRATE_MODE },     0, 0, VE, "rc_mode" },
        { "buffer",    "using buffer status to adjust the video quality (no bitrate control)", 0, AV_OPT_TYPE_CONST, { .i64 = RC_BUFFERBASED_MODE }, 0, 0, VE, "rc_mode" },
#if OPENH264_VER_AT_LEAST(1, 4)
        { "timestamp", "bit rate control based on timestamp",                                  0, AV_OPT_TYPE_CONST, { .i64 = RC_TIMESTAMP_MODE },   0, 0, VE, "rc_mode" },
#endif
    { NULL }
};
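/*
 * Illustrative use of the options above from the ffmpeg CLI (a sketch only;
 * the AVOption table is the authoritative reference for names and values):
 *
 *     ffmpeg -i input.mkv -c:v libopenh264 -rc_mode bitrate -b:v 2M \
 *            -allow_skip_frames 1 -profile:v high output.mp4
 */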
static const AVClass class = {
    .class_name = "libopenh264enc",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
static av_cold int svc_encode_close(AVCodecContext *avctx)
{
    SVCContext *s = avctx->priv_data;

    if (s->encoder)
        WelsDestroySVCEncoder(s->encoder);
    if (s->skipped > 0)
        av_log(avctx, AV_LOG_WARNING, "%d frames skipped\n", s->skipped);
    return 0;
}
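/*
 * Encoder setup: create the ISVCEncoder, route the library's trace output
 * through av_log(), translate AVCodecContext fields and the private options
 * into an SEncParamExt (rate control, profile and entropy coding, the single
 * spatial layer, aspect ratio, slicing), initialize the encoder, and finally
 * export global headers and CPB side data.
 */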
static av_cold int svc_encode_init(AVCodecContext *avctx)
{
    SVCContext *s = avctx->priv_data;
    SEncParamExt param = { 0 };
    int err;
    int log_level;
    WelsTraceCallback callback_function;
    AVCPBProperties *props;

    if ((err = ff_libopenh264_check_version(avctx)) < 0)
        return err;

    if (WelsCreateSVCEncoder(&s->encoder)) {
        av_log(avctx, AV_LOG_ERROR, "Unable to create encoder\n");
        return AVERROR_UNKNOWN;
    }

    // Pass all libopenh264 messages to our callback, to allow ourselves to filter them.
    log_level = WELS_LOG_DETAIL;
    (*s->encoder)->SetOption(s->encoder, ENCODER_OPTION_TRACE_LEVEL, &log_level);

    // Set the logging callback function to one that uses av_log() (see implementation above).
    callback_function = (WelsTraceCallback) ff_libopenh264_trace_callback;
    (*s->encoder)->SetOption(s->encoder, ENCODER_OPTION_TRACE_CALLBACK, &callback_function);

    // Set the AVCodecContext as the libopenh264 callback context so that it can be passed to av_log().
    (*s->encoder)->SetOption(s->encoder, ENCODER_OPTION_TRACE_CALLBACK_CONTEXT, &avctx);

    (*s->encoder)->GetDefaultParams(s->encoder, &param);

    if (avctx->framerate.num > 0 && avctx->framerate.den > 0) {
        param.fMaxFrameRate = av_q2d(avctx->framerate);
    } else {
        if (avctx->ticks_per_frame > INT_MAX / avctx->time_base.num) {
            av_log(avctx, AV_LOG_ERROR,
                   "Could not set framerate for libopenh264enc: integer overflow\n");
            return AVERROR(EINVAL);
        }
        param.fMaxFrameRate = 1.0 / av_q2d(avctx->time_base) / FFMAX(avctx->ticks_per_frame, 1);
    }
    param.iPicWidth                  = avctx->width;
    param.iPicHeight                 = avctx->height;
    param.iTargetBitrate             = avctx->bit_rate > 0 ? avctx->bit_rate : TARGET_BITRATE_DEFAULT;
    param.iMaxBitrate                = FFMAX(avctx->rc_max_rate, avctx->bit_rate);
    param.iRCMode                    = s->rc_mode;
    if (avctx->qmax >= 0)
        param.iMaxQp                 = av_clip(avctx->qmax, 1, 51);
    if (avctx->qmin >= 0)
        param.iMinQp                 = av_clip(avctx->qmin, 1, param.iMaxQp);
    param.iTemporalLayerNum          = 1;
    param.iSpatialLayerNum           = 1;
    param.bEnableDenoise             = 0;
    param.bEnableBackgroundDetection = 1;
    param.bEnableAdaptiveQuant       = 1;
    param.bEnableFrameSkip           = s->skip_frames;
    param.bEnableLongTermReference   = 0;
    param.iLtrMarkPeriod             = 30;
    if (avctx->gop_size >= 0)
        param.uiIntraPeriod          = avctx->gop_size;
#if OPENH264_VER_AT_LEAST(1, 4)
    param.eSpsPpsIdStrategy          = CONSTANT_ID;
#else
    param.bEnableSpsPpsIdAddition    = 0;
#endif
    param.bPrefixNalAddingCtrl       = 0;
    param.iLoopFilterDisableIdc      = !s->loopfilter;
    param.iEntropyCodingModeFlag     = 0;
    param.iMultipleThreadIdc         = avctx->thread_count;
    /* Allow specifying the libopenh264 profile through AVCodecContext. */
    if (FF_PROFILE_UNKNOWN == s->profile &&
        FF_PROFILE_UNKNOWN != avctx->profile)
        switch (avctx->profile) {
        case FF_PROFILE_H264_HIGH:
        case FF_PROFILE_H264_MAIN:
        case FF_PROFILE_H264_CONSTRAINED_BASELINE:
            s->profile = avctx->profile;
            break;
        default:
            av_log(avctx, AV_LOG_WARNING,
                   "Unsupported avctx->profile: %d.\n", avctx->profile);
            break;
        }

#if FF_API_CODER_TYPE && FF_API_OPENH264_CABAC
FF_DISABLE_DEPRECATION_WARNINGS
    if (s->coder < 0 && avctx->coder_type == FF_CODER_TYPE_AC)
        s->coder = 1;

    if (s->coder < 0)
        s->coder = s->cabac;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    if (s->profile == FF_PROFILE_UNKNOWN && s->coder >= 0)
        s->profile = s->coder == 0 ? FF_PROFILE_H264_CONSTRAINED_BASELINE :
#if OPENH264_VER_AT_LEAST(1, 8)
                                     FF_PROFILE_H264_HIGH;
#else
                                     FF_PROFILE_H264_MAIN;
#endif

    switch (s->profile) {
#if OPENH264_VER_AT_LEAST(1, 8)
    case FF_PROFILE_H264_HIGH:
        param.iEntropyCodingModeFlag = 1;
        av_log(avctx, AV_LOG_VERBOSE, "Using CABAC, "
               "select EProfileIdc PRO_HIGH in libopenh264.\n");
        break;
#else
    case FF_PROFILE_H264_MAIN:
        param.iEntropyCodingModeFlag = 1;
        av_log(avctx, AV_LOG_VERBOSE, "Using CABAC, "
               "select EProfileIdc PRO_MAIN in libopenh264.\n");
        break;
#endif
    case FF_PROFILE_H264_CONSTRAINED_BASELINE:
    case FF_PROFILE_UNKNOWN:
        param.iEntropyCodingModeFlag = 0;
        av_log(avctx, AV_LOG_VERBOSE, "Using CAVLC, "
               "select EProfileIdc PRO_BASELINE in libopenh264.\n");
        break;
    default:
        param.iEntropyCodingModeFlag = 0;
        av_log(avctx, AV_LOG_WARNING, "Unsupported profile, "
               "select EProfileIdc PRO_BASELINE in libopenh264.\n");
        break;
    }
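    /* The single spatial layer mirrors the stream-level settings. If the
     * caller supplied a sample aspect ratio, it is mapped to one of the
     * predefined H.264 aspect_ratio_idc values (Table E-1) below, or
     * signalled as an extended SAR when no exact match exists. */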
    param.sSpatialLayers[0].iVideoWidth        = param.iPicWidth;
    param.sSpatialLayers[0].iVideoHeight       = param.iPicHeight;
    param.sSpatialLayers[0].fFrameRate         = param.fMaxFrameRate;
    param.sSpatialLayers[0].iSpatialBitrate    = param.iTargetBitrate;
    param.sSpatialLayers[0].iMaxSpatialBitrate = param.iMaxBitrate;

#if OPENH264_VER_AT_LEAST(1, 7)
    if (avctx->sample_aspect_ratio.num && avctx->sample_aspect_ratio.den) {
        // Table E-1.
        static const AVRational sar_idc[] = {
            {   0,  0 }, // Unspecified (never written here).
            {   1,  1 }, {  12, 11 }, {  10, 11 }, {  16, 11 },
            {  40, 33 }, {  24, 11 }, {  20, 11 }, {  32, 11 },
            {  80, 33 }, {  18, 11 }, {  15, 11 }, {  64, 33 },
            { 160, 99 }, // Last 3 are unknown to openh264: { 4, 3 }, { 3, 2 }, { 2, 1 },
        };
        static const ESampleAspectRatio asp_idc[] = {
            ASP_UNSPECIFIED,
            ASP_1x1,    ASP_12x11,  ASP_10x11,  ASP_16x11,
            ASP_40x33,  ASP_24x11,  ASP_20x11,  ASP_32x11,
            ASP_80x33,  ASP_18x11,  ASP_15x11,  ASP_64x33,
            ASP_160x99,
        };
        int num, den, i;

        av_reduce(&num, &den, avctx->sample_aspect_ratio.num,
                  avctx->sample_aspect_ratio.den, 65535);

        for (i = 1; i < FF_ARRAY_ELEMS(sar_idc); i++) {
            if (num == sar_idc[i].num &&
                den == sar_idc[i].den)
                break;
        }

        if (i == FF_ARRAY_ELEMS(sar_idc)) {
            param.sSpatialLayers[0].eAspectRatio          = ASP_EXT_SAR;
            param.sSpatialLayers[0].sAspectRatioExtWidth  = num;
            param.sSpatialLayers[0].sAspectRatioExtHeight = den;
        } else {
            param.sSpatialLayers[0].eAspectRatio = asp_idc[i];
        }
        param.sSpatialLayers[0].bAspectRatioPresent = true;
    } else {
        param.sSpatialLayers[0].bAspectRatioPresent = false;
    }
#endif
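    /* Slice layout: a fixed slice count (-slices) and a per-NAL size limit
     * (-max_nal_size) are mutually exclusive; the former selects
     * SM_FIXEDSLCNUM_SLICE, the latter SM_SIZELIMITED_SLICE. */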
    if ((avctx->slices > 1) && (s->max_nal_size)) {
        av_log(avctx, AV_LOG_ERROR,
               "Invalid combination -slices %d and -max_nal_size %d.\n",
               avctx->slices, s->max_nal_size);
        return AVERROR(EINVAL);
    }

    if (avctx->slices > 1)
        s->slice_mode = SM_FIXEDSLCNUM_SLICE;

    if (s->max_nal_size)
        s->slice_mode = SM_SIZELIMITED_SLICE;

#if OPENH264_VER_AT_LEAST(1, 6)
    param.sSpatialLayers[0].sSliceArgument.uiSliceMode = s->slice_mode;
    param.sSpatialLayers[0].sSliceArgument.uiSliceNum  = avctx->slices;
#else
    param.sSpatialLayers[0].sSliceCfg.uiSliceMode               = s->slice_mode;
    param.sSpatialLayers[0].sSliceCfg.sSliceArgument.uiSliceNum = avctx->slices;
#endif
    if (avctx->slices == 0 && s->slice_mode == SM_FIXEDSLCNUM_SLICE)
        av_log(avctx, AV_LOG_WARNING, "Slice count will be set automatically\n");

    if (s->slice_mode == SM_SIZELIMITED_SLICE) {
        if (s->max_nal_size) {
            param.uiMaxNalSize = s->max_nal_size;
#if OPENH264_VER_AT_LEAST(1, 6)
            param.sSpatialLayers[0].sSliceArgument.uiSliceSizeConstraint = s->max_nal_size;
#else
            param.sSpatialLayers[0].sSliceCfg.sSliceArgument.uiSliceSizeConstraint = s->max_nal_size;
#endif
        } else {
            av_log(avctx, AV_LOG_ERROR, "Invalid -max_nal_size, "
                   "specify a valid max_nal_size to use -slice_mode dyn\n");
            return AVERROR(EINVAL);
        }
    }
    if ((*s->encoder)->InitializeExt(s->encoder, &param) != cmResultSuccess) {
        av_log(avctx, AV_LOG_ERROR, "Initialize failed\n");
        return AVERROR_UNKNOWN;
    }

    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        SFrameBSInfo fbi = { 0 };
        int i, size = 0;
        (*s->encoder)->EncodeParameterSets(s->encoder, &fbi);
        for (i = 0; i < fbi.sLayerInfo[0].iNalCount; i++)
            size += fbi.sLayerInfo[0].pNalLengthInByte[i];
        avctx->extradata = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
        if (!avctx->extradata)
            return AVERROR(ENOMEM);
        avctx->extradata_size = size;
        memcpy(avctx->extradata, fbi.sLayerInfo[0].pBsBuf, size);
    }

    props = ff_add_cpb_side_data(avctx);
    if (!props)
        return AVERROR(ENOMEM);
    props->max_bitrate = param.iMaxBitrate;
    props->avg_bitrate = param.iTargetBitrate;

    return 0;
}
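/*
 * Per-frame encode: wrap the input AVFrame in an SSourcePicture, force an IDR
 * when the caller requests an I-frame, then concatenate the bitstream layers
 * returned by EncodeFrame() into the output packet. With global headers
 * enabled, the SPS/PPS layer of IDR frames is skipped, since it already lives
 * in extradata.
 */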
static int svc_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                            const AVFrame *frame, int *got_packet)
{
    SVCContext *s = avctx->priv_data;
    SFrameBSInfo fbi = { 0 };
    int i, ret;
    int encoded;
    SSourcePicture sp = { 0 };
    int size = 0, layer, first_layer = 0;
    int layer_size[MAX_LAYER_NUM_OF_FRAME] = { 0 };

    sp.iColorFormat = videoFormatI420;
    for (i = 0; i < 3; i++) {
        sp.iStride[i] = frame->linesize[i];
        sp.pData[i]   = frame->data[i];
    }
    sp.iPicWidth  = avctx->width;
    sp.iPicHeight = avctx->height;

    if (frame->pict_type == AV_PICTURE_TYPE_I) {
        (*s->encoder)->ForceIntraFrame(s->encoder, true);
    }

    encoded = (*s->encoder)->EncodeFrame(s->encoder, &sp, &fbi);
    if (encoded != cmResultSuccess) {
        av_log(avctx, AV_LOG_ERROR, "EncodeFrame failed\n");
        return AVERROR_UNKNOWN;
    }
    if (fbi.eFrameType == videoFrameTypeSkip) {
        s->skipped++;
        av_log(avctx, AV_LOG_DEBUG, "frame skipped\n");
        return 0;
    }
    first_layer = 0;
    // Normal frames are returned with one single layer, while IDR
    // frames have two layers, where the first layer contains the SPS/PPS.
    // If using global headers, don't include the SPS/PPS in the returned
    // packet - thus, only return one layer.
    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)
        first_layer = fbi.iLayerNum - 1;

    for (layer = first_layer; layer < fbi.iLayerNum; layer++) {
        for (i = 0; i < fbi.sLayerInfo[layer].iNalCount; i++)
            layer_size[layer] += fbi.sLayerInfo[layer].pNalLengthInByte[i];
        size += layer_size[layer];
    }
    av_log(avctx, AV_LOG_DEBUG, "%d slices\n", fbi.sLayerInfo[fbi.iLayerNum - 1].iNalCount);

    if ((ret = ff_alloc_packet2(avctx, avpkt, size, size))) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
        return ret;
    }
    size = 0;
    for (layer = first_layer; layer < fbi.iLayerNum; layer++) {
        memcpy(avpkt->data + size, fbi.sLayerInfo[layer].pBsBuf, layer_size[layer]);
        size += layer_size[layer];
    }
    avpkt->pts = frame->pts;
    if (fbi.eFrameType == videoFrameTypeIDR)
        avpkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
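/* Mark bitrate, GOP size and qmin/qmax as "unset" so that svc_encode_init()
 * only overrides the libopenh264 defaults when the user supplied a value. */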
static const AVCodecDefault svc_enc_defaults[] = {
    { "b",    "0"  },
    { "g",    "-1" },
    { "qmin", "-1" },
    { "qmax", "-1" },
    { NULL },
};
AVCodec ff_libopenh264_encoder = {
    .name           = "libopenh264",
    .long_name      = NULL_IF_CONFIG_SMALL("OpenH264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .priv_data_size = sizeof(SVCContext),
    .init           = svc_encode_init,
    .encode2        = svc_encode_frame,
    .close          = svc_encode_close,
    .capabilities   = AV_CODEC_CAP_AUTO_THREADS,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P,
                                                    AV_PIX_FMT_NONE },
    .defaults       = svc_enc_defaults,
    .priv_class     = &class,
    .wrapper_name   = "libopenh264",
};