You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2617 lines
83KB

  1. /*
  2. * copyright (c) 2015 Rick Kern <kernrj@gmail.com>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include <VideoToolbox/VideoToolbox.h>
  21. #include <CoreVideo/CoreVideo.h>
  22. #include <CoreMedia/CoreMedia.h>
  23. #include <TargetConditionals.h>
  24. #include <Availability.h>
  25. #include "avcodec.h"
  26. #include "libavutil/opt.h"
  27. #include "libavutil/avassert.h"
  28. #include "libavutil/avstring.h"
  29. #include "libavcodec/avcodec.h"
  30. #include "libavutil/pixdesc.h"
  31. #include "internal.h"
  32. #include <pthread.h>
  33. #include "h264.h"
  34. #include "h264_sei.h"
  35. #include <dlfcn.h>
/* Fallback for SDKs whose CoreMedia headers predate HEVC support: define the
 * four-char codec type ('hvc1') ourselves so the code below still compiles. */
#if !HAVE_KCMVIDEOCODECTYPE_HEVC
enum { kCMVideoCodecType_HEVC = 'hvc1' };
#endif
/* Signature shared by CMVideoFormatDescriptionGetH264ParameterSetAtIndex and
 * its HEVC counterpart; lets one pointer serve both codecs. */
typedef OSStatus (*getParameterSetAtIndex)(CMFormatDescriptionRef videoDesc,
                                           size_t parameterSetIndex,
                                           const uint8_t * _Nullable *parameterSetPointerOut,
                                           size_t *parameterSetSizeOut,
                                           size_t *parameterSetCountOut,
                                           int *NALUnitHeaderLengthOut);
//These symbols may not be present at runtime on older OS releases, so they
//are resolved with dlsym() in loadVTEncSymbols() instead of being linked
//directly; missing CFString constants fall back to CFSTR literals.
static struct{
    CFStringRef kCVImageBufferColorPrimaries_ITU_R_2020;
    CFStringRef kCVImageBufferTransferFunction_ITU_R_2020;
    CFStringRef kCVImageBufferYCbCrMatrix_ITU_R_2020;

    CFStringRef kVTCompressionPropertyKey_H264EntropyMode;
    CFStringRef kVTH264EntropyMode_CAVLC;
    CFStringRef kVTH264EntropyMode_CABAC;

    CFStringRef kVTProfileLevel_H264_Baseline_4_0;
    CFStringRef kVTProfileLevel_H264_Baseline_4_2;
    CFStringRef kVTProfileLevel_H264_Baseline_5_0;
    CFStringRef kVTProfileLevel_H264_Baseline_5_1;
    CFStringRef kVTProfileLevel_H264_Baseline_5_2;
    CFStringRef kVTProfileLevel_H264_Baseline_AutoLevel;
    CFStringRef kVTProfileLevel_H264_Main_4_2;
    CFStringRef kVTProfileLevel_H264_Main_5_1;
    CFStringRef kVTProfileLevel_H264_Main_5_2;
    CFStringRef kVTProfileLevel_H264_Main_AutoLevel;
    CFStringRef kVTProfileLevel_H264_High_3_0;
    CFStringRef kVTProfileLevel_H264_High_3_1;
    CFStringRef kVTProfileLevel_H264_High_3_2;
    CFStringRef kVTProfileLevel_H264_High_4_0;
    CFStringRef kVTProfileLevel_H264_High_4_1;
    CFStringRef kVTProfileLevel_H264_High_4_2;
    CFStringRef kVTProfileLevel_H264_High_5_1;
    CFStringRef kVTProfileLevel_H264_High_5_2;
    CFStringRef kVTProfileLevel_H264_High_AutoLevel;
    CFStringRef kVTProfileLevel_HEVC_Main_AutoLevel;
    CFStringRef kVTProfileLevel_HEVC_Main10_AutoLevel;

    CFStringRef kVTCompressionPropertyKey_RealTime;

    CFStringRef kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder;
    CFStringRef kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder;

    /* NULL when the running CoreMedia has no HEVC support. */
    getParameterSetAtIndex CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
} compat_keys;
/*
 * Resolve a weak CFString constant by name via dlsym(). When the symbol is
 * absent from the loaded frameworks, substitute a CFSTR literal holding
 * defaultVal (presumably the string value the real constant carries on
 * platforms that define it — TODO confirm against Apple headers).
 */
#define GET_SYM(symbol, defaultVal)                                     \
do{                                                                     \
    CFStringRef* handle = (CFStringRef*)dlsym(RTLD_DEFAULT, #symbol);   \
    if(!handle)                                                         \
        compat_keys.symbol = CFSTR(defaultVal);                         \
    else                                                                \
        compat_keys.symbol = *handle;                                   \
}while(0)
/* Guards one-time symbol resolution (run via pthread_once at encoder init). */
static pthread_once_t once_ctrl = PTHREAD_ONCE_INIT;

/* Populates compat_keys: looks up every possibly-missing VideoToolbox /
 * CoreVideo symbol at runtime so the encoder can run on older OS versions. */
static void loadVTEncSymbols(){
    /* Function pointer: stays NULL when HEVC is unavailable. */
    compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex =
        (getParameterSetAtIndex)dlsym(
            RTLD_DEFAULT,
            "CMVideoFormatDescriptionGetHEVCParameterSetAtIndex"
        );

    GET_SYM(kCVImageBufferColorPrimaries_ITU_R_2020,   "ITU_R_2020");
    GET_SYM(kCVImageBufferTransferFunction_ITU_R_2020, "ITU_R_2020");
    GET_SYM(kCVImageBufferYCbCrMatrix_ITU_R_2020,      "ITU_R_2020");

    GET_SYM(kVTCompressionPropertyKey_H264EntropyMode, "H264EntropyMode");
    GET_SYM(kVTH264EntropyMode_CAVLC, "CAVLC");
    GET_SYM(kVTH264EntropyMode_CABAC, "CABAC");

    GET_SYM(kVTProfileLevel_H264_Baseline_4_0,       "H264_Baseline_4_0");
    GET_SYM(kVTProfileLevel_H264_Baseline_4_2,       "H264_Baseline_4_2");
    GET_SYM(kVTProfileLevel_H264_Baseline_5_0,       "H264_Baseline_5_0");
    GET_SYM(kVTProfileLevel_H264_Baseline_5_1,       "H264_Baseline_5_1");
    GET_SYM(kVTProfileLevel_H264_Baseline_5_2,       "H264_Baseline_5_2");
    GET_SYM(kVTProfileLevel_H264_Baseline_AutoLevel, "H264_Baseline_AutoLevel");
    GET_SYM(kVTProfileLevel_H264_Main_4_2,           "H264_Main_4_2");
    GET_SYM(kVTProfileLevel_H264_Main_5_1,           "H264_Main_5_1");
    GET_SYM(kVTProfileLevel_H264_Main_5_2,           "H264_Main_5_2");
    GET_SYM(kVTProfileLevel_H264_Main_AutoLevel,     "H264_Main_AutoLevel");
    GET_SYM(kVTProfileLevel_H264_High_3_0,           "H264_High_3_0");
    GET_SYM(kVTProfileLevel_H264_High_3_1,           "H264_High_3_1");
    GET_SYM(kVTProfileLevel_H264_High_3_2,           "H264_High_3_2");
    GET_SYM(kVTProfileLevel_H264_High_4_0,           "H264_High_4_0");
    GET_SYM(kVTProfileLevel_H264_High_4_1,           "H264_High_4_1");
    GET_SYM(kVTProfileLevel_H264_High_4_2,           "H264_High_4_2");
    GET_SYM(kVTProfileLevel_H264_High_5_1,           "H264_High_5_1");
    GET_SYM(kVTProfileLevel_H264_High_5_2,           "H264_High_5_2");
    GET_SYM(kVTProfileLevel_H264_High_AutoLevel,     "H264_High_AutoLevel");
    GET_SYM(kVTProfileLevel_HEVC_Main_AutoLevel,     "HEVC_Main_AutoLevel");
    GET_SYM(kVTProfileLevel_HEVC_Main10_AutoLevel,   "HEVC_Main10_AutoLevel");

    GET_SYM(kVTCompressionPropertyKey_RealTime, "RealTime");

    GET_SYM(kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
            "EnableHardwareAcceleratedVideoEncoder");
    GET_SYM(kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
            "RequireHardwareAcceleratedVideoEncoder");
}
/* Values of the "profile" AVOption for the H.264 encoder. */
typedef enum VT_H264Profile {
    H264_PROF_AUTO,
    H264_PROF_BASELINE,
    H264_PROF_MAIN,
    H264_PROF_HIGH,
    H264_PROF_COUNT
} VT_H264Profile;
/* Values of the "coder" AVOption (H.264 entropy coding mode). */
typedef enum VTH264Entropy{
    VT_ENTROPY_NOT_SET,
    VT_CAVLC,
    VT_CABAC
} VTH264Entropy;
/* Values of the "profile" AVOption for the HEVC encoder. */
typedef enum VT_HEVCProfile {
    HEVC_PROF_AUTO,
    HEVC_PROF_MAIN,
    HEVC_PROF_MAIN10,
    HEVC_PROF_COUNT
} VT_HEVCProfile;
/* Annex B start code prepended to each parameter set / NAL unit. */
static const uint8_t start_code[] = { 0, 0, 0, 1 };

/* SEI payload carried alongside a frame through the encoder (owned buffer). */
typedef struct ExtraSEI {
    void *data;
    size_t size;
} ExtraSEI;
/* Node of the singly linked FIFO of encoded samples awaiting output. */
typedef struct BufNode {
    CMSampleBufferRef cm_buffer;  // retained; released by the consumer
    ExtraSEI *sei;                // optional SEI, ownership travels with the node
    struct BufNode* next;
    int error;
} BufNode;
/* Private codec context for the VideoToolbox H.264/HEVC encoders. */
typedef struct VTEncContext {
    AVClass *class;
    enum AVCodecID codec_id;            // AV_CODEC_ID_H264 or AV_CODEC_ID_HEVC
    VTCompressionSessionRef session;

    /* CoreVideo color tags attached to pixel buffers (NULL = unspecified). */
    CFStringRef ycbcr_matrix;
    CFStringRef color_primaries;
    CFStringRef transfer_function;

    /* Parameter-set accessor for the active codec (H.264 linked directly,
     * HEVC resolved at runtime via compat_keys). */
    getParameterSetAtIndex get_param_set_func;

    /* lock guards q_head/q_tail/async_error; cv_sample_sent wakes poppers. */
    pthread_mutex_t lock;
    pthread_cond_t  cv_sample_sent;

    int async_error;                    // sticky error set from the VT callback

    BufNode *q_head;                    // FIFO of encoded samples
    BufNode *q_tail;

    int64_t frame_ct_out;               // frames dequeued so far
    int64_t frame_ct_in;                // frames submitted so far

    /* first_pts / dts_delta: presumably used to derive dts from pts when
     * B-frames reorder output — confirm in the encode path (not visible here). */
    int64_t first_pts;
    int64_t dts_delta;

    /* AVOption-backed settings. */
    int64_t profile;
    int64_t level;
    int64_t entropy;
    int64_t realtime;
    int64_t frames_before;
    int64_t frames_after;

    int64_t allow_sw;                   // permit software encoding fallback

    bool flushing;
    bool has_b_frames;
    bool warned_color_range;
    bool a53_cc;                        // emit A/53 closed captions as SEI
} VTEncContext;
/* Forward declaration; defined later in the file (outside this view).
 * Presumably pre-runs the encoder to obtain global-header extradata. */
static int vtenc_populate_extradata(AVCodecContext   *avctx,
                                    CMVideoCodecType codec_type,
                                    CFStringRef      profile_level,
                                    CFNumberRef      gamma_level,
                                    CFDictionaryRef  enc_info,
                                    CFDictionaryRef  pixel_buffer_info);
  191. /**
  192. * NULL-safe release of *refPtr, and sets value to NULL.
  193. */
  194. static void vt_release_num(CFNumberRef* refPtr){
  195. if (!*refPtr) {
  196. return;
  197. }
  198. CFRelease(*refPtr);
  199. *refPtr = NULL;
  200. }
  201. static void set_async_error(VTEncContext *vtctx, int err)
  202. {
  203. BufNode *info;
  204. pthread_mutex_lock(&vtctx->lock);
  205. vtctx->async_error = err;
  206. info = vtctx->q_head;
  207. vtctx->q_head = vtctx->q_tail = NULL;
  208. while (info) {
  209. BufNode *next = info->next;
  210. CFRelease(info->cm_buffer);
  211. av_free(info);
  212. info = next;
  213. }
  214. pthread_mutex_unlock(&vtctx->lock);
  215. }
/* Drops every queued output sample without flagging an error (err = 0). */
static void clear_frame_queue(VTEncContext *vtctx)
{
    set_async_error(vtctx, 0);
}
/**
 * Pops the next encoded sample from the output queue.
 *
 * @param wait block on cv_sample_sent until a sample (or error) arrives
 * @param buf  receives the sample, or NULL when none is available
 * @param sei  optional; on success takes ownership of the sample's SEI
 * @return 0 (including the "no buffer" case), or the pending async error
 */
static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf, ExtraSEI **sei)
{
    BufNode *info;

    pthread_mutex_lock(&vtctx->lock);

    if (vtctx->async_error) {
        pthread_mutex_unlock(&vtctx->lock);
        return vtctx->async_error;
    }

    /* Every submitted frame has been returned: flushing is complete. */
    if (vtctx->flushing && vtctx->frame_ct_in == vtctx->frame_ct_out) {
        *buf = NULL;
        pthread_mutex_unlock(&vtctx->lock);
        return 0;
    }

    while (!vtctx->q_head && !vtctx->async_error && wait) {
        pthread_cond_wait(&vtctx->cv_sample_sent, &vtctx->lock);
    }

    if (!vtctx->q_head) {
        pthread_mutex_unlock(&vtctx->lock);
        *buf = NULL;
        return 0;
    }

    /* Unlink the head node; reset the tail when the queue becomes empty. */
    info = vtctx->q_head;
    vtctx->q_head = vtctx->q_head->next;
    if (!vtctx->q_head) {
        vtctx->q_tail = NULL;
    }

    pthread_mutex_unlock(&vtctx->lock);

    *buf = info->cm_buffer;
    if (sei && *buf) {
        /* Ownership of the SEI transfers to the caller. */
        *sei = info->sei;
    } else if (info->sei) {
        /* Caller did not ask for SEI: release it here. */
        if (info->sei->data) av_free(info->sei->data);
        av_free(info->sei);
    }
    av_free(info);

    /* NOTE(review): updated outside the lock — presumably only one consumer
     * thread ever pops; confirm. */
    vtctx->frame_ct_out++;

    return 0;
}
/*
 * Appends an encoded sample to the output queue (retains the buffer, takes
 * ownership of sei). On allocation failure the error is recorded via
 * set_async_error() instead of being returned.
 */
static void vtenc_q_push(VTEncContext *vtctx, CMSampleBufferRef buffer, ExtraSEI *sei)
{
    BufNode *info = av_malloc(sizeof(BufNode));
    if (!info) {
        set_async_error(vtctx, AVERROR(ENOMEM));
        return;
    }

    CFRetain(buffer);
    info->cm_buffer = buffer;
    info->sei = sei;
    info->next = NULL;

    pthread_mutex_lock(&vtctx->lock);
    /* Signaling before linking is safe: the waiter cannot proceed until the
     * mutex is released below, by which time the node is in the queue. */
    pthread_cond_signal(&vtctx->cv_sample_sent);

    if (!vtctx->q_head) {
        vtctx->q_head = info;
    } else {
        vtctx->q_tail->next = info;
    }

    vtctx->q_tail = info;

    pthread_mutex_unlock(&vtctx->lock);
}
  279. static int count_nalus(size_t length_code_size,
  280. CMSampleBufferRef sample_buffer,
  281. int *count)
  282. {
  283. size_t offset = 0;
  284. int status;
  285. int nalu_ct = 0;
  286. uint8_t size_buf[4];
  287. size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
  288. CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
  289. if (length_code_size > 4)
  290. return AVERROR_INVALIDDATA;
  291. while (offset < src_size) {
  292. size_t curr_src_len;
  293. size_t box_len = 0;
  294. size_t i;
  295. status = CMBlockBufferCopyDataBytes(block,
  296. offset,
  297. length_code_size,
  298. size_buf);
  299. for (i = 0; i < length_code_size; i++) {
  300. box_len <<= 8;
  301. box_len |= size_buf[i];
  302. }
  303. curr_src_len = box_len + length_code_size;
  304. offset += curr_src_len;
  305. nalu_ct++;
  306. }
  307. *count = nalu_ct;
  308. return 0;
  309. }
  310. static CMVideoCodecType get_cm_codec_type(enum AVCodecID id)
  311. {
  312. switch (id) {
  313. case AV_CODEC_ID_H264: return kCMVideoCodecType_H264;
  314. case AV_CODEC_ID_HEVC: return kCMVideoCodecType_HEVC;
  315. default: return 0;
  316. }
  317. }
/**
 * Computes the combined size, in bytes, of all parameter sets in vid_fmt,
 * with an Annex B start code counted before each set.
 * On success, *size receives the total byte count.
 */
static int get_params_size(
    AVCodecContext              *avctx,
    CMVideoFormatDescriptionRef vid_fmt,
    size_t                      *size)
{
    VTEncContext *vtctx = avctx->priv_data;
    size_t total_size = 0;
    size_t ps_count;
    int is_count_bad = 0;
    size_t i;
    int status;

    /* First ask only for the parameter-set count. */
    status = vtctx->get_param_set_func(vid_fmt,
                                       0,
                                       NULL,
                                       NULL,
                                       &ps_count,
                                       NULL);
    if (status) {
        /* Some implementations can't report the count up front; iterate
         * until the per-index query fails instead. */
        is_count_bad = 1;
        ps_count     = 0;
        status       = 0;
    }

    for (i = 0; i < ps_count || is_count_bad; i++) {
        const uint8_t *ps;
        size_t ps_size;
        status = vtctx->get_param_set_func(vid_fmt,
                                           i,
                                           &ps,
                                           &ps_size,
                                           NULL,
                                           NULL);
        if (status) {
            /*
             * When ps_count is invalid, status != 0 ends the loop normally
             * unless we didn't get any parameter sets.
             */
            if (i > 0 && is_count_bad) status = 0;

            break;
        }

        /* Each set is emitted with a 4-byte Annex B start code. */
        total_size += ps_size + sizeof(start_code);
    }

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error getting parameter set sizes: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    *size = total_size;
    return 0;
}
/*
 * Writes all parameter sets of vid_fmt into dst in Annex B form (start code
 * before each set). dst must hold at least the size computed by
 * get_params_size(); dst_size is validated per set.
 */
static int copy_param_sets(
    AVCodecContext              *avctx,
    CMVideoFormatDescriptionRef vid_fmt,
    uint8_t                     *dst,
    size_t                      dst_size)
{
    VTEncContext *vtctx = avctx->priv_data;
    size_t ps_count;
    int is_count_bad = 0;
    int status;
    size_t offset = 0;
    size_t i;

    status = vtctx->get_param_set_func(vid_fmt,
                                       0,
                                       NULL,
                                       NULL,
                                       &ps_count,
                                       NULL);
    if (status) {
        /* Count unavailable: iterate until the per-index query fails
         * (same strategy as get_params_size()). */
        is_count_bad = 1;
        ps_count     = 0;
        status       = 0;
    }

    for (i = 0; i < ps_count || is_count_bad; i++) {
        const uint8_t *ps;
        size_t ps_size;
        size_t next_offset;

        status = vtctx->get_param_set_func(vid_fmt,
                                           i,
                                           &ps,
                                           &ps_size,
                                           NULL,
                                           NULL);
        if (status) {
            /* End of sets is a normal exit when the count was unknown,
             * provided at least one set was copied. */
            if (i > 0 && is_count_bad) status = 0;

            break;
        }

        next_offset = offset + sizeof(start_code) + ps_size;
        if (dst_size < next_offset) {
            av_log(avctx, AV_LOG_ERROR, "Error: buffer too small for parameter sets.\n");
            return AVERROR_BUFFER_TOO_SMALL;
        }

        memcpy(dst + offset, start_code, sizeof(start_code));
        offset += sizeof(start_code);

        memcpy(dst + offset, ps, ps_size);
        offset = next_offset;
    }

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error getting parameter set data: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    return 0;
}
/*
 * Builds avctx->extradata (Annex B parameter sets) from the first encoded
 * sample. Called once from the output callback when GLOBAL_HEADER is set.
 * NOTE(review): on copy failure the allocated extradata is left in avctx —
 * presumably freed by the generic close path; confirm.
 */
static int set_extradata(AVCodecContext *avctx, CMSampleBufferRef sample_buffer)
{
    CMVideoFormatDescriptionRef vid_fmt;
    size_t total_size;
    int status;

    vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
    if (!vid_fmt) {
        av_log(avctx, AV_LOG_ERROR, "No video format.\n");
        return AVERROR_EXTERNAL;
    }

    status = get_params_size(avctx, vid_fmt, &total_size);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Could not get parameter sets.\n");
        return status;
    }

    avctx->extradata = av_mallocz(total_size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!avctx->extradata) {
        return AVERROR(ENOMEM);
    }
    avctx->extradata_size = total_size;

    status = copy_param_sets(avctx, vid_fmt, avctx->extradata, total_size);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Could not copy param sets.\n");
        return status;
    }

    return 0;
}
/*
 * VTCompressionSession output callback (runs on a VideoToolbox thread).
 * sourceFrameCtx carries the ExtraSEI attached at submission time; on success
 * the sample and SEI are queued for the consumer.
 * NOTE(review): on the error/early-return paths the SEI appears to be
 * leaked — verify ownership expectations of the submitting code.
 */
static void vtenc_output_callback(
    void              *ctx,
    void              *sourceFrameCtx,
    OSStatus          status,
    VTEncodeInfoFlags flags,
    CMSampleBufferRef sample_buffer)
{
    AVCodecContext *avctx = ctx;
    VTEncContext   *vtctx = avctx->priv_data;
    ExtraSEI *sei = sourceFrameCtx;

    /* A previous error poisons the stream: drop everything that follows. */
    if (vtctx->async_error) {
        if(sample_buffer) CFRelease(sample_buffer);
        return;
    }

    if (status || !sample_buffer) {
        av_log(avctx, AV_LOG_ERROR, "Error encoding frame: %d\n", (int)status);
        set_async_error(vtctx, AVERROR_EXTERNAL);
        return;
    }

    /* First sample with GLOBAL_HEADER requested: derive extradata from it. */
    if (!avctx->extradata && (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
        int set_status = set_extradata(avctx, sample_buffer);
        if (set_status) {
            set_async_error(vtctx, set_status);
            return;
        }
    }

    vtenc_q_push(vtctx, sample_buffer, sei);
}
/*
 * Queries the NALU length-prefix size (in bytes) of the sample's format
 * description and stores it in *size.
 */
static int get_length_code_size(
    AVCodecContext    *avctx,
    CMSampleBufferRef sample_buffer,
    size_t            *size)
{
    VTEncContext *vtctx = avctx->priv_data;
    CMVideoFormatDescriptionRef vid_fmt;
    int isize;   // API reports the header length as an int
    int status;

    vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
    if (!vid_fmt) {
        av_log(avctx, AV_LOG_ERROR, "Error getting buffer format description.\n");
        return AVERROR_EXTERNAL;
    }

    status = vtctx->get_param_set_func(vid_fmt,
                                       0,
                                       NULL,
                                       NULL,
                                       NULL,
                                       &isize);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error getting length code size: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    *size = isize;
    return 0;
}
/*
 * Returns true on success.
 *
 * If profile_level_val is NULL and this method returns true, don't specify the
 * profile/level to the encoder.
 */
static bool get_vt_h264_profile_level(AVCodecContext *avctx,
                                      CFStringRef    *profile_level_val)
{
    VTEncContext *vtctx = avctx->priv_data;
    int64_t profile = vtctx->profile;

    if (profile == H264_PROF_AUTO && vtctx->level) {
        //Need to pick a profile if level is not auto-selected.
        profile = vtctx->has_b_frames ? H264_PROF_MAIN : H264_PROF_BASELINE;
    }

    *profile_level_val = NULL;

    /* Levels are encoded as major*10+minor (e.g. 41 = 4.1); 0 = auto.
     * Constants fetched through compat_keys may be missing on older SDKs,
     * the rest are linked directly. */
    switch (profile) {
        case H264_PROF_AUTO:
            return true;

        case H264_PROF_BASELINE:
            switch (vtctx->level) {
                case  0: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_AutoLevel; break;
                case 13: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3;       break;
                case 30: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0;       break;
                case 31: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1;       break;
                case 32: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2;       break;
                case 40: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_4_0;       break;
                case 41: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1;       break;
                case 42: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_4_2;       break;
                case 50: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_5_0;       break;
                case 51: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_5_1;       break;
                case 52: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_5_2;       break;
            }
            break;

        case H264_PROF_MAIN:
            switch (vtctx->level) {
                case  0: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Main_AutoLevel; break;
                case 30: *profile_level_val = kVTProfileLevel_H264_Main_3_0;       break;
                case 31: *profile_level_val = kVTProfileLevel_H264_Main_3_1;       break;
                case 32: *profile_level_val = kVTProfileLevel_H264_Main_3_2;       break;
                case 40: *profile_level_val = kVTProfileLevel_H264_Main_4_0;       break;
                case 41: *profile_level_val = kVTProfileLevel_H264_Main_4_1;       break;
                case 42: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Main_4_2;       break;
                case 50: *profile_level_val = kVTProfileLevel_H264_Main_5_0;       break;
                case 51: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Main_5_1;       break;
                case 52: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Main_5_2;       break;
            }
            break;

        case H264_PROF_HIGH:
            switch (vtctx->level) {
                case  0: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_AutoLevel; break;
                case 30: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_3_0;       break;
                case 31: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_3_1;       break;
                case 32: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_3_2;       break;
                case 40: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_4_0;       break;
                case 41: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_4_1;       break;
                case 42: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_4_2;       break;
                case 50: *profile_level_val = kVTProfileLevel_H264_High_5_0;       break;
                case 51: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_5_1;       break;
                case 52: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_5_2;       break;
            }
            break;
    }

    /* Unrecognized profile/level combination. */
    if (!*profile_level_val) {
        av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
        return false;
    }

    return true;
}
  597. /*
  598. * Returns true on success.
  599. *
  600. * If profile_level_val is NULL and this method returns true, don't specify the
  601. * profile/level to the encoder.
  602. */
  603. static bool get_vt_hevc_profile_level(AVCodecContext *avctx,
  604. CFStringRef *profile_level_val)
  605. {
  606. VTEncContext *vtctx = avctx->priv_data;
  607. int64_t profile = vtctx->profile;
  608. *profile_level_val = NULL;
  609. switch (profile) {
  610. case HEVC_PROF_AUTO:
  611. return true;
  612. case HEVC_PROF_MAIN:
  613. *profile_level_val =
  614. compat_keys.kVTProfileLevel_HEVC_Main_AutoLevel;
  615. break;
  616. case HEVC_PROF_MAIN10:
  617. *profile_level_val =
  618. compat_keys.kVTProfileLevel_HEVC_Main10_AutoLevel;
  619. break;
  620. }
  621. if (!*profile_level_val) {
  622. av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
  623. return false;
  624. }
  625. return true;
  626. }
  627. static int get_cv_pixel_format(AVCodecContext* avctx,
  628. enum AVPixelFormat fmt,
  629. enum AVColorRange range,
  630. int* av_pixel_format,
  631. int* range_guessed)
  632. {
  633. if (range_guessed) *range_guessed = range != AVCOL_RANGE_MPEG &&
  634. range != AVCOL_RANGE_JPEG;
  635. //MPEG range is used when no range is set
  636. if (fmt == AV_PIX_FMT_NV12) {
  637. *av_pixel_format = range == AVCOL_RANGE_JPEG ?
  638. kCVPixelFormatType_420YpCbCr8BiPlanarFullRange :
  639. kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
  640. } else if (fmt == AV_PIX_FMT_YUV420P) {
  641. *av_pixel_format = range == AVCOL_RANGE_JPEG ?
  642. kCVPixelFormatType_420YpCbCr8PlanarFullRange :
  643. kCVPixelFormatType_420YpCbCr8Planar;
  644. } else {
  645. return AVERROR(EINVAL);
  646. }
  647. return 0;
  648. }
  649. static void add_color_attr(AVCodecContext *avctx, CFMutableDictionaryRef dict) {
  650. VTEncContext *vtctx = avctx->priv_data;
  651. if (vtctx->color_primaries) {
  652. CFDictionarySetValue(dict,
  653. kCVImageBufferColorPrimariesKey,
  654. vtctx->color_primaries);
  655. }
  656. if (vtctx->transfer_function) {
  657. CFDictionarySetValue(dict,
  658. kCVImageBufferTransferFunctionKey,
  659. vtctx->transfer_function);
  660. }
  661. if (vtctx->ycbcr_matrix) {
  662. CFDictionarySetValue(dict,
  663. kCVImageBufferYCbCrMatrixKey,
  664. vtctx->ycbcr_matrix);
  665. }
  666. }
  667. static int create_cv_pixel_buffer_info(AVCodecContext* avctx,
  668. CFMutableDictionaryRef* dict)
  669. {
  670. CFNumberRef cv_color_format_num = NULL;
  671. CFNumberRef width_num = NULL;
  672. CFNumberRef height_num = NULL;
  673. CFMutableDictionaryRef pixel_buffer_info = NULL;
  674. int cv_color_format;
  675. int status = get_cv_pixel_format(avctx,
  676. avctx->pix_fmt,
  677. avctx->color_range,
  678. &cv_color_format,
  679. NULL);
  680. if (status) return status;
  681. pixel_buffer_info = CFDictionaryCreateMutable(
  682. kCFAllocatorDefault,
  683. 20,
  684. &kCFCopyStringDictionaryKeyCallBacks,
  685. &kCFTypeDictionaryValueCallBacks);
  686. if (!pixel_buffer_info) goto pbinfo_nomem;
  687. cv_color_format_num = CFNumberCreate(kCFAllocatorDefault,
  688. kCFNumberSInt32Type,
  689. &cv_color_format);
  690. if (!cv_color_format_num) goto pbinfo_nomem;
  691. CFDictionarySetValue(pixel_buffer_info,
  692. kCVPixelBufferPixelFormatTypeKey,
  693. cv_color_format_num);
  694. vt_release_num(&cv_color_format_num);
  695. width_num = CFNumberCreate(kCFAllocatorDefault,
  696. kCFNumberSInt32Type,
  697. &avctx->width);
  698. if (!width_num) return AVERROR(ENOMEM);
  699. CFDictionarySetValue(pixel_buffer_info,
  700. kCVPixelBufferWidthKey,
  701. width_num);
  702. vt_release_num(&width_num);
  703. height_num = CFNumberCreate(kCFAllocatorDefault,
  704. kCFNumberSInt32Type,
  705. &avctx->height);
  706. if (!height_num) goto pbinfo_nomem;
  707. CFDictionarySetValue(pixel_buffer_info,
  708. kCVPixelBufferHeightKey,
  709. height_num);
  710. vt_release_num(&height_num);
  711. add_color_attr(avctx, pixel_buffer_info);
  712. *dict = pixel_buffer_info;
  713. return 0;
  714. pbinfo_nomem:
  715. vt_release_num(&cv_color_format_num);
  716. vt_release_num(&width_num);
  717. vt_release_num(&height_num);
  718. if (pixel_buffer_info) CFRelease(pixel_buffer_info);
  719. return AVERROR(ENOMEM);
  720. }
  721. static int get_cv_color_primaries(AVCodecContext *avctx,
  722. CFStringRef *primaries)
  723. {
  724. enum AVColorPrimaries pri = avctx->color_primaries;
  725. switch (pri) {
  726. case AVCOL_PRI_UNSPECIFIED:
  727. *primaries = NULL;
  728. break;
  729. case AVCOL_PRI_BT709:
  730. *primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
  731. break;
  732. case AVCOL_PRI_BT2020:
  733. *primaries = compat_keys.kCVImageBufferColorPrimaries_ITU_R_2020;
  734. break;
  735. default:
  736. av_log(avctx, AV_LOG_ERROR, "Color primaries %s is not supported.\n", av_color_primaries_name(pri));
  737. *primaries = NULL;
  738. return -1;
  739. }
  740. return 0;
  741. }
  742. static int get_cv_transfer_function(AVCodecContext *avctx,
  743. CFStringRef *transfer_fnc,
  744. CFNumberRef *gamma_level)
  745. {
  746. enum AVColorTransferCharacteristic trc = avctx->color_trc;
  747. Float32 gamma;
  748. *gamma_level = NULL;
  749. switch (trc) {
  750. case AVCOL_TRC_UNSPECIFIED:
  751. *transfer_fnc = NULL;
  752. break;
  753. case AVCOL_TRC_BT709:
  754. *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_709_2;
  755. break;
  756. case AVCOL_TRC_SMPTE240M:
  757. *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_240M_1995;
  758. break;
  759. case AVCOL_TRC_GAMMA22:
  760. gamma = 2.2;
  761. *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
  762. *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
  763. break;
  764. case AVCOL_TRC_GAMMA28:
  765. gamma = 2.8;
  766. *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
  767. *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
  768. break;
  769. case AVCOL_TRC_BT2020_10:
  770. case AVCOL_TRC_BT2020_12:
  771. *transfer_fnc = compat_keys.kCVImageBufferTransferFunction_ITU_R_2020;
  772. break;
  773. default:
  774. av_log(avctx, AV_LOG_ERROR, "Transfer function %s is not supported.\n", av_color_transfer_name(trc));
  775. return -1;
  776. }
  777. return 0;
  778. }
  779. static int get_cv_ycbcr_matrix(AVCodecContext *avctx, CFStringRef *matrix) {
  780. switch(avctx->colorspace) {
  781. case AVCOL_SPC_BT709:
  782. *matrix = kCVImageBufferYCbCrMatrix_ITU_R_709_2;
  783. break;
  784. case AVCOL_SPC_UNSPECIFIED:
  785. *matrix = NULL;
  786. break;
  787. case AVCOL_SPC_BT470BG:
  788. case AVCOL_SPC_SMPTE170M:
  789. *matrix = kCVImageBufferYCbCrMatrix_ITU_R_601_4;
  790. break;
  791. case AVCOL_SPC_SMPTE240M:
  792. *matrix = kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
  793. break;
  794. case AVCOL_SPC_BT2020_NCL:
  795. *matrix = compat_keys.kCVImageBufferYCbCrMatrix_ITU_R_2020;
  796. break;
  797. default:
  798. av_log(avctx, AV_LOG_ERROR, "Color space %s is not supported.\n", av_color_space_name(avctx->colorspace));
  799. return -1;
  800. }
  801. return 0;
  802. }
  803. static int vtenc_create_encoder(AVCodecContext *avctx,
  804. CMVideoCodecType codec_type,
  805. CFStringRef profile_level,
  806. CFNumberRef gamma_level,
  807. CFDictionaryRef enc_info,
  808. CFDictionaryRef pixel_buffer_info,
  809. VTCompressionSessionRef *session)
  810. {
  811. VTEncContext *vtctx = avctx->priv_data;
  812. SInt32 bit_rate = avctx->bit_rate;
  813. SInt32 max_rate = avctx->rc_max_rate;
  814. CFNumberRef bit_rate_num;
  815. CFNumberRef bytes_per_second;
  816. CFNumberRef one_second;
  817. CFArrayRef data_rate_limits;
  818. int64_t bytes_per_second_value = 0;
  819. int64_t one_second_value = 0;
  820. void *nums[2];
  821. int status = VTCompressionSessionCreate(kCFAllocatorDefault,
  822. avctx->width,
  823. avctx->height,
  824. codec_type,
  825. enc_info,
  826. pixel_buffer_info,
  827. kCFAllocatorDefault,
  828. vtenc_output_callback,
  829. avctx,
  830. session);
  831. if (status || !vtctx->session) {
  832. av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status);
  833. #if !TARGET_OS_IPHONE
  834. if (!vtctx->allow_sw) {
  835. av_log(avctx, AV_LOG_ERROR, "Try -allow_sw 1. The hardware encoder may be busy, or not supported.\n");
  836. }
  837. #endif
  838. return AVERROR_EXTERNAL;
  839. }
  840. bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
  841. kCFNumberSInt32Type,
  842. &bit_rate);
  843. if (!bit_rate_num) return AVERROR(ENOMEM);
  844. status = VTSessionSetProperty(vtctx->session,
  845. kVTCompressionPropertyKey_AverageBitRate,
  846. bit_rate_num);
  847. CFRelease(bit_rate_num);
  848. if (status) {
  849. av_log(avctx, AV_LOG_ERROR, "Error setting bitrate property: %d\n", status);
  850. return AVERROR_EXTERNAL;
  851. }
  852. if (vtctx->codec_id == AV_CODEC_ID_H264) {
  853. // kVTCompressionPropertyKey_DataRateLimits is not available for HEVC
  854. bytes_per_second_value = max_rate >> 3;
  855. bytes_per_second = CFNumberCreate(kCFAllocatorDefault,
  856. kCFNumberSInt64Type,
  857. &bytes_per_second_value);
  858. if (!bytes_per_second) {
  859. return AVERROR(ENOMEM);
  860. }
  861. one_second_value = 1;
  862. one_second = CFNumberCreate(kCFAllocatorDefault,
  863. kCFNumberSInt64Type,
  864. &one_second_value);
  865. if (!one_second) {
  866. CFRelease(bytes_per_second);
  867. return AVERROR(ENOMEM);
  868. }
  869. nums[0] = (void *)bytes_per_second;
  870. nums[1] = (void *)one_second;
  871. data_rate_limits = CFArrayCreate(kCFAllocatorDefault,
  872. (const void **)nums,
  873. 2,
  874. &kCFTypeArrayCallBacks);
  875. if (!data_rate_limits) {
  876. CFRelease(bytes_per_second);
  877. CFRelease(one_second);
  878. return AVERROR(ENOMEM);
  879. }
  880. status = VTSessionSetProperty(vtctx->session,
  881. kVTCompressionPropertyKey_DataRateLimits,
  882. data_rate_limits);
  883. CFRelease(bytes_per_second);
  884. CFRelease(one_second);
  885. CFRelease(data_rate_limits);
  886. if (status) {
  887. av_log(avctx, AV_LOG_ERROR, "Error setting max bitrate property: %d\n", status);
  888. return AVERROR_EXTERNAL;
  889. }
  890. if (profile_level) {
  891. status = VTSessionSetProperty(vtctx->session,
  892. kVTCompressionPropertyKey_ProfileLevel,
  893. profile_level);
  894. if (status) {
  895. av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d\n", status);
  896. }
  897. }
  898. }
  899. if (avctx->gop_size > 0) {
  900. CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault,
  901. kCFNumberIntType,
  902. &avctx->gop_size);
  903. if (!interval) {
  904. return AVERROR(ENOMEM);
  905. }
  906. status = VTSessionSetProperty(vtctx->session,
  907. kVTCompressionPropertyKey_MaxKeyFrameInterval,
  908. interval);
  909. CFRelease(interval);
  910. if (status) {
  911. av_log(avctx, AV_LOG_ERROR, "Error setting 'max key-frame interval' property: %d\n", status);
  912. return AVERROR_EXTERNAL;
  913. }
  914. }
  915. if (vtctx->frames_before) {
  916. status = VTSessionSetProperty(vtctx->session,
  917. kVTCompressionPropertyKey_MoreFramesBeforeStart,
  918. kCFBooleanTrue);
  919. if (status == kVTPropertyNotSupportedErr) {
  920. av_log(avctx, AV_LOG_WARNING, "frames_before property is not supported on this device. Ignoring.\n");
  921. } else if (status) {
  922. av_log(avctx, AV_LOG_ERROR, "Error setting frames_before property: %d\n", status);
  923. }
  924. }
  925. if (vtctx->frames_after) {
  926. status = VTSessionSetProperty(vtctx->session,
  927. kVTCompressionPropertyKey_MoreFramesAfterEnd,
  928. kCFBooleanTrue);
  929. if (status == kVTPropertyNotSupportedErr) {
  930. av_log(avctx, AV_LOG_WARNING, "frames_after property is not supported on this device. Ignoring.\n");
  931. } else if (status) {
  932. av_log(avctx, AV_LOG_ERROR, "Error setting frames_after property: %d\n", status);
  933. }
  934. }
  935. if (avctx->sample_aspect_ratio.num != 0) {
  936. CFNumberRef num;
  937. CFNumberRef den;
  938. CFMutableDictionaryRef par;
  939. AVRational *avpar = &avctx->sample_aspect_ratio;
  940. av_reduce(&avpar->num, &avpar->den,
  941. avpar->num, avpar->den,
  942. 0xFFFFFFFF);
  943. num = CFNumberCreate(kCFAllocatorDefault,
  944. kCFNumberIntType,
  945. &avpar->num);
  946. den = CFNumberCreate(kCFAllocatorDefault,
  947. kCFNumberIntType,
  948. &avpar->den);
  949. par = CFDictionaryCreateMutable(kCFAllocatorDefault,
  950. 2,
  951. &kCFCopyStringDictionaryKeyCallBacks,
  952. &kCFTypeDictionaryValueCallBacks);
  953. if (!par || !num || !den) {
  954. if (par) CFRelease(par);
  955. if (num) CFRelease(num);
  956. if (den) CFRelease(den);
  957. return AVERROR(ENOMEM);
  958. }
  959. CFDictionarySetValue(
  960. par,
  961. kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing,
  962. num);
  963. CFDictionarySetValue(
  964. par,
  965. kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing,
  966. den);
  967. status = VTSessionSetProperty(vtctx->session,
  968. kVTCompressionPropertyKey_PixelAspectRatio,
  969. par);
  970. CFRelease(par);
  971. CFRelease(num);
  972. CFRelease(den);
  973. if (status) {
  974. av_log(avctx,
  975. AV_LOG_ERROR,
  976. "Error setting pixel aspect ratio to %d:%d: %d.\n",
  977. avctx->sample_aspect_ratio.num,
  978. avctx->sample_aspect_ratio.den,
  979. status);
  980. return AVERROR_EXTERNAL;
  981. }
  982. }
  983. if (vtctx->transfer_function) {
  984. status = VTSessionSetProperty(vtctx->session,
  985. kVTCompressionPropertyKey_TransferFunction,
  986. vtctx->transfer_function);
  987. if (status) {
  988. av_log(avctx, AV_LOG_WARNING, "Could not set transfer function: %d\n", status);
  989. }
  990. }
  991. if (vtctx->ycbcr_matrix) {
  992. status = VTSessionSetProperty(vtctx->session,
  993. kVTCompressionPropertyKey_YCbCrMatrix,
  994. vtctx->ycbcr_matrix);
  995. if (status) {
  996. av_log(avctx, AV_LOG_WARNING, "Could not set ycbcr matrix: %d\n", status);
  997. }
  998. }
  999. if (vtctx->color_primaries) {
  1000. status = VTSessionSetProperty(vtctx->session,
  1001. kVTCompressionPropertyKey_ColorPrimaries,
  1002. vtctx->color_primaries);
  1003. if (status) {
  1004. av_log(avctx, AV_LOG_WARNING, "Could not set color primaries: %d\n", status);
  1005. }
  1006. }
  1007. if (gamma_level) {
  1008. status = VTSessionSetProperty(vtctx->session,
  1009. kCVImageBufferGammaLevelKey,
  1010. gamma_level);
  1011. if (status) {
  1012. av_log(avctx, AV_LOG_WARNING, "Could not set gamma level: %d\n", status);
  1013. }
  1014. }
  1015. if (!vtctx->has_b_frames) {
  1016. status = VTSessionSetProperty(vtctx->session,
  1017. kVTCompressionPropertyKey_AllowFrameReordering,
  1018. kCFBooleanFalse);
  1019. if (status) {
  1020. av_log(avctx, AV_LOG_ERROR, "Error setting 'allow frame reordering' property: %d\n", status);
  1021. return AVERROR_EXTERNAL;
  1022. }
  1023. }
  1024. if (vtctx->entropy != VT_ENTROPY_NOT_SET) {
  1025. CFStringRef entropy = vtctx->entropy == VT_CABAC ?
  1026. compat_keys.kVTH264EntropyMode_CABAC:
  1027. compat_keys.kVTH264EntropyMode_CAVLC;
  1028. status = VTSessionSetProperty(vtctx->session,
  1029. compat_keys.kVTCompressionPropertyKey_H264EntropyMode,
  1030. entropy);
  1031. if (status) {
  1032. av_log(avctx, AV_LOG_ERROR, "Error setting entropy property: %d\n", status);
  1033. }
  1034. }
  1035. if (vtctx->realtime) {
  1036. status = VTSessionSetProperty(vtctx->session,
  1037. compat_keys.kVTCompressionPropertyKey_RealTime,
  1038. kCFBooleanTrue);
  1039. if (status) {
  1040. av_log(avctx, AV_LOG_ERROR, "Error setting realtime property: %d\n", status);
  1041. }
  1042. }
  1043. status = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
  1044. if (status) {
  1045. av_log(avctx, AV_LOG_ERROR, "Error: cannot prepare encoder: %d\n", status);
  1046. return AVERROR_EXTERNAL;
  1047. }
  1048. return 0;
  1049. }
/**
 * Builds the VideoToolbox compression session for the configured codec.
 *
 * Maps the AVCodecID to a CMVideoCodecType, resolves the parameter-set
 * accessor and profile/level string for H.264 or HEVC, assembles the
 * encoder-specification dictionary and (for non-VideoToolbox pixel formats)
 * the pixel-buffer attributes, optionally pre-populates extradata when
 * global headers were requested, and finally creates the session into
 * vtctx->session.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int vtenc_configure_encoder(AVCodecContext *avctx)
{
    CFMutableDictionaryRef enc_info;
    CFMutableDictionaryRef pixel_buffer_info;
    CMVideoCodecType codec_type;
    VTEncContext *vtctx = avctx->priv_data;
    CFStringRef profile_level;
    CFNumberRef gamma_level = NULL;
    int status;

    codec_type = get_cm_codec_type(avctx->codec_id);
    if (!codec_type) {
        av_log(avctx, AV_LOG_ERROR, "Error: no mapping for AVCodecID %d\n", avctx->codec_id);
        return AVERROR(EINVAL);
    }

    vtctx->codec_id = avctx->codec_id;

    if (vtctx->codec_id == AV_CODEC_ID_H264) {
        vtctx->get_param_set_func = CMVideoFormatDescriptionGetH264ParameterSetAtIndex;

        vtctx->has_b_frames = avctx->max_b_frames > 0;
        /* Baseline profile has no B-slices; drop B-frames rather than fail. */
        if (vtctx->has_b_frames && vtctx->profile == H264_PROF_BASELINE) {
            av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n");
            vtctx->has_b_frames = false;
        }

        /* CABAC likewise requires main/high profile. */
        if (vtctx->entropy == VT_CABAC && vtctx->profile == H264_PROF_BASELINE) {
            av_log(avctx, AV_LOG_WARNING, "CABAC entropy requires 'main' or 'high' profile, but baseline was requested. Encode will not use CABAC entropy.\n");
            vtctx->entropy = VT_ENTROPY_NOT_SET;
        }

        if (!get_vt_h264_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
    } else {
        /* HEVC accessor is resolved at runtime; NULL means no HEVC support. */
        vtctx->get_param_set_func = compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
        if (!vtctx->get_param_set_func) return AVERROR(EINVAL);
        if (!get_vt_hevc_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
    }

    enc_info = CFDictionaryCreateMutable(
        kCFAllocatorDefault,
        20,
        &kCFCopyStringDictionaryKeyCallBacks,
        &kCFTypeDictionaryValueCallBacks
    );
    if (!enc_info) return AVERROR(ENOMEM);

#if !TARGET_OS_IPHONE
    /* On macOS, either require or merely enable the hardware encoder
     * depending on the allow_sw option. */
    if (!vtctx->allow_sw) {
        CFDictionarySetValue(enc_info,
                             compat_keys.kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
                             kCFBooleanTrue);
    } else {
        CFDictionarySetValue(enc_info,
                             compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
                             kCFBooleanTrue);
    }
#endif

    if (avctx->pix_fmt != AV_PIX_FMT_VIDEOTOOLBOX) {
        status = create_cv_pixel_buffer_info(avctx, &pixel_buffer_info);
        if (status)
            goto init_cleanup;
    } else {
        /* Frames already arrive as CVPixelBuffers; no attributes needed. */
        pixel_buffer_info = NULL;
    }

    vtctx->dts_delta = vtctx->has_b_frames ? -1 : 0;

    get_cv_transfer_function(avctx, &vtctx->transfer_function, &gamma_level);
    get_cv_ycbcr_matrix(avctx, &vtctx->ycbcr_matrix);
    get_cv_color_primaries(avctx, &vtctx->color_primaries);

    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        /* Runs a throwaway encode to extract parameter sets for extradata. */
        status = vtenc_populate_extradata(avctx,
                                          codec_type,
                                          profile_level,
                                          gamma_level,
                                          enc_info,
                                          pixel_buffer_info);
        if (status)
            goto init_cleanup;
    }

    status = vtenc_create_encoder(avctx,
                                  codec_type,
                                  profile_level,
                                  gamma_level,
                                  enc_info,
                                  pixel_buffer_info,
                                  &vtctx->session);

init_cleanup:
    /* All CF objects created above are owned here and must be released
     * on every exit path. */
    if (gamma_level)
        CFRelease(gamma_level);

    if (pixel_buffer_info)
        CFRelease(pixel_buffer_info);

    CFRelease(enc_info);

    return status;
}
  1136. static av_cold int vtenc_init(AVCodecContext *avctx)
  1137. {
  1138. VTEncContext *vtctx = avctx->priv_data;
  1139. CFBooleanRef has_b_frames_cfbool;
  1140. int status;
  1141. pthread_once(&once_ctrl, loadVTEncSymbols);
  1142. pthread_mutex_init(&vtctx->lock, NULL);
  1143. pthread_cond_init(&vtctx->cv_sample_sent, NULL);
  1144. vtctx->session = NULL;
  1145. status = vtenc_configure_encoder(avctx);
  1146. if (status) return status;
  1147. status = VTSessionCopyProperty(vtctx->session,
  1148. kVTCompressionPropertyKey_AllowFrameReordering,
  1149. kCFAllocatorDefault,
  1150. &has_b_frames_cfbool);
  1151. if (!status && has_b_frames_cfbool) {
  1152. //Some devices don't output B-frames for main profile, even if requested.
  1153. vtctx->has_b_frames = CFBooleanGetValue(has_b_frames_cfbool);
  1154. CFRelease(has_b_frames_cfbool);
  1155. }
  1156. avctx->has_b_frames = vtctx->has_b_frames;
  1157. return 0;
  1158. }
  1159. static void vtenc_get_frame_info(CMSampleBufferRef buffer, bool *is_key_frame)
  1160. {
  1161. CFArrayRef attachments;
  1162. CFDictionaryRef attachment;
  1163. CFBooleanRef not_sync;
  1164. CFIndex len;
  1165. attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
  1166. len = !attachments ? 0 : CFArrayGetCount(attachments);
  1167. if (!len) {
  1168. *is_key_frame = true;
  1169. return;
  1170. }
  1171. attachment = CFArrayGetValueAtIndex(attachments, 0);
  1172. if (CFDictionaryGetValueIfPresent(attachment,
  1173. kCMSampleAttachmentKey_NotSync,
  1174. (const void **)&not_sync))
  1175. {
  1176. *is_key_frame = !CFBooleanGetValue(not_sync);
  1177. } else {
  1178. *is_key_frame = true;
  1179. }
  1180. }
  1181. static int is_post_sei_nal_type(int nal_type){
  1182. return nal_type != H264_NAL_SEI &&
  1183. nal_type != H264_NAL_SPS &&
  1184. nal_type != H264_NAL_PPS &&
  1185. nal_type != H264_NAL_AUD;
  1186. }
  1187. /*
  1188. * Finds the sei message start/size of type find_sei_type.
  1189. * If more than one of that type exists, the last one is returned.
  1190. */
  1191. static int find_sei_end(AVCodecContext *avctx,
  1192. uint8_t *nal_data,
  1193. size_t nal_size,
  1194. uint8_t **sei_end)
  1195. {
  1196. int nal_type;
  1197. size_t sei_payload_size = 0;
  1198. int sei_payload_type = 0;
  1199. *sei_end = NULL;
  1200. uint8_t *nal_start = nal_data;
  1201. if (!nal_size)
  1202. return 0;
  1203. nal_type = *nal_data & 0x1F;
  1204. if (nal_type != H264_NAL_SEI)
  1205. return 0;
  1206. nal_data++;
  1207. nal_size--;
  1208. if (nal_data[nal_size - 1] == 0x80)
  1209. nal_size--;
  1210. while (nal_size > 0 && *nal_data > 0) {
  1211. do{
  1212. sei_payload_type += *nal_data;
  1213. nal_data++;
  1214. nal_size--;
  1215. } while (nal_size > 0 && *nal_data == 0xFF);
  1216. if (!nal_size) {
  1217. av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing type.\n");
  1218. return AVERROR_INVALIDDATA;
  1219. }
  1220. do{
  1221. sei_payload_size += *nal_data;
  1222. nal_data++;
  1223. nal_size--;
  1224. } while (nal_size > 0 && *nal_data == 0xFF);
  1225. if (nal_size < sei_payload_size) {
  1226. av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing size.\n");
  1227. return AVERROR_INVALIDDATA;
  1228. }
  1229. nal_data += sei_payload_size;
  1230. nal_size -= sei_payload_size;
  1231. }
  1232. *sei_end = nal_data;
  1233. return nal_data - nal_start + 1;
  1234. }
  1235. /**
  1236. * Copies the data inserting emulation prevention bytes as needed.
  1237. * Existing data in the destination can be taken into account by providing
  1238. * dst with a dst_offset > 0.
  1239. *
  1240. * @return The number of bytes copied on success. On failure, the negative of
  1241. * the number of bytes needed to copy src is returned.
  1242. */
  1243. static int copy_emulation_prev(const uint8_t *src,
  1244. size_t src_size,
  1245. uint8_t *dst,
  1246. ssize_t dst_offset,
  1247. size_t dst_size)
  1248. {
  1249. int zeros = 0;
  1250. int wrote_bytes;
  1251. uint8_t* dst_start;
  1252. uint8_t* dst_end = dst + dst_size;
  1253. const uint8_t* src_end = src + src_size;
  1254. int start_at = dst_offset > 2 ? dst_offset - 2 : 0;
  1255. int i;
  1256. for (i = start_at; i < dst_offset && i < dst_size; i++) {
  1257. if (!dst[i])
  1258. zeros++;
  1259. else
  1260. zeros = 0;
  1261. }
  1262. dst += dst_offset;
  1263. dst_start = dst;
  1264. for (; src < src_end; src++, dst++) {
  1265. if (zeros == 2) {
  1266. int insert_ep3_byte = *src <= 3;
  1267. if (insert_ep3_byte) {
  1268. if (dst < dst_end)
  1269. *dst = 3;
  1270. dst++;
  1271. }
  1272. zeros = 0;
  1273. }
  1274. if (dst < dst_end)
  1275. *dst = *src;
  1276. if (!*src)
  1277. zeros++;
  1278. else
  1279. zeros = 0;
  1280. }
  1281. wrote_bytes = dst - dst_start;
  1282. if (dst > dst_end)
  1283. return -wrote_bytes;
  1284. return wrote_bytes;
  1285. }
  1286. static int write_sei(const ExtraSEI *sei,
  1287. int sei_type,
  1288. uint8_t *dst,
  1289. size_t dst_size)
  1290. {
  1291. uint8_t *sei_start = dst;
  1292. size_t remaining_sei_size = sei->size;
  1293. size_t remaining_dst_size = dst_size;
  1294. int header_bytes;
  1295. int bytes_written;
  1296. ssize_t offset;
  1297. if (!remaining_dst_size)
  1298. return AVERROR_BUFFER_TOO_SMALL;
  1299. while (sei_type && remaining_dst_size != 0) {
  1300. int sei_byte = sei_type > 255 ? 255 : sei_type;
  1301. *dst = sei_byte;
  1302. sei_type -= sei_byte;
  1303. dst++;
  1304. remaining_dst_size--;
  1305. }
  1306. if (!dst_size)
  1307. return AVERROR_BUFFER_TOO_SMALL;
  1308. while (remaining_sei_size && remaining_dst_size != 0) {
  1309. int size_byte = remaining_sei_size > 255 ? 255 : remaining_sei_size;
  1310. *dst = size_byte;
  1311. remaining_sei_size -= size_byte;
  1312. dst++;
  1313. remaining_dst_size--;
  1314. }
  1315. if (remaining_dst_size < sei->size)
  1316. return AVERROR_BUFFER_TOO_SMALL;
  1317. header_bytes = dst - sei_start;
  1318. offset = header_bytes;
  1319. bytes_written = copy_emulation_prev(sei->data,
  1320. sei->size,
  1321. sei_start,
  1322. offset,
  1323. dst_size);
  1324. if (bytes_written < 0)
  1325. return AVERROR_BUFFER_TOO_SMALL;
  1326. bytes_written += header_bytes;
  1327. return bytes_written;
  1328. }
  1329. /**
  1330. * Copies NAL units and replaces length codes with
  1331. * H.264 Annex B start codes. On failure, the contents of
  1332. * dst_data may have been modified.
  1333. *
  1334. * @param length_code_size Byte length of each length code
  1335. * @param sample_buffer NAL units prefixed with length codes.
  1336. * @param sei Optional A53 closed captions SEI data.
  1337. * @param dst_data Must be zeroed before calling this function.
  1338. * Contains the copied NAL units prefixed with
  1339. * start codes when the function returns
  1340. * successfully.
  1341. * @param dst_size Length of dst_data
  1342. * @return 0 on success
  1343. * AVERROR_INVALIDDATA if length_code_size is invalid
  1344. * AVERROR_BUFFER_TOO_SMALL if dst_data is too small
  1345. * or if a length_code in src_data specifies data beyond
  1346. * the end of its buffer.
  1347. */
  1348. static int copy_replace_length_codes(
  1349. AVCodecContext *avctx,
  1350. size_t length_code_size,
  1351. CMSampleBufferRef sample_buffer,
  1352. ExtraSEI *sei,
  1353. uint8_t *dst_data,
  1354. size_t dst_size)
  1355. {
  1356. size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
  1357. size_t remaining_src_size = src_size;
  1358. size_t remaining_dst_size = dst_size;
  1359. size_t src_offset = 0;
  1360. int wrote_sei = 0;
  1361. int status;
  1362. uint8_t size_buf[4];
  1363. uint8_t nal_type;
  1364. CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
  1365. if (length_code_size > 4) {
  1366. return AVERROR_INVALIDDATA;
  1367. }
  1368. while (remaining_src_size > 0) {
  1369. size_t curr_src_len;
  1370. size_t curr_dst_len;
  1371. size_t box_len = 0;
  1372. size_t i;
  1373. uint8_t *dst_box;
  1374. status = CMBlockBufferCopyDataBytes(block,
  1375. src_offset,
  1376. length_code_size,
  1377. size_buf);
  1378. if (status) {
  1379. av_log(avctx, AV_LOG_ERROR, "Cannot copy length: %d\n", status);
  1380. return AVERROR_EXTERNAL;
  1381. }
  1382. status = CMBlockBufferCopyDataBytes(block,
  1383. src_offset + length_code_size,
  1384. 1,
  1385. &nal_type);
  1386. if (status) {
  1387. av_log(avctx, AV_LOG_ERROR, "Cannot copy type: %d\n", status);
  1388. return AVERROR_EXTERNAL;
  1389. }
  1390. nal_type &= 0x1F;
  1391. for (i = 0; i < length_code_size; i++) {
  1392. box_len <<= 8;
  1393. box_len |= size_buf[i];
  1394. }
  1395. if (sei && !wrote_sei && is_post_sei_nal_type(nal_type)) {
  1396. //No SEI NAL unit - insert.
  1397. int wrote_bytes;
  1398. memcpy(dst_data, start_code, sizeof(start_code));
  1399. dst_data += sizeof(start_code);
  1400. remaining_dst_size -= sizeof(start_code);
  1401. *dst_data = H264_NAL_SEI;
  1402. dst_data++;
  1403. remaining_dst_size--;
  1404. wrote_bytes = write_sei(sei,
  1405. H264_SEI_TYPE_USER_DATA_REGISTERED,
  1406. dst_data,
  1407. remaining_dst_size);
  1408. if (wrote_bytes < 0)
  1409. return wrote_bytes;
  1410. remaining_dst_size -= wrote_bytes;
  1411. dst_data += wrote_bytes;
  1412. if (remaining_dst_size <= 0)
  1413. return AVERROR_BUFFER_TOO_SMALL;
  1414. *dst_data = 0x80;
  1415. dst_data++;
  1416. remaining_dst_size--;
  1417. wrote_sei = 1;
  1418. }
  1419. curr_src_len = box_len + length_code_size;
  1420. curr_dst_len = box_len + sizeof(start_code);
  1421. if (remaining_src_size < curr_src_len) {
  1422. return AVERROR_BUFFER_TOO_SMALL;
  1423. }
  1424. if (remaining_dst_size < curr_dst_len) {
  1425. return AVERROR_BUFFER_TOO_SMALL;
  1426. }
  1427. dst_box = dst_data + sizeof(start_code);
  1428. memcpy(dst_data, start_code, sizeof(start_code));
  1429. status = CMBlockBufferCopyDataBytes(block,
  1430. src_offset + length_code_size,
  1431. box_len,
  1432. dst_box);
  1433. if (status) {
  1434. av_log(avctx, AV_LOG_ERROR, "Cannot copy data: %d\n", status);
  1435. return AVERROR_EXTERNAL;
  1436. }
  1437. if (sei && !wrote_sei && nal_type == H264_NAL_SEI) {
  1438. //Found SEI NAL unit - append.
  1439. int wrote_bytes;
  1440. int old_sei_length;
  1441. int extra_bytes;
  1442. uint8_t *new_sei;
  1443. old_sei_length = find_sei_end(avctx, dst_box, box_len, &new_sei);
  1444. if (old_sei_length < 0)
  1445. return status;
  1446. wrote_bytes = write_sei(sei,
  1447. H264_SEI_TYPE_USER_DATA_REGISTERED,
  1448. new_sei,
  1449. remaining_dst_size - old_sei_length);
  1450. if (wrote_bytes < 0)
  1451. return wrote_bytes;
  1452. if (new_sei + wrote_bytes >= dst_data + remaining_dst_size)
  1453. return AVERROR_BUFFER_TOO_SMALL;
  1454. new_sei[wrote_bytes++] = 0x80;
  1455. extra_bytes = wrote_bytes - (dst_box + box_len - new_sei);
  1456. dst_data += extra_bytes;
  1457. remaining_dst_size -= extra_bytes;
  1458. wrote_sei = 1;
  1459. }
  1460. src_offset += curr_src_len;
  1461. dst_data += curr_dst_len;
  1462. remaining_src_size -= curr_src_len;
  1463. remaining_dst_size -= curr_dst_len;
  1464. }
  1465. return 0;
  1466. }
  1467. /**
  1468. * Returns a sufficient number of bytes to contain the sei data.
  1469. * It may be greater than the minimum required.
  1470. */
  1471. static int get_sei_msg_bytes(const ExtraSEI* sei, int type){
  1472. int copied_size;
  1473. if (sei->size == 0)
  1474. return 0;
  1475. copied_size = -copy_emulation_prev(sei->data,
  1476. sei->size,
  1477. NULL,
  1478. 0,
  1479. 0);
  1480. if ((sei->size % 255) == 0) //may result in an extra byte
  1481. copied_size++;
  1482. return copied_size + sei->size / 255 + 1 + type / 255 + 1;
  1483. }
/**
 * Converts an encoded CMSampleBuffer into an AVPacket: allocates the packet,
 * optionally prepends in-band parameter sets on key frames, rewrites
 * length-prefixed NAL units to Annex B start codes (inserting the optional
 * SEI), sets the key-frame flag, and derives pts/dts from the sample's
 * CMTime values.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int vtenc_cm_to_avpacket(
    AVCodecContext    *avctx,
    CMSampleBufferRef sample_buffer,
    AVPacket          *pkt,
    ExtraSEI          *sei)
{
    VTEncContext *vtctx = avctx->priv_data;

    int     status;
    bool    is_key_frame;
    bool    add_header;
    size_t  length_code_size;
    size_t  header_size = 0;
    size_t  in_buf_size;
    size_t  out_buf_size;
    size_t  sei_nalu_size = 0;
    int64_t dts_delta;
    int64_t time_base_num;
    int nalu_count;
    CMTime  pts;
    CMTime  dts;
    CMVideoFormatDescriptionRef vid_fmt;

    vtenc_get_frame_info(sample_buffer, &is_key_frame);

    status = get_length_code_size(avctx, sample_buffer, &length_code_size);
    if (status) return status;

    /* Parameter sets go in-band only when global headers (extradata) were
     * not requested. */
    add_header = is_key_frame && !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);

    if (add_header) {
        vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
        if (!vid_fmt) {
            av_log(avctx, AV_LOG_ERROR, "Cannot get format description.\n");
            return AVERROR_EXTERNAL;
        }

        // NOTE(review): this inner 'status' shadows the outer one. Harmless
        // here because both are checked immediately, but easy to misread.
        int status = get_params_size(avctx, vid_fmt, &header_size);
        if (status) return status;
    }

    status = count_nalus(length_code_size, sample_buffer, &nalu_count);
    if (status)
        return status;

    if (sei) {
        size_t msg_size = get_sei_msg_bytes(sei,
                                            H264_SEI_TYPE_USER_DATA_REGISTERED);

        /* start code + NAL header byte + message + trailing 0x80 stop byte */
        sei_nalu_size = sizeof(start_code) + 1 + msg_size + 1;
    }

    in_buf_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
    /* Each NAL unit's length code is replaced by a start code, hence the
     * per-NALU size delta. */
    out_buf_size = header_size +
                   in_buf_size +
                   sei_nalu_size +
                   nalu_count * ((int)sizeof(start_code) - (int)length_code_size);

    status = ff_alloc_packet2(avctx, pkt, out_buf_size, out_buf_size);
    if (status < 0)
        return status;

    if (add_header) {
        status = copy_param_sets(avctx, vid_fmt, pkt->data, out_buf_size);
        if (status) return status;
    }

    status = copy_replace_length_codes(
        avctx,
        length_code_size,
        sample_buffer,
        sei,
        pkt->data + header_size,
        pkt->size - header_size
    );

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error copying packet data: %d\n", status);
        return status;
    }

    if (is_key_frame) {
        pkt->flags |= AV_PKT_FLAG_KEY;
    }

    pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
    dts = CMSampleBufferGetDecodeTimeStamp      (sample_buffer);

    /* Without B-frames decode order equals presentation order, so a missing
     * DTS can be substituted; with B-frames it is a hard error. */
    if (CMTIME_IS_INVALID(dts)) {
        if (!vtctx->has_b_frames) {
            dts = pts;
        } else {
            av_log(avctx, AV_LOG_ERROR, "DTS is invalid.\n");
            return AVERROR_EXTERNAL;
        }
    }

    /* dts_delta shifts DTS to keep it <= PTS when frame reordering is on. */
    dts_delta = vtctx->dts_delta >= 0 ? vtctx->dts_delta : 0;
    time_base_num = avctx->time_base.num;
    pkt->pts = pts.value / time_base_num;
    pkt->dts = dts.value / time_base_num - dts_delta;
    pkt->size = out_buf_size;

    return 0;
}
/*
 * Fills in the per-plane geometry (widths, heights, strides) and the Core
 * Video pixel format for the given frame.
 *
 * contiguous_buf_size is 0 if not contiguous, and the size of the buffer
 * containing all planes if so.
 */
static int get_cv_pixel_info(
    AVCodecContext *avctx,
    const AVFrame  *frame,
    int            *color,
    int            *plane_count,
    size_t         *widths,
    size_t         *heights,
    size_t         *strides,
    size_t         *contiguous_buf_size)
{
    VTEncContext *vtctx = avctx->priv_data;
    int av_format       = frame->format;
    int av_color_range  = frame->color_range;
    int i;
    int range_guessed;
    int status;

    status = get_cv_pixel_format(avctx, av_format, av_color_range, color, &range_guessed);
    if (status) {
        av_log(avctx,
            AV_LOG_ERROR,
            "Could not get pixel format for color format '%s' range '%s'.\n",
            av_get_pix_fmt_name(av_format),
            av_color_range > AVCOL_RANGE_UNSPECIFIED &&
            av_color_range < AVCOL_RANGE_NB ?
               av_color_range_name(av_color_range) :
               "Unknown");

        return AVERROR(EINVAL);
    }

    /* Warn only once per context when the range had to be assumed. */
    if (range_guessed) {
        if (!vtctx->warned_color_range) {
            vtctx->warned_color_range = true;
            av_log(avctx,
                   AV_LOG_WARNING,
                   "Color range not set for %s. Using MPEG range.\n",
                   av_get_pix_fmt_name(av_format));
        }
    }

    /* NOTE(review): the `frame ? ... :` ternaries below are vestigial —
     * frame is already dereferenced unconditionally above, so the fallback
     * arms can never be taken. Chroma planes are rounded up for odd
     * dimensions. */
    switch (av_format) {
    case AV_PIX_FMT_NV12:
        *plane_count = 2;

        widths [0] = avctx->width;
        heights[0] = avctx->height;
        strides[0] = frame ? frame->linesize[0] : avctx->width;

        widths [1] = (avctx->width  + 1) / 2;
        heights[1] = (avctx->height + 1) / 2;
        strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) & -2;
        break;

    case AV_PIX_FMT_YUV420P:
        *plane_count = 3;

        widths [0] = avctx->width;
        heights[0] = avctx->height;
        strides[0] = frame ? frame->linesize[0] : avctx->width;

        widths [1] = (avctx->width  + 1) / 2;
        heights[1] = (avctx->height + 1) / 2;
        strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) / 2;

        widths [2] = (avctx->width  + 1) / 2;
        heights[2] = (avctx->height + 1) / 2;
        strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2;
        break;

    default:
        av_log(
               avctx,
               AV_LOG_ERROR,
               "Could not get frame format info for color %d range %d.\n",
               av_format,
               av_color_range);

        return AVERROR(EINVAL);
    }

    /* Planes are "contiguous" only if each one starts exactly where the
     * previous one ends; otherwise report 0. */
    *contiguous_buf_size = 0;
    for (i = 0; i < *plane_count; i++) {
        if (i < *plane_count - 1 &&
            frame->data[i] + strides[i] * heights[i] != frame->data[i + 1]) {
            *contiguous_buf_size = 0;
            break;
        }

        *contiguous_buf_size += strides[i] * heights[i];
    }

    return 0;
}
  1653. #if !TARGET_OS_IPHONE
  1654. //Not used on iOS - frame is always copied.
  1655. static void free_avframe(
  1656. void *release_ctx,
  1657. const void *data,
  1658. size_t size,
  1659. size_t plane_count,
  1660. const void *plane_addresses[])
  1661. {
  1662. AVFrame *frame = release_ctx;
  1663. av_frame_free(&frame);
  1664. }
  1665. #else
  1666. //Not used on OSX - frame is never copied.
  1667. static int copy_avframe_to_pixel_buffer(AVCodecContext *avctx,
  1668. const AVFrame *frame,
  1669. CVPixelBufferRef cv_img,
  1670. const size_t *plane_strides,
  1671. const size_t *plane_rows)
  1672. {
  1673. int i, j;
  1674. size_t plane_count;
  1675. int status;
  1676. int rows;
  1677. int src_stride;
  1678. int dst_stride;
  1679. uint8_t *src_addr;
  1680. uint8_t *dst_addr;
  1681. size_t copy_bytes;
  1682. status = CVPixelBufferLockBaseAddress(cv_img, 0);
  1683. if (status) {
  1684. av_log(
  1685. avctx,
  1686. AV_LOG_ERROR,
  1687. "Error: Could not lock base address of CVPixelBuffer: %d.\n",
  1688. status
  1689. );
  1690. }
  1691. if (CVPixelBufferIsPlanar(cv_img)) {
  1692. plane_count = CVPixelBufferGetPlaneCount(cv_img);
  1693. for (i = 0; frame->data[i]; i++) {
  1694. if (i == plane_count) {
  1695. CVPixelBufferUnlockBaseAddress(cv_img, 0);
  1696. av_log(avctx,
  1697. AV_LOG_ERROR,
  1698. "Error: different number of planes in AVFrame and CVPixelBuffer.\n"
  1699. );
  1700. return AVERROR_EXTERNAL;
  1701. }
  1702. dst_addr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cv_img, i);
  1703. src_addr = (uint8_t*)frame->data[i];
  1704. dst_stride = CVPixelBufferGetBytesPerRowOfPlane(cv_img, i);
  1705. src_stride = plane_strides[i];
  1706. rows = plane_rows[i];
  1707. if (dst_stride == src_stride) {
  1708. memcpy(dst_addr, src_addr, src_stride * rows);
  1709. } else {
  1710. copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
  1711. for (j = 0; j < rows; j++) {
  1712. memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
  1713. }
  1714. }
  1715. }
  1716. } else {
  1717. if (frame->data[1]) {
  1718. CVPixelBufferUnlockBaseAddress(cv_img, 0);
  1719. av_log(avctx,
  1720. AV_LOG_ERROR,
  1721. "Error: different number of planes in AVFrame and non-planar CVPixelBuffer.\n"
  1722. );
  1723. return AVERROR_EXTERNAL;
  1724. }
  1725. dst_addr = (uint8_t*)CVPixelBufferGetBaseAddress(cv_img);
  1726. src_addr = (uint8_t*)frame->data[0];
  1727. dst_stride = CVPixelBufferGetBytesPerRow(cv_img);
  1728. src_stride = plane_strides[0];
  1729. rows = plane_rows[0];
  1730. if (dst_stride == src_stride) {
  1731. memcpy(dst_addr, src_addr, src_stride * rows);
  1732. } else {
  1733. copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
  1734. for (j = 0; j < rows; j++) {
  1735. memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
  1736. }
  1737. }
  1738. }
  1739. status = CVPixelBufferUnlockBaseAddress(cv_img, 0);
  1740. if (status) {
  1741. av_log(avctx, AV_LOG_ERROR, "Error: Could not unlock CVPixelBuffer base address: %d.\n", status);
  1742. return AVERROR_EXTERNAL;
  1743. }
  1744. return 0;
  1745. }
  1746. #endif //!TARGET_OS_IPHONE
  1747. static int create_cv_pixel_buffer(AVCodecContext *avctx,
  1748. const AVFrame *frame,
  1749. CVPixelBufferRef *cv_img)
  1750. {
  1751. int plane_count;
  1752. int color;
  1753. size_t widths [AV_NUM_DATA_POINTERS];
  1754. size_t heights[AV_NUM_DATA_POINTERS];
  1755. size_t strides[AV_NUM_DATA_POINTERS];
  1756. int status;
  1757. size_t contiguous_buf_size;
  1758. #if TARGET_OS_IPHONE
  1759. CVPixelBufferPoolRef pix_buf_pool;
  1760. VTEncContext* vtctx = avctx->priv_data;
  1761. #else
  1762. CFMutableDictionaryRef pix_buf_attachments = CFDictionaryCreateMutable(
  1763. kCFAllocatorDefault,
  1764. 10,
  1765. &kCFCopyStringDictionaryKeyCallBacks,
  1766. &kCFTypeDictionaryValueCallBacks);
  1767. if (!pix_buf_attachments) return AVERROR(ENOMEM);
  1768. #endif
  1769. if (avctx->pix_fmt == AV_PIX_FMT_VIDEOTOOLBOX) {
  1770. av_assert0(frame->format == AV_PIX_FMT_VIDEOTOOLBOX);
  1771. *cv_img = (CVPixelBufferRef)frame->data[3];
  1772. av_assert0(*cv_img);
  1773. CFRetain(*cv_img);
  1774. return 0;
  1775. }
  1776. memset(widths, 0, sizeof(widths));
  1777. memset(heights, 0, sizeof(heights));
  1778. memset(strides, 0, sizeof(strides));
  1779. status = get_cv_pixel_info(
  1780. avctx,
  1781. frame,
  1782. &color,
  1783. &plane_count,
  1784. widths,
  1785. heights,
  1786. strides,
  1787. &contiguous_buf_size
  1788. );
  1789. if (status) {
  1790. av_log(
  1791. avctx,
  1792. AV_LOG_ERROR,
  1793. "Error: Cannot convert format %d color_range %d: %d\n",
  1794. frame->format,
  1795. frame->color_range,
  1796. status
  1797. );
  1798. return AVERROR_EXTERNAL;
  1799. }
  1800. #if TARGET_OS_IPHONE
  1801. pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
  1802. if (!pix_buf_pool) {
  1803. av_log(avctx, AV_LOG_ERROR, "Could not get pixel buffer pool.\n");
  1804. return AVERROR_EXTERNAL;
  1805. }
  1806. status = CVPixelBufferPoolCreatePixelBuffer(NULL,
  1807. pix_buf_pool,
  1808. cv_img);
  1809. if (status) {
  1810. av_log(avctx, AV_LOG_ERROR, "Could not create pixel buffer from pool: %d.\n", status);
  1811. return AVERROR_EXTERNAL;
  1812. }
  1813. status = copy_avframe_to_pixel_buffer(avctx, frame, *cv_img, strides, heights);
  1814. if (status) {
  1815. CFRelease(*cv_img);
  1816. *cv_img = NULL;
  1817. return status;
  1818. }
  1819. #else
  1820. AVFrame *enc_frame = av_frame_alloc();
  1821. if (!enc_frame) return AVERROR(ENOMEM);
  1822. status = av_frame_ref(enc_frame, frame);
  1823. if (status) {
  1824. av_frame_free(&enc_frame);
  1825. return status;
  1826. }
  1827. status = CVPixelBufferCreateWithPlanarBytes(
  1828. kCFAllocatorDefault,
  1829. enc_frame->width,
  1830. enc_frame->height,
  1831. color,
  1832. NULL,
  1833. contiguous_buf_size,
  1834. plane_count,
  1835. (void **)enc_frame->data,
  1836. widths,
  1837. heights,
  1838. strides,
  1839. free_avframe,
  1840. enc_frame,
  1841. NULL,
  1842. cv_img
  1843. );
  1844. add_color_attr(avctx, pix_buf_attachments);
  1845. CVBufferSetAttachments(*cv_img, pix_buf_attachments, kCVAttachmentMode_ShouldPropagate);
  1846. CFRelease(pix_buf_attachments);
  1847. if (status) {
  1848. av_log(avctx, AV_LOG_ERROR, "Error: Could not create CVPixelBuffer: %d\n", status);
  1849. return AVERROR_EXTERNAL;
  1850. }
  1851. #endif
  1852. return 0;
  1853. }
  1854. static int create_encoder_dict_h264(const AVFrame *frame,
  1855. CFDictionaryRef* dict_out)
  1856. {
  1857. CFDictionaryRef dict = NULL;
  1858. if (frame->pict_type == AV_PICTURE_TYPE_I) {
  1859. const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
  1860. const void *vals[] = { kCFBooleanTrue };
  1861. dict = CFDictionaryCreate(NULL, keys, vals, 1, NULL, NULL);
  1862. if(!dict) return AVERROR(ENOMEM);
  1863. }
  1864. *dict_out = dict;
  1865. return 0;
  1866. }
  1867. static int vtenc_send_frame(AVCodecContext *avctx,
  1868. VTEncContext *vtctx,
  1869. const AVFrame *frame)
  1870. {
  1871. CMTime time;
  1872. CFDictionaryRef frame_dict;
  1873. CVPixelBufferRef cv_img = NULL;
  1874. AVFrameSideData *side_data = NULL;
  1875. ExtraSEI *sei = NULL;
  1876. int status = create_cv_pixel_buffer(avctx, frame, &cv_img);
  1877. if (status) return status;
  1878. status = create_encoder_dict_h264(frame, &frame_dict);
  1879. if (status) {
  1880. CFRelease(cv_img);
  1881. return status;
  1882. }
  1883. side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
  1884. if (vtctx->a53_cc && side_data && side_data->size) {
  1885. sei = av_mallocz(sizeof(*sei));
  1886. if (!sei) {
  1887. av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
  1888. } else {
  1889. int ret = ff_alloc_a53_sei(frame, 0, &sei->data, &sei->size);
  1890. if (ret < 0) {
  1891. av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
  1892. av_free(sei);
  1893. sei = NULL;
  1894. }
  1895. }
  1896. }
  1897. time = CMTimeMake(frame->pts * avctx->time_base.num, avctx->time_base.den);
  1898. status = VTCompressionSessionEncodeFrame(
  1899. vtctx->session,
  1900. cv_img,
  1901. time,
  1902. kCMTimeInvalid,
  1903. frame_dict,
  1904. sei,
  1905. NULL
  1906. );
  1907. if (frame_dict) CFRelease(frame_dict);
  1908. CFRelease(cv_img);
  1909. if (status) {
  1910. av_log(avctx, AV_LOG_ERROR, "Error: cannot encode frame: %d\n", status);
  1911. return AVERROR_EXTERNAL;
  1912. }
  1913. return 0;
  1914. }
  1915. static av_cold int vtenc_frame(
  1916. AVCodecContext *avctx,
  1917. AVPacket *pkt,
  1918. const AVFrame *frame,
  1919. int *got_packet)
  1920. {
  1921. VTEncContext *vtctx = avctx->priv_data;
  1922. bool get_frame;
  1923. int status;
  1924. CMSampleBufferRef buf = NULL;
  1925. ExtraSEI *sei = NULL;
  1926. if (frame) {
  1927. status = vtenc_send_frame(avctx, vtctx, frame);
  1928. if (status) {
  1929. status = AVERROR_EXTERNAL;
  1930. goto end_nopkt;
  1931. }
  1932. if (vtctx->frame_ct_in == 0) {
  1933. vtctx->first_pts = frame->pts;
  1934. } else if(vtctx->frame_ct_in == 1 && vtctx->has_b_frames) {
  1935. vtctx->dts_delta = frame->pts - vtctx->first_pts;
  1936. }
  1937. vtctx->frame_ct_in++;
  1938. } else if(!vtctx->flushing) {
  1939. vtctx->flushing = true;
  1940. status = VTCompressionSessionCompleteFrames(vtctx->session,
  1941. kCMTimeIndefinite);
  1942. if (status) {
  1943. av_log(avctx, AV_LOG_ERROR, "Error flushing frames: %d\n", status);
  1944. status = AVERROR_EXTERNAL;
  1945. goto end_nopkt;
  1946. }
  1947. }
  1948. *got_packet = 0;
  1949. get_frame = vtctx->dts_delta >= 0 || !frame;
  1950. if (!get_frame) {
  1951. status = 0;
  1952. goto end_nopkt;
  1953. }
  1954. status = vtenc_q_pop(vtctx, !frame, &buf, &sei);
  1955. if (status) goto end_nopkt;
  1956. if (!buf) goto end_nopkt;
  1957. status = vtenc_cm_to_avpacket(avctx, buf, pkt, sei);
  1958. if (sei) {
  1959. if (sei->data) av_free(sei->data);
  1960. av_free(sei);
  1961. }
  1962. CFRelease(buf);
  1963. if (status) goto end_nopkt;
  1964. *got_packet = 1;
  1965. return 0;
  1966. end_nopkt:
  1967. av_packet_unref(pkt);
  1968. return status;
  1969. }
  1970. static int vtenc_populate_extradata(AVCodecContext *avctx,
  1971. CMVideoCodecType codec_type,
  1972. CFStringRef profile_level,
  1973. CFNumberRef gamma_level,
  1974. CFDictionaryRef enc_info,
  1975. CFDictionaryRef pixel_buffer_info)
  1976. {
  1977. VTEncContext *vtctx = avctx->priv_data;
  1978. AVFrame *frame = av_frame_alloc();
  1979. int y_size = avctx->width * avctx->height;
  1980. int chroma_size = (avctx->width / 2) * (avctx->height / 2);
  1981. CMSampleBufferRef buf = NULL;
  1982. int status;
  1983. if (!frame)
  1984. return AVERROR(ENOMEM);
  1985. frame->buf[0] = av_buffer_alloc(y_size + 2 * chroma_size);
  1986. if(!frame->buf[0]){
  1987. status = AVERROR(ENOMEM);
  1988. goto pe_cleanup;
  1989. }
  1990. status = vtenc_create_encoder(avctx,
  1991. codec_type,
  1992. profile_level,
  1993. gamma_level,
  1994. enc_info,
  1995. pixel_buffer_info,
  1996. &vtctx->session);
  1997. if (status)
  1998. goto pe_cleanup;
  1999. frame->data[0] = frame->buf[0]->data;
  2000. memset(frame->data[0], 0, y_size);
  2001. frame->data[1] = frame->buf[0]->data + y_size;
  2002. memset(frame->data[1], 128, chroma_size);
  2003. if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
  2004. frame->data[2] = frame->buf[0]->data + y_size + chroma_size;
  2005. memset(frame->data[2], 128, chroma_size);
  2006. }
  2007. frame->linesize[0] = avctx->width;
  2008. if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
  2009. frame->linesize[1] =
  2010. frame->linesize[2] = (avctx->width + 1) / 2;
  2011. } else {
  2012. frame->linesize[1] = (avctx->width + 1) / 2;
  2013. }
  2014. frame->format = avctx->pix_fmt;
  2015. frame->width = avctx->width;
  2016. frame->height = avctx->height;
  2017. frame->colorspace = avctx->colorspace;
  2018. frame->color_range = avctx->color_range;
  2019. frame->color_trc = avctx->color_trc;
  2020. frame->color_primaries = avctx->color_primaries;
  2021. frame->pts = 0;
  2022. status = vtenc_send_frame(avctx, vtctx, frame);
  2023. if (status) {
  2024. av_log(avctx, AV_LOG_ERROR, "Error sending frame: %d\n", status);
  2025. goto pe_cleanup;
  2026. }
  2027. //Populates extradata - output frames are flushed and param sets are available.
  2028. status = VTCompressionSessionCompleteFrames(vtctx->session,
  2029. kCMTimeIndefinite);
  2030. if (status)
  2031. goto pe_cleanup;
  2032. status = vtenc_q_pop(vtctx, 0, &buf, NULL);
  2033. if (status) {
  2034. av_log(avctx, AV_LOG_ERROR, "popping: %d\n", status);
  2035. goto pe_cleanup;
  2036. }
  2037. CFRelease(buf);
  2038. pe_cleanup:
  2039. if(vtctx->session)
  2040. CFRelease(vtctx->session);
  2041. vtctx->session = NULL;
  2042. vtctx->frame_ct_out = 0;
  2043. av_frame_unref(frame);
  2044. av_frame_free(&frame);
  2045. av_assert0(status != 0 || (avctx->extradata && avctx->extradata_size > 0));
  2046. return status;
  2047. }
  2048. static av_cold int vtenc_close(AVCodecContext *avctx)
  2049. {
  2050. VTEncContext *vtctx = avctx->priv_data;
  2051. pthread_cond_destroy(&vtctx->cv_sample_sent);
  2052. pthread_mutex_destroy(&vtctx->lock);
  2053. if(!vtctx->session) return 0;
  2054. VTCompressionSessionCompleteFrames(vtctx->session,
  2055. kCMTimeIndefinite);
  2056. clear_frame_queue(vtctx);
  2057. CFRelease(vtctx->session);
  2058. vtctx->session = NULL;
  2059. if (vtctx->color_primaries) {
  2060. CFRelease(vtctx->color_primaries);
  2061. vtctx->color_primaries = NULL;
  2062. }
  2063. if (vtctx->transfer_function) {
  2064. CFRelease(vtctx->transfer_function);
  2065. vtctx->transfer_function = NULL;
  2066. }
  2067. if (vtctx->ycbcr_matrix) {
  2068. CFRelease(vtctx->ycbcr_matrix);
  2069. vtctx->ycbcr_matrix = NULL;
  2070. }
  2071. return 0;
  2072. }
/* Input pixel formats accepted by both VideoToolbox encoders, in order of
 * preference. AV_PIX_FMT_VIDEOTOOLBOX frames already wrap a CVPixelBufferRef
 * in data[3] and are passed through without copying (see
 * create_cv_pixel_buffer). */
static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_VIDEOTOOLBOX,
    AV_PIX_FMT_NV12,
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_NONE
};
/* Option flags shared by every entry: video + encoding parameter. */
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* AVOption entries shared by the H.264 and HEVC option tables.
 * NOTE: OFFSET() is defined below this macro; that is valid because macros
 * are expanded where COMMON_OPTIONS is *used* (inside the option tables),
 * not where it is defined. Comments are kept outside the macro body so no
 * line-splicing backslash can swallow them. */
#define COMMON_OPTIONS \
    { "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL, \
        { .i64 = 0 }, 0, 1, VE }, \
    { "realtime", "Hint that encoding should happen in real-time if not faster (e.g. capturing from camera).", \
        OFFSET(realtime), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
    { "frames_before", "Other frames will come before the frames in this session. This helps smooth concatenation issues.", \
        OFFSET(frames_before), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
    { "frames_after", "Other frames will come after the frames in this session. This helps smooth concatenation issues.", \
        OFFSET(frames_after), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
/* Byte offset of a field inside the private context, for AVOption tables. */
#define OFFSET(x) offsetof(VTEncContext, x)
/* Private AVOptions for the H.264 encoder: profile, level (x10, e.g. 41 =
 * 4.1), entropy coder selection, A53 closed-caption passthrough, plus the
 * shared COMMON_OPTIONS. */
static const AVOption h264_options[] = {
    { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = H264_PROF_AUTO }, H264_PROF_AUTO, H264_PROF_COUNT, VE, "profile" },
    { "baseline", "Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_BASELINE }, INT_MIN, INT_MAX, VE, "profile" },
    { "main",     "Main Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_MAIN     }, INT_MIN, INT_MAX, VE, "profile" },
    { "high",     "High Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_HIGH     }, INT_MIN, INT_MAX, VE, "profile" },

    /* Level is stored as level_idc / 10 * 10 + minor (e.g. 31 == level 3.1);
     * 0 means "let VideoToolbox choose". */
    { "level", "Level", OFFSET(level), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, VE, "level" },
    { "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" },
    { "3.0", "Level 3.0", 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, INT_MIN, INT_MAX, VE, "level" },
    { "3.1", "Level 3.1", 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, INT_MIN, INT_MAX, VE, "level" },
    { "3.2", "Level 3.2", 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, INT_MIN, INT_MAX, VE, "level" },
    { "4.0", "Level 4.0", 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, INT_MIN, INT_MAX, VE, "level" },
    { "4.1", "Level 4.1", 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, INT_MIN, INT_MAX, VE, "level" },
    { "4.2", "Level 4.2", 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, INT_MIN, INT_MAX, VE, "level" },
    { "5.0", "Level 5.0", 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, INT_MIN, INT_MAX, VE, "level" },
    { "5.1", "Level 5.1", 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, INT_MIN, INT_MAX, VE, "level" },
    { "5.2", "Level 5.2", 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, INT_MIN, INT_MAX, VE, "level" },

    { "coder", "Entropy coding", OFFSET(entropy), AV_OPT_TYPE_INT, { .i64 = VT_ENTROPY_NOT_SET }, VT_ENTROPY_NOT_SET, VT_CABAC, VE, "coder" },
    { "cavlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
    { "vlc",   "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
    { "cabac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
    { "ac",    "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },

    { "a53cc", "Use A53 Closed Captions (if available)", OFFSET(a53_cc), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, VE },

    COMMON_OPTIONS
    { NULL },
};
/* AVClass exposing h264_options through the encoder's private context. */
static const AVClass h264_videotoolbox_class = {
    .class_name = "h264_videotoolbox",
    .item_name  = av_default_item_name,
    .option     = h264_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
  2121. AVCodec ff_h264_videotoolbox_encoder = {
  2122. .name = "h264_videotoolbox",
  2123. .long_name = NULL_IF_CONFIG_SMALL("VideoToolbox H.264 Encoder"),
  2124. .type = AVMEDIA_TYPE_VIDEO,
  2125. .id = AV_CODEC_ID_H264,
  2126. .priv_data_size = sizeof(VTEncContext),
  2127. .pix_fmts = pix_fmts,
  2128. .init = vtenc_init,
  2129. .encode2 = vtenc_frame,
  2130. .close = vtenc_close,
  2131. .capabilities = AV_CODEC_CAP_DELAY,
  2132. .priv_class = &h264_videotoolbox_class,
  2133. .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
  2134. FF_CODEC_CAP_INIT_CLEANUP,
  2135. };
/* Private AVOptions for the HEVC encoder: profile selection plus the shared
 * COMMON_OPTIONS (no level/coder controls — VideoToolbox chooses those). */
static const AVOption hevc_options[] = {
    { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = HEVC_PROF_AUTO }, HEVC_PROF_AUTO, HEVC_PROF_COUNT, VE, "profile" },
    { "main",   "Main Profile",   0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN   }, INT_MIN, INT_MAX, VE, "profile" },
    { "main10", "Main10 Profile", 0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN10 }, INT_MIN, INT_MAX, VE, "profile" },

    COMMON_OPTIONS
    { NULL },
};
/* AVClass exposing hevc_options through the encoder's private context. */
static const AVClass hevc_videotoolbox_class = {
    .class_name = "hevc_videotoolbox",
    .item_name  = av_default_item_name,
    .option     = hevc_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
/* Registration of the hardware-backed HEVC encoder. AV_CODEC_CAP_DELAY is
 * required because output is reordered/queued by the compression session;
 * AV_CODEC_CAP_HARDWARE + wrapper_name mark this as a hardware wrapper. */
AVCodec ff_hevc_videotoolbox_encoder = {
    .name             = "hevc_videotoolbox",
    .long_name        = NULL_IF_CONFIG_SMALL("VideoToolbox H.265 Encoder"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_HEVC,
    .priv_data_size   = sizeof(VTEncContext),
    .pix_fmts         = pix_fmts,
    .init             = vtenc_init,
    .encode2          = vtenc_frame,
    .close            = vtenc_close,
    .capabilities     = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE,
    .priv_class       = &hevc_videotoolbox_class,
    .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
                        FF_CODEC_CAP_INIT_CLEANUP,
    .wrapper_name     = "videotoolbox",
};