You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2641 lines
85KB

  1. /*
  2. * copyright (c) 2015 Rick Kern <kernrj@gmail.com>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include <VideoToolbox/VideoToolbox.h>
  21. #include <CoreVideo/CoreVideo.h>
  22. #include <CoreMedia/CoreMedia.h>
  23. #include <TargetConditionals.h>
  24. #include <Availability.h>
  25. #include "avcodec.h"
  26. #include "libavutil/opt.h"
  27. #include "libavutil/avassert.h"
  28. #include "libavutil/avstring.h"
  29. #include "libavcodec/avcodec.h"
  30. #include "libavutil/pixdesc.h"
  31. #include "internal.h"
  32. #include <pthread.h>
  33. #include "atsc_a53.h"
  34. #include "h264.h"
  35. #include "h264_sei.h"
  36. #include <dlfcn.h>
/* Fallback FourCC values for SDKs that predate these symbols. */
#if !HAVE_KCMVIDEOCODECTYPE_HEVC
enum { kCMVideoCodecType_HEVC = 'hvc1' };
#endif

#if !HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
enum { kCVPixelFormatType_420YpCbCr10BiPlanarFullRange = 'xf20' };
enum { kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange = 'x420' };
#endif
/*
 * Signature of CMVideoFormatDescriptionGet{H264,HEVC}ParameterSetAtIndex.
 * Stored as a function pointer so the HEVC variant can be resolved at
 * runtime with dlsym() (see loadVTEncSymbols) on systems where it exists.
 */
typedef OSStatus (*getParameterSetAtIndex)(CMFormatDescriptionRef videoDesc,
                                           size_t parameterSetIndex,
                                           const uint8_t **parameterSetPointerOut,
                                           size_t *parameterSetSizeOut,
                                           size_t *parameterSetCountOut,
                                           int *NALUnitHeaderLengthOut);
//These symbols may not be present at runtime on older OS releases, so they
//are looked up with dlsym() and stored here (see GET_SYM / loadVTEncSymbols).
static struct{
    CFStringRef kCVImageBufferColorPrimaries_ITU_R_2020;
    CFStringRef kCVImageBufferTransferFunction_ITU_R_2020;
    CFStringRef kCVImageBufferYCbCrMatrix_ITU_R_2020;

    CFStringRef kVTCompressionPropertyKey_H264EntropyMode;
    CFStringRef kVTH264EntropyMode_CAVLC;
    CFStringRef kVTH264EntropyMode_CABAC;

    CFStringRef kVTProfileLevel_H264_Baseline_4_0;
    CFStringRef kVTProfileLevel_H264_Baseline_4_2;
    CFStringRef kVTProfileLevel_H264_Baseline_5_0;
    CFStringRef kVTProfileLevel_H264_Baseline_5_1;
    CFStringRef kVTProfileLevel_H264_Baseline_5_2;
    CFStringRef kVTProfileLevel_H264_Baseline_AutoLevel;
    CFStringRef kVTProfileLevel_H264_Main_4_2;
    CFStringRef kVTProfileLevel_H264_Main_5_1;
    CFStringRef kVTProfileLevel_H264_Main_5_2;
    CFStringRef kVTProfileLevel_H264_Main_AutoLevel;
    CFStringRef kVTProfileLevel_H264_High_3_0;
    CFStringRef kVTProfileLevel_H264_High_3_1;
    CFStringRef kVTProfileLevel_H264_High_3_2;
    CFStringRef kVTProfileLevel_H264_High_4_0;
    CFStringRef kVTProfileLevel_H264_High_4_1;
    CFStringRef kVTProfileLevel_H264_High_4_2;
    CFStringRef kVTProfileLevel_H264_High_5_1;
    CFStringRef kVTProfileLevel_H264_High_5_2;
    CFStringRef kVTProfileLevel_H264_High_AutoLevel;
    CFStringRef kVTProfileLevel_H264_Extended_5_0;
    CFStringRef kVTProfileLevel_H264_Extended_AutoLevel;

    CFStringRef kVTProfileLevel_HEVC_Main_AutoLevel;
    CFStringRef kVTProfileLevel_HEVC_Main10_AutoLevel;

    CFStringRef kVTCompressionPropertyKey_RealTime;

    CFStringRef kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder;
    CFStringRef kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder;

    /* NULL when the HEVC getter is unavailable on the running OS. */
    getParameterSetAtIndex CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
} compat_keys;
/*
 * Resolve a CFStringRef constant by name at runtime. If the running OS
 * does not export the symbol, fall back to a literal CFSTR with the
 * key's known string value (defaultVal).
 */
#define GET_SYM(symbol, defaultVal)                                     \
do{                                                                     \
    CFStringRef* handle = (CFStringRef*)dlsym(RTLD_DEFAULT, #symbol);   \
    if(!handle)                                                         \
        compat_keys.symbol = CFSTR(defaultVal);                         \
    else                                                                \
        compat_keys.symbol = *handle;                                   \
}while(0)
/* Ensures loadVTEncSymbols runs exactly once per process. */
static pthread_once_t once_ctrl = PTHREAD_ONCE_INIT;

/*
 * Populate compat_keys by resolving optional VideoToolbox/CoreVideo symbols
 * at runtime. Called through pthread_once(&once_ctrl, ...).
 */
static void loadVTEncSymbols(){
    /* May be NULL if the HEVC getter is absent; callers must handle that. */
    compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex =
        (getParameterSetAtIndex)dlsym(
            RTLD_DEFAULT,
            "CMVideoFormatDescriptionGetHEVCParameterSetAtIndex"
        );

    GET_SYM(kCVImageBufferColorPrimaries_ITU_R_2020,   "ITU_R_2020");
    GET_SYM(kCVImageBufferTransferFunction_ITU_R_2020, "ITU_R_2020");
    GET_SYM(kCVImageBufferYCbCrMatrix_ITU_R_2020,      "ITU_R_2020");

    GET_SYM(kVTCompressionPropertyKey_H264EntropyMode, "H264EntropyMode");
    GET_SYM(kVTH264EntropyMode_CAVLC, "CAVLC");
    GET_SYM(kVTH264EntropyMode_CABAC, "CABAC");

    GET_SYM(kVTProfileLevel_H264_Baseline_4_0,       "H264_Baseline_4_0");
    GET_SYM(kVTProfileLevel_H264_Baseline_4_2,       "H264_Baseline_4_2");
    GET_SYM(kVTProfileLevel_H264_Baseline_5_0,       "H264_Baseline_5_0");
    GET_SYM(kVTProfileLevel_H264_Baseline_5_1,       "H264_Baseline_5_1");
    GET_SYM(kVTProfileLevel_H264_Baseline_5_2,       "H264_Baseline_5_2");
    GET_SYM(kVTProfileLevel_H264_Baseline_AutoLevel, "H264_Baseline_AutoLevel");
    GET_SYM(kVTProfileLevel_H264_Main_4_2,           "H264_Main_4_2");
    GET_SYM(kVTProfileLevel_H264_Main_5_1,           "H264_Main_5_1");
    GET_SYM(kVTProfileLevel_H264_Main_5_2,           "H264_Main_5_2");
    GET_SYM(kVTProfileLevel_H264_Main_AutoLevel,     "H264_Main_AutoLevel");
    GET_SYM(kVTProfileLevel_H264_High_3_0,           "H264_High_3_0");
    GET_SYM(kVTProfileLevel_H264_High_3_1,           "H264_High_3_1");
    GET_SYM(kVTProfileLevel_H264_High_3_2,           "H264_High_3_2");
    GET_SYM(kVTProfileLevel_H264_High_4_0,           "H264_High_4_0");
    GET_SYM(kVTProfileLevel_H264_High_4_1,           "H264_High_4_1");
    GET_SYM(kVTProfileLevel_H264_High_4_2,           "H264_High_4_2");
    GET_SYM(kVTProfileLevel_H264_High_5_1,           "H264_High_5_1");
    GET_SYM(kVTProfileLevel_H264_High_5_2,           "H264_High_5_2");
    GET_SYM(kVTProfileLevel_H264_High_AutoLevel,     "H264_High_AutoLevel");
    GET_SYM(kVTProfileLevel_H264_Extended_5_0,       "H264_Extended_5_0");
    GET_SYM(kVTProfileLevel_H264_Extended_AutoLevel, "H264_Extended_AutoLevel");

    GET_SYM(kVTProfileLevel_HEVC_Main_AutoLevel,     "HEVC_Main_AutoLevel");
    GET_SYM(kVTProfileLevel_HEVC_Main10_AutoLevel,   "HEVC_Main10_AutoLevel");

    GET_SYM(kVTCompressionPropertyKey_RealTime, "RealTime");

    GET_SYM(kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
            "EnableHardwareAcceleratedVideoEncoder");
    GET_SYM(kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
            "RequireHardwareAcceleratedVideoEncoder");
}
/* H.264 profile selected via the "profile" private option. */
typedef enum VT_H264Profile {
    H264_PROF_AUTO,
    H264_PROF_BASELINE,
    H264_PROF_MAIN,
    H264_PROF_HIGH,
    H264_PROF_EXTENDED,
    H264_PROF_COUNT
} VT_H264Profile;

/* Entropy coding mode requested for H.264 (CAVLC/CABAC, or unset). */
typedef enum VTH264Entropy{
    VT_ENTROPY_NOT_SET,
    VT_CAVLC,
    VT_CABAC
} VTH264Entropy;

/* HEVC profile selected via the "profile" private option. */
typedef enum VT_HEVCProfile {
    HEVC_PROF_AUTO,
    HEVC_PROF_MAIN,
    HEVC_PROF_MAIN10,
    HEVC_PROF_COUNT
} VT_HEVCProfile;
/* Annex B start code prepended to each parameter set / NAL unit. */
static const uint8_t start_code[] = { 0, 0, 0, 1 };

/* Side-band SEI payload carried alongside an encoded sample. */
typedef struct ExtraSEI {
    void *data;   // malloc'ed payload; owned by whoever frees the node
    size_t size;  // payload size in bytes
} ExtraSEI;

/* Singly-linked node of the encoded-sample output queue. */
typedef struct BufNode {
    CMSampleBufferRef cm_buffer; // retained sample buffer from the encoder
    ExtraSEI *sei;               // optional SEI attached to this sample
    struct BufNode* next;
    int error;
} BufNode;
/* Private context of the VideoToolbox encoder wrapper. */
typedef struct VTEncContext {
    AVClass *class;
    enum AVCodecID codec_id;          // AV_CODEC_ID_H264 or AV_CODEC_ID_HEVC
    VTCompressionSessionRef session;
    CFStringRef ycbcr_matrix;
    CFStringRef color_primaries;
    CFStringRef transfer_function;
    getParameterSetAtIndex get_param_set_func;

    pthread_mutex_t lock;             // guards the queue and counters below
    pthread_cond_t  cv_sample_sent;   // signaled when a sample is queued

    int async_error;                  // sticky error set from the callback thread

    BufNode *q_head;                  // FIFO of encoded samples (head = oldest)
    BufNode *q_tail;

    int64_t frame_ct_out;             // samples handed back to the caller
    int64_t frame_ct_in;              // frames submitted to the session

    int64_t first_pts;
    int64_t dts_delta;

    /* Option-backed settings; int64_t presumably because they are declared
     * as AVOptions — confirm against the option table elsewhere in the file. */
    int64_t profile;
    int64_t level;
    int64_t entropy;
    int64_t realtime;
    int64_t frames_before;
    int64_t frames_after;

    int64_t allow_sw;                 // permit software encoding fallback
    int64_t require_sw;

    bool flushing;                    // end-of-stream drain in progress
    bool has_b_frames;
    bool warned_color_range;

    /* can't be bool type since AVOption will access it as int */
    int a53_cc;
} VTEncContext;
/* Forward declaration: defined later in the file (past this chunk). */
static int vtenc_populate_extradata(AVCodecContext   *avctx,
                                    CMVideoCodecType codec_type,
                                    CFStringRef      profile_level,
                                    CFNumberRef      gamma_level,
                                    CFDictionaryRef  enc_info,
                                    CFDictionaryRef  pixel_buffer_info);
  203. /**
  204. * NULL-safe release of *refPtr, and sets value to NULL.
  205. */
  206. static void vt_release_num(CFNumberRef* refPtr){
  207. if (!*refPtr) {
  208. return;
  209. }
  210. CFRelease(*refPtr);
  211. *refPtr = NULL;
  212. }
  213. static void set_async_error(VTEncContext *vtctx, int err)
  214. {
  215. BufNode *info;
  216. pthread_mutex_lock(&vtctx->lock);
  217. vtctx->async_error = err;
  218. info = vtctx->q_head;
  219. vtctx->q_head = vtctx->q_tail = NULL;
  220. while (info) {
  221. BufNode *next = info->next;
  222. CFRelease(info->cm_buffer);
  223. av_free(info);
  224. info = next;
  225. }
  226. pthread_mutex_unlock(&vtctx->lock);
  227. }
/* Drop all queued output samples without recording an error (err == 0). */
static void clear_frame_queue(VTEncContext *vtctx)
{
    set_async_error(vtctx, 0);
}
/*
 * Pop the oldest encoded sample from the output queue.
 *
 * On success *buf holds a retained CMSampleBufferRef (NULL at end of stream
 * or when the queue is empty and wait is false); if sei is non-NULL and a
 * buffer is returned, *sei receives ownership of the attached SEI payload.
 * Returns 0, or the sticky async error recorded by the callback thread.
 */
static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf, ExtraSEI **sei)
{
    BufNode *info;

    pthread_mutex_lock(&vtctx->lock);

    // Propagate an error reported asynchronously by the encoder callback.
    if (vtctx->async_error) {
        pthread_mutex_unlock(&vtctx->lock);
        return vtctx->async_error;
    }

    // Flushing and every submitted frame already returned: signal EOF.
    if (vtctx->flushing && vtctx->frame_ct_in == vtctx->frame_ct_out) {
        *buf = NULL;
        pthread_mutex_unlock(&vtctx->lock);
        return 0;
    }

    // Optionally block until the callback queues a sample or state changes.
    while (!vtctx->q_head && !vtctx->async_error && wait && !vtctx->flushing) {
        pthread_cond_wait(&vtctx->cv_sample_sent, &vtctx->lock);
    }

    if (!vtctx->q_head) {
        pthread_mutex_unlock(&vtctx->lock);
        *buf = NULL;
        return 0;
    }

    // Unlink the head node.
    info = vtctx->q_head;
    vtctx->q_head = vtctx->q_head->next;
    if (!vtctx->q_head) {
        vtctx->q_tail = NULL;
    }

    vtctx->frame_ct_out++;
    pthread_mutex_unlock(&vtctx->lock);

    *buf = info->cm_buffer;
    if (sei && *buf) {
        // Ownership of the SEI transfers to the caller.
        *sei = info->sei;
    } else if (info->sei) {
        // Caller did not take the SEI: release it here.
        if (info->sei->data) av_free(info->sei->data);
        av_free(info->sei);
    }
    av_free(info);

    return 0;
}
/*
 * Append an encoded sample (retained here) and its optional SEI payload to
 * the output queue and wake one waiter. Called from the encoder callback.
 * On allocation failure the error is recorded via set_async_error.
 */
static void vtenc_q_push(VTEncContext *vtctx, CMSampleBufferRef buffer, ExtraSEI *sei)
{
    BufNode *info = av_malloc(sizeof(BufNode));
    if (!info) {
        set_async_error(vtctx, AVERROR(ENOMEM));
        return;
    }

    // Retain: the queue keeps its own reference until vtenc_q_pop.
    CFRetain(buffer);
    info->cm_buffer = buffer;
    info->sei = sei;
    info->next = NULL;

    pthread_mutex_lock(&vtctx->lock);

    if (!vtctx->q_head) {
        vtctx->q_head = info;
    } else {
        vtctx->q_tail->next = info;
    }

    vtctx->q_tail = info;

    pthread_cond_signal(&vtctx->cv_sample_sent);
    pthread_mutex_unlock(&vtctx->lock);
}
  291. static int count_nalus(size_t length_code_size,
  292. CMSampleBufferRef sample_buffer,
  293. int *count)
  294. {
  295. size_t offset = 0;
  296. int status;
  297. int nalu_ct = 0;
  298. uint8_t size_buf[4];
  299. size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
  300. CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
  301. if (length_code_size > 4)
  302. return AVERROR_INVALIDDATA;
  303. while (offset < src_size) {
  304. size_t curr_src_len;
  305. size_t box_len = 0;
  306. size_t i;
  307. status = CMBlockBufferCopyDataBytes(block,
  308. offset,
  309. length_code_size,
  310. size_buf);
  311. for (i = 0; i < length_code_size; i++) {
  312. box_len <<= 8;
  313. box_len |= size_buf[i];
  314. }
  315. curr_src_len = box_len + length_code_size;
  316. offset += curr_src_len;
  317. nalu_ct++;
  318. }
  319. *count = nalu_ct;
  320. return 0;
  321. }
  322. static CMVideoCodecType get_cm_codec_type(enum AVCodecID id)
  323. {
  324. switch (id) {
  325. case AV_CODEC_ID_H264: return kCMVideoCodecType_H264;
  326. case AV_CODEC_ID_HEVC: return kCMVideoCodecType_HEVC;
  327. default: return 0;
  328. }
  329. }
/**
 * Compute the total number of bytes needed to store every parameter set of
 * vid_fmt, each prefixed with a 4-byte Annex B start code.
 * On success *size receives the byte count and 0 is returned.
 * (The parameter sets themselves are written out by copy_param_sets.)
 */
static int get_params_size(
    AVCodecContext              *avctx,
    CMVideoFormatDescriptionRef vid_fmt,
    size_t                      *size)
{
    VTEncContext *vtctx = avctx->priv_data;
    size_t total_size = 0;
    size_t ps_count;
    int is_count_bad = 0;
    size_t i;
    int status;

    // Ask only for the number of parameter sets first.
    status = vtctx->get_param_set_func(vid_fmt,
                                       0,
                                       NULL,
                                       NULL,
                                       &ps_count,
                                       NULL);
    if (status) {
        // Some implementations can't report the count; probe until failure.
        is_count_bad = 1;
        ps_count     = 0;
        status       = 0;
    }

    for (i = 0; i < ps_count || is_count_bad; i++) {
        const uint8_t *ps;
        size_t ps_size;
        status = vtctx->get_param_set_func(vid_fmt,
                                           i,
                                           &ps,
                                           &ps_size,
                                           NULL,
                                           NULL);
        if (status) {
            /*
             * When ps_count is invalid, status != 0 ends the loop normally
             * unless we didn't get any parameter sets.
             */
            if (i > 0 && is_count_bad) status = 0;

            break;
        }

        // Each set is emitted with a 4-byte start code prefix.
        total_size += ps_size + sizeof(start_code);
    }

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error getting parameter set sizes: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    *size = total_size;
    return 0;
}
/*
 * Write all parameter sets of vid_fmt into dst, each prefixed with an
 * Annex B start code. dst must hold at least the size reported by
 * get_params_size (dst_size bytes available).
 */
static int copy_param_sets(
    AVCodecContext              *avctx,
    CMVideoFormatDescriptionRef vid_fmt,
    uint8_t                     *dst,
    size_t                      dst_size)
{
    VTEncContext *vtctx = avctx->priv_data;
    size_t ps_count;
    int is_count_bad = 0;
    int status;
    size_t offset = 0;
    size_t i;

    // Query the parameter-set count; fall back to probing if unsupported.
    status = vtctx->get_param_set_func(vid_fmt,
                                       0,
                                       NULL,
                                       NULL,
                                       &ps_count,
                                       NULL);
    if (status) {
        is_count_bad = 1;
        ps_count     = 0;
        status       = 0;
    }

    for (i = 0; i < ps_count || is_count_bad; i++) {
        const uint8_t *ps;
        size_t ps_size;
        size_t next_offset;

        status = vtctx->get_param_set_func(vid_fmt,
                                           i,
                                           &ps,
                                           &ps_size,
                                           NULL,
                                           NULL);
        if (status) {
            // Same probe-until-failure convention as get_params_size.
            if (i > 0 && is_count_bad) status = 0;

            break;
        }

        next_offset = offset + sizeof(start_code) + ps_size;
        if (dst_size < next_offset) {
            av_log(avctx, AV_LOG_ERROR, "Error: buffer too small for parameter sets.\n");
            return AVERROR_BUFFER_TOO_SMALL;
        }

        memcpy(dst + offset, start_code, sizeof(start_code));
        offset += sizeof(start_code);

        memcpy(dst + offset, ps, ps_size);
        offset = next_offset;
    }

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error getting parameter set data: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    return 0;
}
  439. static int set_extradata(AVCodecContext *avctx, CMSampleBufferRef sample_buffer)
  440. {
  441. CMVideoFormatDescriptionRef vid_fmt;
  442. size_t total_size;
  443. int status;
  444. vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
  445. if (!vid_fmt) {
  446. av_log(avctx, AV_LOG_ERROR, "No video format.\n");
  447. return AVERROR_EXTERNAL;
  448. }
  449. status = get_params_size(avctx, vid_fmt, &total_size);
  450. if (status) {
  451. av_log(avctx, AV_LOG_ERROR, "Could not get parameter sets.\n");
  452. return status;
  453. }
  454. avctx->extradata = av_mallocz(total_size + AV_INPUT_BUFFER_PADDING_SIZE);
  455. if (!avctx->extradata) {
  456. return AVERROR(ENOMEM);
  457. }
  458. avctx->extradata_size = total_size;
  459. status = copy_param_sets(avctx, vid_fmt, avctx->extradata, total_size);
  460. if (status) {
  461. av_log(avctx, AV_LOG_ERROR, "Could not copy param sets.\n");
  462. return status;
  463. }
  464. return 0;
  465. }
/*
 * VTCompressionSession output callback. Runs on a VideoToolbox thread:
 * records errors, lazily builds global-header extradata from the first
 * sample, and queues the encoded sample for vtenc_q_pop.
 * sourceFrameCtx carries the per-frame ExtraSEI pointer (may be NULL).
 */
static void vtenc_output_callback(
    void *ctx,
    void *sourceFrameCtx,
    OSStatus status,
    VTEncodeInfoFlags flags,
    CMSampleBufferRef sample_buffer)
{
    AVCodecContext *avctx = ctx;
    VTEncContext *vtctx = avctx->priv_data;
    ExtraSEI *sei = sourceFrameCtx;

    // Once an async error is recorded, drop all further output.
    if (vtctx->async_error) {
        return;
    }

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error encoding frame: %d\n", (int)status);
        set_async_error(vtctx, AVERROR_EXTERNAL);
        return;
    }

    // NOTE(review): the early returns above and below appear to drop `sei`
    // without freeing it — verify ownership against the submission path.
    if (!sample_buffer) {
        return;
    }

    // With GLOBAL_HEADER, derive extradata from the first encoded sample.
    if (!avctx->extradata && (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
        int set_status = set_extradata(avctx, sample_buffer);
        if (set_status) {
            set_async_error(vtctx, set_status);
            return;
        }
    }

    vtenc_q_push(vtctx, sample_buffer, sei);
}
/*
 * Query the NAL-unit length-prefix size (in bytes) used by the sample
 * buffer's format description, via the codec-specific parameter-set getter.
 */
static int get_length_code_size(
    AVCodecContext    *avctx,
    CMSampleBufferRef sample_buffer,
    size_t            *size)
{
    VTEncContext *vtctx = avctx->priv_data;
    CMVideoFormatDescriptionRef vid_fmt;
    int isize;
    int status;

    vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
    if (!vid_fmt) {
        av_log(avctx, AV_LOG_ERROR, "Error getting buffer format description.\n");
        return AVERROR_EXTERNAL;
    }

    // Only the NALUnitHeaderLength output is requested here.
    status = vtctx->get_param_set_func(vid_fmt,
                                       0,
                                       NULL,
                                       NULL,
                                       NULL,
                                       &isize);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error getting length code size: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    *size = isize;
    return 0;
}
  523. /*
  524. * Returns true on success.
  525. *
  526. * If profile_level_val is NULL and this method returns true, don't specify the
  527. * profile/level to the encoder.
  528. */
/*
 * Map the context's H.264 profile/level options to a VideoToolbox
 * profile-level CFString. Levels are encoded as 10*major + minor
 * (e.g. 41 -> 4.1); 0 selects the AutoLevel variant.
 * Returns true on success; *profile_level_val stays NULL when the encoder
 * should pick everything itself (profile auto, level auto).
 */
static bool get_vt_h264_profile_level(AVCodecContext *avctx,
                                      CFStringRef    *profile_level_val)
{
    VTEncContext *vtctx = avctx->priv_data;
    int64_t profile = vtctx->profile;

    if (profile == H264_PROF_AUTO && vtctx->level) {
        //Need to pick a profile if level is not auto-selected.
        profile = vtctx->has_b_frames ? H264_PROF_MAIN : H264_PROF_BASELINE;
    }

    *profile_level_val = NULL;

    switch (profile) {
        case H264_PROF_AUTO:
            return true;

        case H264_PROF_BASELINE:
            switch (vtctx->level) {
                case  0: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_AutoLevel; break;
                case 13: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3;       break;
                case 30: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0;       break;
                case 31: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1;       break;
                case 32: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2;       break;
                case 40: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_4_0;       break;
                case 41: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1;       break;
                case 42: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_4_2;       break;
                case 50: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_5_0;       break;
                case 51: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_5_1;       break;
                case 52: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_5_2;       break;
            }
            break;

        case H264_PROF_MAIN:
            switch (vtctx->level) {
                case  0: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Main_AutoLevel; break;
                case 30: *profile_level_val = kVTProfileLevel_H264_Main_3_0;       break;
                case 31: *profile_level_val = kVTProfileLevel_H264_Main_3_1;       break;
                case 32: *profile_level_val = kVTProfileLevel_H264_Main_3_2;       break;
                case 40: *profile_level_val = kVTProfileLevel_H264_Main_4_0;       break;
                case 41: *profile_level_val = kVTProfileLevel_H264_Main_4_1;       break;
                case 42: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Main_4_2;       break;
                case 50: *profile_level_val = kVTProfileLevel_H264_Main_5_0;       break;
                case 51: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Main_5_1;       break;
                case 52: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Main_5_2;       break;
            }
            break;

        case H264_PROF_HIGH:
            switch (vtctx->level) {
                case  0: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_AutoLevel; break;
                case 30: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_3_0;       break;
                case 31: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_3_1;       break;
                case 32: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_3_2;       break;
                case 40: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_4_0;       break;
                case 41: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_4_1;       break;
                case 42: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_4_2;       break;
                case 50: *profile_level_val = kVTProfileLevel_H264_High_5_0;       break;
                case 51: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_5_1;       break;
                case 52: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_5_2;       break;
            }
            break;

        case H264_PROF_EXTENDED:
            switch (vtctx->level) {
                case  0: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Extended_AutoLevel; break;
                case 50: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Extended_5_0;       break;
            }
            break;
    }

    // Any profile/level combination not assigned above is invalid.
    if (!*profile_level_val) {
        av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
        return false;
    }

    return true;
}
  619. /*
  620. * Returns true on success.
  621. *
  622. * If profile_level_val is NULL and this method returns true, don't specify the
  623. * profile/level to the encoder.
  624. */
  625. static bool get_vt_hevc_profile_level(AVCodecContext *avctx,
  626. CFStringRef *profile_level_val)
  627. {
  628. VTEncContext *vtctx = avctx->priv_data;
  629. int64_t profile = vtctx->profile;
  630. *profile_level_val = NULL;
  631. switch (profile) {
  632. case HEVC_PROF_AUTO:
  633. return true;
  634. case HEVC_PROF_MAIN:
  635. *profile_level_val =
  636. compat_keys.kVTProfileLevel_HEVC_Main_AutoLevel;
  637. break;
  638. case HEVC_PROF_MAIN10:
  639. *profile_level_val =
  640. compat_keys.kVTProfileLevel_HEVC_Main10_AutoLevel;
  641. break;
  642. }
  643. if (!*profile_level_val) {
  644. av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
  645. return false;
  646. }
  647. return true;
  648. }
  649. static int get_cv_pixel_format(AVCodecContext* avctx,
  650. enum AVPixelFormat fmt,
  651. enum AVColorRange range,
  652. int* av_pixel_format,
  653. int* range_guessed)
  654. {
  655. if (range_guessed) *range_guessed = range != AVCOL_RANGE_MPEG &&
  656. range != AVCOL_RANGE_JPEG;
  657. //MPEG range is used when no range is set
  658. if (fmt == AV_PIX_FMT_NV12) {
  659. *av_pixel_format = range == AVCOL_RANGE_JPEG ?
  660. kCVPixelFormatType_420YpCbCr8BiPlanarFullRange :
  661. kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
  662. } else if (fmt == AV_PIX_FMT_YUV420P) {
  663. *av_pixel_format = range == AVCOL_RANGE_JPEG ?
  664. kCVPixelFormatType_420YpCbCr8PlanarFullRange :
  665. kCVPixelFormatType_420YpCbCr8Planar;
  666. } else if (fmt == AV_PIX_FMT_P010LE) {
  667. *av_pixel_format = range == AVCOL_RANGE_JPEG ?
  668. kCVPixelFormatType_420YpCbCr10BiPlanarFullRange :
  669. kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange;
  670. } else {
  671. return AVERROR(EINVAL);
  672. }
  673. return 0;
  674. }
  675. static void add_color_attr(AVCodecContext *avctx, CFMutableDictionaryRef dict) {
  676. VTEncContext *vtctx = avctx->priv_data;
  677. if (vtctx->color_primaries) {
  678. CFDictionarySetValue(dict,
  679. kCVImageBufferColorPrimariesKey,
  680. vtctx->color_primaries);
  681. }
  682. if (vtctx->transfer_function) {
  683. CFDictionarySetValue(dict,
  684. kCVImageBufferTransferFunctionKey,
  685. vtctx->transfer_function);
  686. }
  687. if (vtctx->ycbcr_matrix) {
  688. CFDictionarySetValue(dict,
  689. kCVImageBufferYCbCrMatrixKey,
  690. vtctx->ycbcr_matrix);
  691. }
  692. }
  693. static int create_cv_pixel_buffer_info(AVCodecContext* avctx,
  694. CFMutableDictionaryRef* dict)
  695. {
  696. CFNumberRef cv_color_format_num = NULL;
  697. CFNumberRef width_num = NULL;
  698. CFNumberRef height_num = NULL;
  699. CFMutableDictionaryRef pixel_buffer_info = NULL;
  700. int cv_color_format;
  701. int status = get_cv_pixel_format(avctx,
  702. avctx->pix_fmt,
  703. avctx->color_range,
  704. &cv_color_format,
  705. NULL);
  706. if (status) return status;
  707. pixel_buffer_info = CFDictionaryCreateMutable(
  708. kCFAllocatorDefault,
  709. 20,
  710. &kCFCopyStringDictionaryKeyCallBacks,
  711. &kCFTypeDictionaryValueCallBacks);
  712. if (!pixel_buffer_info) goto pbinfo_nomem;
  713. cv_color_format_num = CFNumberCreate(kCFAllocatorDefault,
  714. kCFNumberSInt32Type,
  715. &cv_color_format);
  716. if (!cv_color_format_num) goto pbinfo_nomem;
  717. CFDictionarySetValue(pixel_buffer_info,
  718. kCVPixelBufferPixelFormatTypeKey,
  719. cv_color_format_num);
  720. vt_release_num(&cv_color_format_num);
  721. width_num = CFNumberCreate(kCFAllocatorDefault,
  722. kCFNumberSInt32Type,
  723. &avctx->width);
  724. if (!width_num) return AVERROR(ENOMEM);
  725. CFDictionarySetValue(pixel_buffer_info,
  726. kCVPixelBufferWidthKey,
  727. width_num);
  728. vt_release_num(&width_num);
  729. height_num = CFNumberCreate(kCFAllocatorDefault,
  730. kCFNumberSInt32Type,
  731. &avctx->height);
  732. if (!height_num) goto pbinfo_nomem;
  733. CFDictionarySetValue(pixel_buffer_info,
  734. kCVPixelBufferHeightKey,
  735. height_num);
  736. vt_release_num(&height_num);
  737. add_color_attr(avctx, pixel_buffer_info);
  738. *dict = pixel_buffer_info;
  739. return 0;
  740. pbinfo_nomem:
  741. vt_release_num(&cv_color_format_num);
  742. vt_release_num(&width_num);
  743. vt_release_num(&height_num);
  744. if (pixel_buffer_info) CFRelease(pixel_buffer_info);
  745. return AVERROR(ENOMEM);
  746. }
  747. static int get_cv_color_primaries(AVCodecContext *avctx,
  748. CFStringRef *primaries)
  749. {
  750. enum AVColorPrimaries pri = avctx->color_primaries;
  751. switch (pri) {
  752. case AVCOL_PRI_UNSPECIFIED:
  753. *primaries = NULL;
  754. break;
  755. case AVCOL_PRI_BT470BG:
  756. *primaries = kCVImageBufferColorPrimaries_EBU_3213;
  757. break;
  758. case AVCOL_PRI_SMPTE170M:
  759. *primaries = kCVImageBufferColorPrimaries_SMPTE_C;
  760. break;
  761. case AVCOL_PRI_BT709:
  762. *primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
  763. break;
  764. case AVCOL_PRI_BT2020:
  765. *primaries = compat_keys.kCVImageBufferColorPrimaries_ITU_R_2020;
  766. break;
  767. default:
  768. av_log(avctx, AV_LOG_ERROR, "Color primaries %s is not supported.\n", av_color_primaries_name(pri));
  769. *primaries = NULL;
  770. return -1;
  771. }
  772. return 0;
  773. }
/*
 * Translate avctx->color_trc into a CoreVideo transfer-function constant.
 * For the pure-gamma TRCs, *gamma_level additionally receives a newly
 * created CFNumber (caller owns it); otherwise it is set to NULL.
 * Returns -1 for unsupported transfer characteristics.
 */
static int get_cv_transfer_function(AVCodecContext *avctx,
                                    CFStringRef *transfer_fnc,
                                    CFNumberRef *gamma_level)
{
    enum AVColorTransferCharacteristic trc = avctx->color_trc;
    Float32 gamma;
    *gamma_level = NULL;

    switch (trc) {
        case AVCOL_TRC_UNSPECIFIED:
            *transfer_fnc = NULL;
            break;

        case AVCOL_TRC_BT709:
            *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_709_2;
            break;

        case AVCOL_TRC_SMPTE240M:
            *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_240M_1995;
            break;

/* These constants only exist in newer SDKs, hence the guards. */
#if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ
        case AVCOL_TRC_SMPTE2084:
            *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_ST_2084_PQ;
            break;
#endif
#if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR
        case AVCOL_TRC_LINEAR:
            *transfer_fnc = kCVImageBufferTransferFunction_Linear;
            break;
#endif
#if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG
        case AVCOL_TRC_ARIB_STD_B67:
            *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_2100_HLG;
            break;
#endif

        case AVCOL_TRC_GAMMA22:
            gamma = 2.2;
            *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
            *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
            break;

        case AVCOL_TRC_GAMMA28:
            gamma = 2.8;
            *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
            *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
            break;

        case AVCOL_TRC_BT2020_10:
        case AVCOL_TRC_BT2020_12:
            *transfer_fnc = compat_keys.kCVImageBufferTransferFunction_ITU_R_2020;
            break;

        default:
            *transfer_fnc = NULL;
            av_log(avctx, AV_LOG_ERROR, "Transfer function %s is not supported.\n", av_color_transfer_name(trc));
            return -1;
    }

    return 0;
}
  827. static int get_cv_ycbcr_matrix(AVCodecContext *avctx, CFStringRef *matrix) {
  828. switch(avctx->colorspace) {
  829. case AVCOL_SPC_BT709:
  830. *matrix = kCVImageBufferYCbCrMatrix_ITU_R_709_2;
  831. break;
  832. case AVCOL_SPC_UNSPECIFIED:
  833. *matrix = NULL;
  834. break;
  835. case AVCOL_SPC_BT470BG:
  836. case AVCOL_SPC_SMPTE170M:
  837. *matrix = kCVImageBufferYCbCrMatrix_ITU_R_601_4;
  838. break;
  839. case AVCOL_SPC_SMPTE240M:
  840. *matrix = kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
  841. break;
  842. case AVCOL_SPC_BT2020_NCL:
  843. *matrix = compat_keys.kCVImageBufferYCbCrMatrix_ITU_R_2020;
  844. break;
  845. default:
  846. av_log(avctx, AV_LOG_ERROR, "Color space %s is not supported.\n", av_color_space_name(avctx->colorspace));
  847. return -1;
  848. }
  849. return 0;
  850. }
/**
 * Create the VTCompressionSession and apply the encoder configuration
 * taken from the codec context.
 *
 * Failures while setting optional properties (transfer function, matrix,
 * primaries, gamma, entropy, realtime) are logged but not fatal; failures
 * on required properties (bitrate, data rate limits, keyframe interval,
 * frame reordering) abort with AVERROR_EXTERNAL.
 *
 * @param avctx             Codec context (bit rate, GOP size, SAR, ... are read from it).
 * @param codec_type        CoreMedia codec type (H.264 or HEVC).
 * @param profile_level     Profile/level CFString, or NULL to let VT pick.
 * @param gamma_level       Optional gamma level CFNumber, or NULL.
 * @param enc_info          Encoder specification dictionary (HW/SW selection).
 * @param pixel_buffer_info Source pixel buffer attributes, or NULL.
 * @param session           Receives the created session.
 *                          NOTE(review): the error check below reads
 *                          vtctx->session, so callers are expected to pass
 *                          &vtctx->session here — true for the visible caller.
 * @return 0 on success, AVERROR(ENOMEM) or AVERROR_EXTERNAL on failure.
 */
static int vtenc_create_encoder(AVCodecContext *avctx,
                                CMVideoCodecType codec_type,
                                CFStringRef profile_level,
                                CFNumberRef gamma_level,
                                CFDictionaryRef enc_info,
                                CFDictionaryRef pixel_buffer_info,
                                VTCompressionSessionRef *session)
{
    VTEncContext *vtctx = avctx->priv_data;
    SInt32 bit_rate = avctx->bit_rate;
    SInt32 max_rate = avctx->rc_max_rate;
    CFNumberRef bit_rate_num;
    CFNumberRef bytes_per_second;
    CFNumberRef one_second;
    CFArrayRef data_rate_limits;
    int64_t bytes_per_second_value = 0;
    int64_t one_second_value = 0;
    void *nums[2];

    int status = VTCompressionSessionCreate(kCFAllocatorDefault,
                                            avctx->width,
                                            avctx->height,
                                            codec_type,
                                            enc_info,
                                            pixel_buffer_info,
                                            kCFAllocatorDefault,
                                            vtenc_output_callback,
                                            avctx,
                                            session);

    if (status || !vtctx->session) {
        av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status);

#if !TARGET_OS_IPHONE
        if (!vtctx->allow_sw) {
            av_log(avctx, AV_LOG_ERROR, "Try -allow_sw 1. The hardware encoder may be busy, or not supported.\n");
        }
#endif

        return AVERROR_EXTERNAL;
    }

    /* Average bitrate (required). */
    bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
                                  kCFNumberSInt32Type,
                                  &bit_rate);
    if (!bit_rate_num) return AVERROR(ENOMEM);

    status = VTSessionSetProperty(vtctx->session,
                                  kVTCompressionPropertyKey_AverageBitRate,
                                  bit_rate_num);
    CFRelease(bit_rate_num);

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error setting bitrate property: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    if (vtctx->codec_id == AV_CODEC_ID_H264 && max_rate > 0) {
        // kVTCompressionPropertyKey_DataRateLimits is not available for HEVC
        /* The limit is expressed as [bytes, seconds]: max_rate bits/s
         * becomes (max_rate / 8) bytes over a one-second window. */
        bytes_per_second_value = max_rate >> 3;
        bytes_per_second = CFNumberCreate(kCFAllocatorDefault,
                                          kCFNumberSInt64Type,
                                          &bytes_per_second_value);
        if (!bytes_per_second) {
            return AVERROR(ENOMEM);
        }
        one_second_value = 1;
        one_second = CFNumberCreate(kCFAllocatorDefault,
                                    kCFNumberSInt64Type,
                                    &one_second_value);
        if (!one_second) {
            CFRelease(bytes_per_second);
            return AVERROR(ENOMEM);
        }
        nums[0] = (void *)bytes_per_second;
        nums[1] = (void *)one_second;
        data_rate_limits = CFArrayCreate(kCFAllocatorDefault,
                                         (const void **)nums,
                                         2,
                                         &kCFTypeArrayCallBacks);

        if (!data_rate_limits) {
            CFRelease(bytes_per_second);
            CFRelease(one_second);
            return AVERROR(ENOMEM);
        }
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_DataRateLimits,
                                      data_rate_limits);

        CFRelease(bytes_per_second);
        CFRelease(one_second);
        CFRelease(data_rate_limits);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting max bitrate property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    /* Profile/level: a failure here is non-fatal — VT falls back to a
     * supported combination. */
    if (profile_level) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_ProfileLevel,
                                      profile_level);
        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d. Output will be encoded using a supported profile/level combination.\n", status);
        }
    }

    /* Keyframe interval from gop_size (required if set). */
    if (avctx->gop_size > 0) {
        CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault,
                                              kCFNumberIntType,
                                              &avctx->gop_size);
        if (!interval) {
            return AVERROR(ENOMEM);
        }

        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_MaxKeyFrameInterval,
                                      interval);
        CFRelease(interval);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting 'max key-frame interval' property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    /* Optional hints that more frames exist before/after this stream
     * segment; unsupported on some devices, so only warn. */
    if (vtctx->frames_before) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_MoreFramesBeforeStart,
                                      kCFBooleanTrue);

        if (status == kVTPropertyNotSupportedErr) {
            av_log(avctx, AV_LOG_WARNING, "frames_before property is not supported on this device. Ignoring.\n");
        } else if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting frames_before property: %d\n", status);
        }
    }

    if (vtctx->frames_after) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_MoreFramesAfterEnd,
                                      kCFBooleanTrue);

        if (status == kVTPropertyNotSupportedErr) {
            av_log(avctx, AV_LOG_WARNING, "frames_after property is not supported on this device. Ignoring.\n");
        } else if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting frames_after property: %d\n", status);
        }
    }

    /* Sample aspect ratio, reduced to fit 32 bits per component.
     * Note this mutates avctx->sample_aspect_ratio in place. */
    if (avctx->sample_aspect_ratio.num != 0) {
        CFNumberRef num;
        CFNumberRef den;
        CFMutableDictionaryRef par;
        AVRational *avpar = &avctx->sample_aspect_ratio;

        av_reduce(&avpar->num, &avpar->den,
                   avpar->num, avpar->den,
                  0xFFFFFFFF);

        num = CFNumberCreate(kCFAllocatorDefault,
                             kCFNumberIntType,
                             &avpar->num);

        den = CFNumberCreate(kCFAllocatorDefault,
                             kCFNumberIntType,
                             &avpar->den);

        par = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                        2,
                                        &kCFCopyStringDictionaryKeyCallBacks,
                                        &kCFTypeDictionaryValueCallBacks);

        if (!par || !num || !den) {
            if (par) CFRelease(par);
            if (num) CFRelease(num);
            if (den) CFRelease(den);

            return AVERROR(ENOMEM);
        }

        CFDictionarySetValue(
            par,
            kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing,
            num);

        CFDictionarySetValue(
            par,
            kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing,
            den);

        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_PixelAspectRatio,
                                      par);

        CFRelease(par);
        CFRelease(num);
        CFRelease(den);

        if (status) {
            av_log(avctx,
                   AV_LOG_ERROR,
                   "Error setting pixel aspect ratio to %d:%d: %d.\n",
                   avctx->sample_aspect_ratio.num,
                   avctx->sample_aspect_ratio.den,
                   status);

            return AVERROR_EXTERNAL;
        }
    }

    /* Color description properties — all best-effort. */
    if (vtctx->transfer_function) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_TransferFunction,
                                      vtctx->transfer_function);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set transfer function: %d\n", status);
        }
    }

    if (vtctx->ycbcr_matrix) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_YCbCrMatrix,
                                      vtctx->ycbcr_matrix);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set ycbcr matrix: %d\n", status);
        }
    }

    if (vtctx->color_primaries) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_ColorPrimaries,
                                      vtctx->color_primaries);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set color primaries: %d\n", status);
        }
    }

    if (gamma_level) {
        status = VTSessionSetProperty(vtctx->session,
                                      kCVImageBufferGammaLevelKey,
                                      gamma_level);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set gamma level: %d\n", status);
        }
    }

    /* With B-frames disabled, forbid frame reordering so pts == dts. */
    if (!vtctx->has_b_frames) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_AllowFrameReordering,
                                      kCFBooleanFalse);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting 'allow frame reordering' property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    /* H.264 entropy coder selection (CABAC/CAVLC), if requested. */
    if (vtctx->entropy != VT_ENTROPY_NOT_SET) {
        CFStringRef entropy = vtctx->entropy == VT_CABAC ?
                                compat_keys.kVTH264EntropyMode_CABAC:
                                compat_keys.kVTH264EntropyMode_CAVLC;

        status = VTSessionSetProperty(vtctx->session,
                                      compat_keys.kVTCompressionPropertyKey_H264EntropyMode,
                                      entropy);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting entropy property: %d\n", status);
        }
    }

    if (vtctx->realtime) {
        status = VTSessionSetProperty(vtctx->session,
                                      compat_keys.kVTCompressionPropertyKey_RealTime,
                                      kCFBooleanTrue);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting realtime property: %d\n", status);
        }
    }

    status = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error: cannot prepare encoder: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    return 0;
}
/**
 * Resolve the codec type, profile/level and parameter-set accessor for
 * the selected codec, build the encoder-specification and pixel buffer
 * dictionaries, then create the compression session (optionally
 * populating global extradata first).
 *
 * @return 0 on success, a negative AVERROR code otherwise.
 */
static int vtenc_configure_encoder(AVCodecContext *avctx)
{
    CFMutableDictionaryRef enc_info;
    CFMutableDictionaryRef pixel_buffer_info;
    CMVideoCodecType codec_type;
    VTEncContext *vtctx = avctx->priv_data;
    CFStringRef profile_level;
    CFNumberRef gamma_level = NULL;
    int status;

    codec_type = get_cm_codec_type(avctx->codec_id);
    if (!codec_type) {
        av_log(avctx, AV_LOG_ERROR, "Error: no mapping for AVCodecID %d\n", avctx->codec_id);
        return AVERROR(EINVAL);
    }

    vtctx->codec_id = avctx->codec_id;

    if (vtctx->codec_id == AV_CODEC_ID_H264) {
        vtctx->get_param_set_func = CMVideoFormatDescriptionGetH264ParameterSetAtIndex;

        vtctx->has_b_frames = avctx->max_b_frames > 0;
        /* Baseline profile has no B-frames and no CABAC. */
        if(vtctx->has_b_frames && vtctx->profile == H264_PROF_BASELINE){
            av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n");
            vtctx->has_b_frames = false;
        }

        if (vtctx->entropy == VT_CABAC && vtctx->profile == H264_PROF_BASELINE) {
            av_log(avctx, AV_LOG_WARNING, "CABAC entropy requires 'main' or 'high' profile, but baseline was requested. Encode will not use CABAC entropy.\n");
            vtctx->entropy = VT_ENTROPY_NOT_SET;
        }

        /* The profile/level getters return truthy on success. */
        if (!get_vt_h264_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
    } else {
        /* HEVC: accessor is resolved at load time and may be absent on
         * older systems. */
        vtctx->get_param_set_func = compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
        if (!vtctx->get_param_set_func) return AVERROR(EINVAL);
        if (!get_vt_hevc_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
    }

    enc_info = CFDictionaryCreateMutable(
        kCFAllocatorDefault,
        20,
        &kCFCopyStringDictionaryKeyCallBacks,
        &kCFTypeDictionaryValueCallBacks
    );
    if (!enc_info) return AVERROR(ENOMEM);

#if !TARGET_OS_IPHONE
    /* Hardware/software encoder selection is only configurable on macOS. */
    if(vtctx->require_sw) {
        CFDictionarySetValue(enc_info,
                             compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
                             kCFBooleanFalse);
    } else if (!vtctx->allow_sw) {
        CFDictionarySetValue(enc_info,
                             compat_keys.kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
                             kCFBooleanTrue);
    } else {
        CFDictionarySetValue(enc_info,
                             compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
                             kCFBooleanTrue);
    }
#endif

    if (avctx->pix_fmt != AV_PIX_FMT_VIDEOTOOLBOX) {
        status = create_cv_pixel_buffer_info(avctx, &pixel_buffer_info);
        if (status)
            goto init_cleanup;
    } else {
        /* Frames already arrive as CVPixelBuffers; no attributes needed. */
        pixel_buffer_info = NULL;
    }

    vtctx->dts_delta = vtctx->has_b_frames ? -1 : 0;

    /* Best effort: return values are deliberately ignored; a failure just
     * means the corresponding property is not set on the session later. */
    get_cv_transfer_function(avctx, &vtctx->transfer_function, &gamma_level);
    get_cv_ycbcr_matrix(avctx, &vtctx->ycbcr_matrix);
    get_cv_color_primaries(avctx, &vtctx->color_primaries);

    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        status = vtenc_populate_extradata(avctx,
                                          codec_type,
                                          profile_level,
                                          gamma_level,
                                          enc_info,
                                          pixel_buffer_info);
        if (status)
            goto init_cleanup;
    }

    status = vtenc_create_encoder(avctx,
                                  codec_type,
                                  profile_level,
                                  gamma_level,
                                  enc_info,
                                  pixel_buffer_info,
                                  &vtctx->session);

init_cleanup:
    if (gamma_level)
        CFRelease(gamma_level);

    if (pixel_buffer_info)
        CFRelease(pixel_buffer_info);

    CFRelease(enc_info);

    return status;
}
/**
 * Encoder init entry point: resolves optional VT symbols once per
 * process, sets up the per-context lock/condvar, creates the session,
 * and checks whether the encoder will actually emit B-frames.
 */
static av_cold int vtenc_init(AVCodecContext *avctx)
{
    VTEncContext *vtctx = avctx->priv_data;
    CFBooleanRef has_b_frames_cfbool;
    int status;

    /* Load dlopen'd VideoToolbox/CoreMedia symbols exactly once. */
    pthread_once(&once_ctrl, loadVTEncSymbols);

    pthread_mutex_init(&vtctx->lock, NULL);
    pthread_cond_init(&vtctx->cv_sample_sent, NULL);

    vtctx->session = NULL;
    status = vtenc_configure_encoder(avctx);
    if (status) return status;

    /* Ask the session whether reordering is actually enabled; failure to
     * query is non-fatal and leaves the configured value in place. */
    status = VTSessionCopyProperty(vtctx->session,
                                   kVTCompressionPropertyKey_AllowFrameReordering,
                                   kCFAllocatorDefault,
                                   &has_b_frames_cfbool);

    if (!status && has_b_frames_cfbool) {
        //Some devices don't output B-frames for main profile, even if requested.
        vtctx->has_b_frames = CFBooleanGetValue(has_b_frames_cfbool);
        CFRelease(has_b_frames_cfbool);
    }
    avctx->has_b_frames = vtctx->has_b_frames;

    return 0;
}
  1211. static void vtenc_get_frame_info(CMSampleBufferRef buffer, bool *is_key_frame)
  1212. {
  1213. CFArrayRef attachments;
  1214. CFDictionaryRef attachment;
  1215. CFBooleanRef not_sync;
  1216. CFIndex len;
  1217. attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
  1218. len = !attachments ? 0 : CFArrayGetCount(attachments);
  1219. if (!len) {
  1220. *is_key_frame = true;
  1221. return;
  1222. }
  1223. attachment = CFArrayGetValueAtIndex(attachments, 0);
  1224. if (CFDictionaryGetValueIfPresent(attachment,
  1225. kCMSampleAttachmentKey_NotSync,
  1226. (const void **)&not_sync))
  1227. {
  1228. *is_key_frame = !CFBooleanGetValue(not_sync);
  1229. } else {
  1230. *is_key_frame = true;
  1231. }
  1232. }
  1233. static int is_post_sei_nal_type(int nal_type){
  1234. return nal_type != H264_NAL_SEI &&
  1235. nal_type != H264_NAL_SPS &&
  1236. nal_type != H264_NAL_PPS &&
  1237. nal_type != H264_NAL_AUD;
  1238. }
/*
 * Walks an H.264 SEI NAL unit (payload without emulation prevention
 * bytes) to find where its messages end, so a new message can be
 * appended there.
 *
 * @param nal_data NAL unit, starting at the NAL header byte.
 * @param nal_size Size of nal_data in bytes.
 * @param sei_end  Set to one past the last SEI message on success,
 *                 NULL otherwise.
 * @return Number of bytes from nal_start through *sei_end on success,
 *         0 if this is not an SEI NAL unit, AVERROR_INVALIDDATA on a
 *         truncated unit.
 */
static int find_sei_end(AVCodecContext *avctx,
                        uint8_t *nal_data,
                        size_t nal_size,
                        uint8_t **sei_end)
{
    int nal_type;
    size_t sei_payload_size = 0;
    int sei_payload_type = 0;
    *sei_end = NULL;
    uint8_t *nal_start = nal_data;

    if (!nal_size)
        return 0;

    /* Low 5 bits of the NAL header byte are the NAL unit type. */
    nal_type = *nal_data & 0x1F;
    if (nal_type != H264_NAL_SEI)
        return 0;

    nal_data++;
    nal_size--;

    /* Strip the rbsp_trailing_bits byte if present so it is overwritten
     * when a message is appended. */
    if (nal_data[nal_size - 1] == 0x80)
        nal_size--;

    /* Each sei_message encodes payload type and size as runs of 0xFF
     * bytes plus a final byte, summed together. A leading 0x00 cannot
     * start a message, which terminates the loop.
     * NOTE(review): sei_payload_type/size are never reset between loop
     * iterations, so with multiple messages the accumulated size skips
     * too far — confirm whether multi-message SEI NALs can occur here. */
    while (nal_size > 0 && *nal_data > 0) {
        do{
            sei_payload_type += *nal_data;
            nal_data++;
            nal_size--;
        } while (nal_size > 0 && *nal_data == 0xFF);

        if (!nal_size) {
            av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing type.\n");
            return AVERROR_INVALIDDATA;
        }

        do{
            sei_payload_size += *nal_data;
            nal_data++;
            nal_size--;
        } while (nal_size > 0 && *nal_data == 0xFF);

        if (nal_size < sei_payload_size) {
            av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing size.\n");
            return AVERROR_INVALIDDATA;
        }

        nal_data += sei_payload_size;
        nal_size -= sei_payload_size;
    }

    *sei_end = nal_data;

    /* +1 presumably accounts for the stripped trailing byte — TODO confirm
     * against the caller's use of this length. */
    return nal_data - nal_start + 1;
}
  1287. /**
  1288. * Copies the data inserting emulation prevention bytes as needed.
  1289. * Existing data in the destination can be taken into account by providing
  1290. * dst with a dst_offset > 0.
  1291. *
  1292. * @return The number of bytes copied on success. On failure, the negative of
  1293. * the number of bytes needed to copy src is returned.
  1294. */
  1295. static int copy_emulation_prev(const uint8_t *src,
  1296. size_t src_size,
  1297. uint8_t *dst,
  1298. ssize_t dst_offset,
  1299. size_t dst_size)
  1300. {
  1301. int zeros = 0;
  1302. int wrote_bytes;
  1303. uint8_t* dst_start;
  1304. uint8_t* dst_end = dst + dst_size;
  1305. const uint8_t* src_end = src + src_size;
  1306. int start_at = dst_offset > 2 ? dst_offset - 2 : 0;
  1307. int i;
  1308. for (i = start_at; i < dst_offset && i < dst_size; i++) {
  1309. if (!dst[i])
  1310. zeros++;
  1311. else
  1312. zeros = 0;
  1313. }
  1314. dst += dst_offset;
  1315. dst_start = dst;
  1316. for (; src < src_end; src++, dst++) {
  1317. if (zeros == 2) {
  1318. int insert_ep3_byte = *src <= 3;
  1319. if (insert_ep3_byte) {
  1320. if (dst < dst_end)
  1321. *dst = 3;
  1322. dst++;
  1323. }
  1324. zeros = 0;
  1325. }
  1326. if (dst < dst_end)
  1327. *dst = *src;
  1328. if (!*src)
  1329. zeros++;
  1330. else
  1331. zeros = 0;
  1332. }
  1333. wrote_bytes = dst - dst_start;
  1334. if (dst > dst_end)
  1335. return -wrote_bytes;
  1336. return wrote_bytes;
  1337. }
  1338. static int write_sei(const ExtraSEI *sei,
  1339. int sei_type,
  1340. uint8_t *dst,
  1341. size_t dst_size)
  1342. {
  1343. uint8_t *sei_start = dst;
  1344. size_t remaining_sei_size = sei->size;
  1345. size_t remaining_dst_size = dst_size;
  1346. int header_bytes;
  1347. int bytes_written;
  1348. ssize_t offset;
  1349. if (!remaining_dst_size)
  1350. return AVERROR_BUFFER_TOO_SMALL;
  1351. while (sei_type && remaining_dst_size != 0) {
  1352. int sei_byte = sei_type > 255 ? 255 : sei_type;
  1353. *dst = sei_byte;
  1354. sei_type -= sei_byte;
  1355. dst++;
  1356. remaining_dst_size--;
  1357. }
  1358. if (!dst_size)
  1359. return AVERROR_BUFFER_TOO_SMALL;
  1360. while (remaining_sei_size && remaining_dst_size != 0) {
  1361. int size_byte = remaining_sei_size > 255 ? 255 : remaining_sei_size;
  1362. *dst = size_byte;
  1363. remaining_sei_size -= size_byte;
  1364. dst++;
  1365. remaining_dst_size--;
  1366. }
  1367. if (remaining_dst_size < sei->size)
  1368. return AVERROR_BUFFER_TOO_SMALL;
  1369. header_bytes = dst - sei_start;
  1370. offset = header_bytes;
  1371. bytes_written = copy_emulation_prev(sei->data,
  1372. sei->size,
  1373. sei_start,
  1374. offset,
  1375. dst_size);
  1376. if (bytes_written < 0)
  1377. return AVERROR_BUFFER_TOO_SMALL;
  1378. bytes_written += header_bytes;
  1379. return bytes_written;
  1380. }
  1381. /**
  1382. * Copies NAL units and replaces length codes with
  1383. * H.264 Annex B start codes. On failure, the contents of
  1384. * dst_data may have been modified.
  1385. *
  1386. * @param length_code_size Byte length of each length code
  1387. * @param sample_buffer NAL units prefixed with length codes.
  1388. * @param sei Optional A53 closed captions SEI data.
  1389. * @param dst_data Must be zeroed before calling this function.
  1390. * Contains the copied NAL units prefixed with
  1391. * start codes when the function returns
  1392. * successfully.
  1393. * @param dst_size Length of dst_data
  1394. * @return 0 on success
  1395. * AVERROR_INVALIDDATA if length_code_size is invalid
  1396. * AVERROR_BUFFER_TOO_SMALL if dst_data is too small
  1397. * or if a length_code in src_data specifies data beyond
  1398. * the end of its buffer.
  1399. */
  1400. static int copy_replace_length_codes(
  1401. AVCodecContext *avctx,
  1402. size_t length_code_size,
  1403. CMSampleBufferRef sample_buffer,
  1404. ExtraSEI *sei,
  1405. uint8_t *dst_data,
  1406. size_t dst_size)
  1407. {
  1408. size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
  1409. size_t remaining_src_size = src_size;
  1410. size_t remaining_dst_size = dst_size;
  1411. size_t src_offset = 0;
  1412. int wrote_sei = 0;
  1413. int status;
  1414. uint8_t size_buf[4];
  1415. uint8_t nal_type;
  1416. CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
  1417. if (length_code_size > 4) {
  1418. return AVERROR_INVALIDDATA;
  1419. }
  1420. while (remaining_src_size > 0) {
  1421. size_t curr_src_len;
  1422. size_t curr_dst_len;
  1423. size_t box_len = 0;
  1424. size_t i;
  1425. uint8_t *dst_box;
  1426. status = CMBlockBufferCopyDataBytes(block,
  1427. src_offset,
  1428. length_code_size,
  1429. size_buf);
  1430. if (status) {
  1431. av_log(avctx, AV_LOG_ERROR, "Cannot copy length: %d\n", status);
  1432. return AVERROR_EXTERNAL;
  1433. }
  1434. status = CMBlockBufferCopyDataBytes(block,
  1435. src_offset + length_code_size,
  1436. 1,
  1437. &nal_type);
  1438. if (status) {
  1439. av_log(avctx, AV_LOG_ERROR, "Cannot copy type: %d\n", status);
  1440. return AVERROR_EXTERNAL;
  1441. }
  1442. nal_type &= 0x1F;
  1443. for (i = 0; i < length_code_size; i++) {
  1444. box_len <<= 8;
  1445. box_len |= size_buf[i];
  1446. }
  1447. if (sei && !wrote_sei && is_post_sei_nal_type(nal_type)) {
  1448. //No SEI NAL unit - insert.
  1449. int wrote_bytes;
  1450. memcpy(dst_data, start_code, sizeof(start_code));
  1451. dst_data += sizeof(start_code);
  1452. remaining_dst_size -= sizeof(start_code);
  1453. *dst_data = H264_NAL_SEI;
  1454. dst_data++;
  1455. remaining_dst_size--;
  1456. wrote_bytes = write_sei(sei,
  1457. SEI_TYPE_USER_DATA_REGISTERED_ITU_T_T35,
  1458. dst_data,
  1459. remaining_dst_size);
  1460. if (wrote_bytes < 0)
  1461. return wrote_bytes;
  1462. remaining_dst_size -= wrote_bytes;
  1463. dst_data += wrote_bytes;
  1464. if (remaining_dst_size <= 0)
  1465. return AVERROR_BUFFER_TOO_SMALL;
  1466. *dst_data = 0x80;
  1467. dst_data++;
  1468. remaining_dst_size--;
  1469. wrote_sei = 1;
  1470. }
  1471. curr_src_len = box_len + length_code_size;
  1472. curr_dst_len = box_len + sizeof(start_code);
  1473. if (remaining_src_size < curr_src_len) {
  1474. return AVERROR_BUFFER_TOO_SMALL;
  1475. }
  1476. if (remaining_dst_size < curr_dst_len) {
  1477. return AVERROR_BUFFER_TOO_SMALL;
  1478. }
  1479. dst_box = dst_data + sizeof(start_code);
  1480. memcpy(dst_data, start_code, sizeof(start_code));
  1481. status = CMBlockBufferCopyDataBytes(block,
  1482. src_offset + length_code_size,
  1483. box_len,
  1484. dst_box);
  1485. if (status) {
  1486. av_log(avctx, AV_LOG_ERROR, "Cannot copy data: %d\n", status);
  1487. return AVERROR_EXTERNAL;
  1488. }
  1489. if (sei && !wrote_sei && nal_type == H264_NAL_SEI) {
  1490. //Found SEI NAL unit - append.
  1491. int wrote_bytes;
  1492. int old_sei_length;
  1493. int extra_bytes;
  1494. uint8_t *new_sei;
  1495. old_sei_length = find_sei_end(avctx, dst_box, box_len, &new_sei);
  1496. if (old_sei_length < 0)
  1497. return status;
  1498. wrote_bytes = write_sei(sei,
  1499. SEI_TYPE_USER_DATA_REGISTERED_ITU_T_T35,
  1500. new_sei,
  1501. remaining_dst_size - old_sei_length);
  1502. if (wrote_bytes < 0)
  1503. return wrote_bytes;
  1504. if (new_sei + wrote_bytes >= dst_data + remaining_dst_size)
  1505. return AVERROR_BUFFER_TOO_SMALL;
  1506. new_sei[wrote_bytes++] = 0x80;
  1507. extra_bytes = wrote_bytes - (dst_box + box_len - new_sei);
  1508. dst_data += extra_bytes;
  1509. remaining_dst_size -= extra_bytes;
  1510. wrote_sei = 1;
  1511. }
  1512. src_offset += curr_src_len;
  1513. dst_data += curr_dst_len;
  1514. remaining_src_size -= curr_src_len;
  1515. remaining_dst_size -= curr_dst_len;
  1516. }
  1517. return 0;
  1518. }
  1519. /**
  1520. * Returns a sufficient number of bytes to contain the sei data.
  1521. * It may be greater than the minimum required.
  1522. */
  1523. static int get_sei_msg_bytes(const ExtraSEI* sei, int type){
  1524. int copied_size;
  1525. if (sei->size == 0)
  1526. return 0;
  1527. copied_size = -copy_emulation_prev(sei->data,
  1528. sei->size,
  1529. NULL,
  1530. 0,
  1531. 0);
  1532. if ((sei->size % 255) == 0) //may result in an extra byte
  1533. copied_size++;
  1534. return copied_size + sei->size / 255 + 1 + type / 255 + 1;
  1535. }
/**
 * Convert an encoded CMSampleBuffer into an AVPacket: optionally prepend
 * the parameter sets (keyframes without global headers), rewrite
 * length-prefixed NAL units as Annex B, insert the optional A53 SEI,
 * and fill in pts/dts.
 *
 * @param sample_buffer Encoded output from the VT session.
 * @param pkt           Packet to fill; allocated here.
 * @param sei           Optional SEI payload to insert, or NULL.
 * @return 0 on success, a negative AVERROR code otherwise.
 */
static int vtenc_cm_to_avpacket(
    AVCodecContext    *avctx,
    CMSampleBufferRef sample_buffer,
    AVPacket          *pkt,
    ExtraSEI          *sei)
{
    VTEncContext *vtctx = avctx->priv_data;

    int     status;
    bool    is_key_frame;
    bool    add_header;
    size_t  length_code_size;
    size_t  header_size = 0;
    size_t  in_buf_size;
    size_t  out_buf_size;
    size_t  sei_nalu_size = 0;
    int64_t dts_delta;
    int64_t time_base_num;
    int nalu_count;
    CMTime  pts;
    CMTime  dts;
    CMVideoFormatDescriptionRef vid_fmt;

    vtenc_get_frame_info(sample_buffer, &is_key_frame);

    status = get_length_code_size(avctx, sample_buffer, &length_code_size);
    if (status) return status;

    /* Parameter sets go into each keyframe only when they are not
     * delivered via global extradata. */
    add_header = is_key_frame && !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);

    if (add_header) {
        vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
        if (!vid_fmt) {
            av_log(avctx, AV_LOG_ERROR, "Cannot get format description.\n");
            return AVERROR_EXTERNAL;
        }

        /* NOTE(review): this inner `status` shadows the outer one; it is
         * only used for the early return on the next line. */
        int status = get_params_size(avctx, vid_fmt, &header_size);
        if (status) return status;
    }

    status = count_nalus(length_code_size, sample_buffer, &nalu_count);
    if(status)
        return status;

    if (sei) {
        /* start code + NAL header byte + message + trailing 0x80 byte */
        size_t msg_size = get_sei_msg_bytes(sei,
                                            SEI_TYPE_USER_DATA_REGISTERED_ITU_T_T35);

        sei_nalu_size = sizeof(start_code) + 1 + msg_size + 1;
    }

    in_buf_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
    /* Each NAL's length code is replaced by a start code, hence the
     * per-NAL size delta. */
    out_buf_size = header_size +
                   in_buf_size +
                   sei_nalu_size +
                   nalu_count * ((int)sizeof(start_code) - (int)length_code_size);

    status = ff_alloc_packet2(avctx, pkt, out_buf_size, out_buf_size);
    if (status < 0)
        return status;

    if (add_header) {
        status = copy_param_sets(avctx, vid_fmt, pkt->data, out_buf_size);
        if(status) return status;
    }

    status = copy_replace_length_codes(
        avctx,
        length_code_size,
        sample_buffer,
        sei,
        pkt->data + header_size,
        pkt->size - header_size
    );

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error copying packet data: %d\n", status);
        return status;
    }

    if (is_key_frame) {
        pkt->flags |= AV_PKT_FLAG_KEY;
    }

    pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
    dts = CMSampleBufferGetDecodeTimeStamp      (sample_buffer);

    /* Without B-frames VT may not report a DTS; pts == dts then. */
    if (CMTIME_IS_INVALID(dts)) {
        if (!vtctx->has_b_frames) {
            dts = pts;
        } else {
            av_log(avctx, AV_LOG_ERROR, "DTS is invalid.\n");
            return AVERROR_EXTERNAL;
        }
    }

    /* Shift DTS so it never exceeds PTS when reordering is active. */
    dts_delta = vtctx->dts_delta >= 0 ? vtctx->dts_delta : 0;
    time_base_num = avctx->time_base.num;
    /* NOTE(review): assumes the CMTime values were created with a
     * timescale derived from avctx->time_base when the frame was
     * submitted — confirm against the encode path (not visible here). */
    pkt->pts = pts.value / time_base_num;
    pkt->dts = dts.value / time_base_num - dts_delta;
    pkt->size = out_buf_size;

    return 0;
}
/*
 * Fills per-plane width/height/stride info for the frame's pixel format.
 *
 * contiguous_buf_size is 0 if not contiguous, and the size of the buffer
 * containing all planes if so.
 *
 * @param color      Receives the CoreVideo pixel format code.
 * @param plane_count Receives the number of planes (2 or 3).
 * @param widths/heights/strides Per-plane geometry arrays.
 * @return 0 on success, AVERROR(EINVAL) for unsupported formats.
 */
static int get_cv_pixel_info(
    AVCodecContext *avctx,
    const AVFrame  *frame,
    int            *color,
    int            *plane_count,
    size_t         *widths,
    size_t         *heights,
    size_t         *strides,
    size_t         *contiguous_buf_size)
{
    VTEncContext *vtctx = avctx->priv_data;
    int av_format       = frame->format;
    int av_color_range  = frame->color_range;
    int i;
    int range_guessed;
    int status;

    status = get_cv_pixel_format(avctx, av_format, av_color_range, color, &range_guessed);
    if (status) {
        av_log(avctx,
            AV_LOG_ERROR,
            "Could not get pixel format for color format '%s' range '%s'.\n",
            av_get_pix_fmt_name(av_format),
            av_color_range > AVCOL_RANGE_UNSPECIFIED &&
            av_color_range < AVCOL_RANGE_NB ?
               av_color_range_name(av_color_range) :
               "Unknown");

        return AVERROR(EINVAL);
    }

    /* Warn only once per context when the range had to be guessed. */
    if (range_guessed) {
        if (!vtctx->warned_color_range) {
            vtctx->warned_color_range = true;
            av_log(avctx,
                   AV_LOG_WARNING,
                   "Color range not set for %s. Using MPEG range.\n",
                   av_get_pix_fmt_name(av_format));
        }
    }

    /* NOTE(review): the `frame ? ... :` fallbacks below are vestigial —
     * frame is already dereferenced above, so it is never NULL here. */
    switch (av_format) {
    case AV_PIX_FMT_NV12:
        *plane_count = 2;

        widths [0] = avctx->width;
        heights[0] = avctx->height;
        strides[0] = frame ? frame->linesize[0] : avctx->width;

        /* Interleaved CbCr plane, half resolution in both directions. */
        widths [1] = (avctx->width  + 1) / 2;
        heights[1] = (avctx->height + 1) / 2;
        strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) & -2;
        break;

    case AV_PIX_FMT_YUV420P:
        *plane_count = 3;

        widths [0] = avctx->width;
        heights[0] = avctx->height;
        strides[0] = frame ? frame->linesize[0] : avctx->width;

        widths [1] = (avctx->width  + 1) / 2;
        heights[1] = (avctx->height + 1) / 2;
        strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) / 2;

        widths [2] = (avctx->width  + 1) / 2;
        heights[2] = (avctx->height + 1) / 2;
        strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2;
        break;

    case AV_PIX_FMT_P010LE:
        *plane_count = 2;
        widths[0] = avctx->width;
        heights[0] = avctx->height;
        /* 2 bytes/sample; fallback stride rounded up to 64. */
        strides[0] = frame ? frame->linesize[0] : (avctx->width * 2 + 63) & -64;

        widths[1] = (avctx->width + 1) / 2;
        heights[1] = (avctx->height + 1) / 2;
        strides[1] = frame ? frame->linesize[1] : ((avctx->width + 1) / 2 + 63) & -64;
        break;

    default:
        av_log(
               avctx,
               AV_LOG_ERROR,
               "Could not get frame format info for color %d range %d.\n",
               av_format,
               av_color_range);

        return AVERROR(EINVAL);
    }

    /* Contiguity check: all planes must follow each other exactly for a
     * single-buffer wrap; otherwise report 0. */
    *contiguous_buf_size = 0;
    for (i = 0; i < *plane_count; i++) {
        if (i < *plane_count - 1 &&
            frame->data[i] + strides[i] * heights[i] != frame->data[i + 1]) {
            *contiguous_buf_size = 0;
            break;
        }

        *contiguous_buf_size += strides[i] * heights[i];
    }

    return 0;
}
  1714. //Not used on OSX - frame is never copied.
  1715. static int copy_avframe_to_pixel_buffer(AVCodecContext *avctx,
  1716. const AVFrame *frame,
  1717. CVPixelBufferRef cv_img,
  1718. const size_t *plane_strides,
  1719. const size_t *plane_rows)
  1720. {
  1721. int i, j;
  1722. size_t plane_count;
  1723. int status;
  1724. int rows;
  1725. int src_stride;
  1726. int dst_stride;
  1727. uint8_t *src_addr;
  1728. uint8_t *dst_addr;
  1729. size_t copy_bytes;
  1730. status = CVPixelBufferLockBaseAddress(cv_img, 0);
  1731. if (status) {
  1732. av_log(
  1733. avctx,
  1734. AV_LOG_ERROR,
  1735. "Error: Could not lock base address of CVPixelBuffer: %d.\n",
  1736. status
  1737. );
  1738. }
  1739. if (CVPixelBufferIsPlanar(cv_img)) {
  1740. plane_count = CVPixelBufferGetPlaneCount(cv_img);
  1741. for (i = 0; frame->data[i]; i++) {
  1742. if (i == plane_count) {
  1743. CVPixelBufferUnlockBaseAddress(cv_img, 0);
  1744. av_log(avctx,
  1745. AV_LOG_ERROR,
  1746. "Error: different number of planes in AVFrame and CVPixelBuffer.\n"
  1747. );
  1748. return AVERROR_EXTERNAL;
  1749. }
  1750. dst_addr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cv_img, i);
  1751. src_addr = (uint8_t*)frame->data[i];
  1752. dst_stride = CVPixelBufferGetBytesPerRowOfPlane(cv_img, i);
  1753. src_stride = plane_strides[i];
  1754. rows = plane_rows[i];
  1755. if (dst_stride == src_stride) {
  1756. memcpy(dst_addr, src_addr, src_stride * rows);
  1757. } else {
  1758. copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
  1759. for (j = 0; j < rows; j++) {
  1760. memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
  1761. }
  1762. }
  1763. }
  1764. } else {
  1765. if (frame->data[1]) {
  1766. CVPixelBufferUnlockBaseAddress(cv_img, 0);
  1767. av_log(avctx,
  1768. AV_LOG_ERROR,
  1769. "Error: different number of planes in AVFrame and non-planar CVPixelBuffer.\n"
  1770. );
  1771. return AVERROR_EXTERNAL;
  1772. }
  1773. dst_addr = (uint8_t*)CVPixelBufferGetBaseAddress(cv_img);
  1774. src_addr = (uint8_t*)frame->data[0];
  1775. dst_stride = CVPixelBufferGetBytesPerRow(cv_img);
  1776. src_stride = plane_strides[0];
  1777. rows = plane_rows[0];
  1778. if (dst_stride == src_stride) {
  1779. memcpy(dst_addr, src_addr, src_stride * rows);
  1780. } else {
  1781. copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
  1782. for (j = 0; j < rows; j++) {
  1783. memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
  1784. }
  1785. }
  1786. }
  1787. status = CVPixelBufferUnlockBaseAddress(cv_img, 0);
  1788. if (status) {
  1789. av_log(avctx, AV_LOG_ERROR, "Error: Could not unlock CVPixelBuffer base address: %d.\n", status);
  1790. return AVERROR_EXTERNAL;
  1791. }
  1792. return 0;
  1793. }
/**
 * Obtain a CVPixelBuffer holding the frame's pixel data.
 *
 * For AV_PIX_FMT_VIDEOTOOLBOX input the frame already wraps a CVPixelBuffer
 * (in data[3]); it is retained and returned directly. Otherwise a buffer is
 * drawn from the compression session's pixel buffer pool and the AVFrame
 * planes are copied into it.
 *
 * On success *cv_img holds a reference owned by the caller (released with
 * CFRelease). Returns 0 on success, AVERROR_EXTERNAL or a negative AVERROR
 * code on failure.
 */
static int create_cv_pixel_buffer(AVCodecContext *avctx,
                                  const AVFrame *frame,
                                  CVPixelBufferRef *cv_img)
{
    int plane_count;
    int color;
    size_t widths [AV_NUM_DATA_POINTERS];
    size_t heights[AV_NUM_DATA_POINTERS];
    size_t strides[AV_NUM_DATA_POINTERS];
    int status;
    size_t contiguous_buf_size;
    CVPixelBufferPoolRef pix_buf_pool;
    VTEncContext* vtctx = avctx->priv_data;

    if (avctx->pix_fmt == AV_PIX_FMT_VIDEOTOOLBOX) {
        av_assert0(frame->format == AV_PIX_FMT_VIDEOTOOLBOX);

        /* Hardware frames carry the CVPixelBufferRef in data[3]; retain it
         * so the caller's CFRelease is balanced. */
        *cv_img = (CVPixelBufferRef)frame->data[3];
        av_assert0(*cv_img);

        CFRetain(*cv_img);
        return 0;
    }

    memset(widths,  0, sizeof(widths));
    memset(heights, 0, sizeof(heights));
    memset(strides, 0, sizeof(strides));

    /* Map the AVFrame format/range to a CV pixel format and per-plane
     * geometry; fails for unsupported format/range combinations. */
    status = get_cv_pixel_info(
        avctx,
        frame,
        &color,
        &plane_count,
        widths,
        heights,
        strides,
        &contiguous_buf_size
    );

    if (status) {
        av_log(
            avctx,
            AV_LOG_ERROR,
            "Error: Cannot convert format %d color_range %d: %d\n",
            frame->format,
            frame->color_range,
            status
        );

        return AVERROR_EXTERNAL;
    }

    pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
    if (!pix_buf_pool) {
        /* On iOS, the VT session is invalidated when the APP switches from
         * foreground to background and vice versa. Fetch the actual error code
         * of the VT session to detect that case and restart the VT session
         * accordingly. */
        OSStatus vtstatus;

        vtstatus = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
        if (vtstatus == kVTInvalidSessionErr) {
            /* Tear down and rebuild the session, then retry the pool fetch. */
            CFRelease(vtctx->session);
            vtctx->session = NULL;
            status = vtenc_configure_encoder(avctx);
            if (status == 0)
                pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
        }
        if (!pix_buf_pool) {
            av_log(avctx, AV_LOG_ERROR, "Could not get pixel buffer pool.\n");
            return AVERROR_EXTERNAL;
        }
        else
            av_log(avctx, AV_LOG_WARNING, "VT session restarted because of a "
                   "kVTInvalidSessionErr error.\n");
    }

    status = CVPixelBufferPoolCreatePixelBuffer(NULL,
                                                pix_buf_pool,
                                                cv_img);

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Could not create pixel buffer from pool: %d.\n", status);
        return AVERROR_EXTERNAL;
    }

    /* heights here are the per-plane row counts computed by
     * get_cv_pixel_info, reused as the copy extents. */
    status = copy_avframe_to_pixel_buffer(avctx, frame, *cv_img, strides, heights);
    if (status) {
        CFRelease(*cv_img);
        *cv_img = NULL;
        return status;
    }

    return 0;
}
  1876. static int create_encoder_dict_h264(const AVFrame *frame,
  1877. CFDictionaryRef* dict_out)
  1878. {
  1879. CFDictionaryRef dict = NULL;
  1880. if (frame->pict_type == AV_PICTURE_TYPE_I) {
  1881. const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
  1882. const void *vals[] = { kCFBooleanTrue };
  1883. dict = CFDictionaryCreate(NULL, keys, vals, 1, NULL, NULL);
  1884. if(!dict) return AVERROR(ENOMEM);
  1885. }
  1886. *dict_out = dict;
  1887. return 0;
  1888. }
  1889. static int vtenc_send_frame(AVCodecContext *avctx,
  1890. VTEncContext *vtctx,
  1891. const AVFrame *frame)
  1892. {
  1893. CMTime time;
  1894. CFDictionaryRef frame_dict;
  1895. CVPixelBufferRef cv_img = NULL;
  1896. AVFrameSideData *side_data = NULL;
  1897. ExtraSEI *sei = NULL;
  1898. int status = create_cv_pixel_buffer(avctx, frame, &cv_img);
  1899. if (status) return status;
  1900. status = create_encoder_dict_h264(frame, &frame_dict);
  1901. if (status) {
  1902. CFRelease(cv_img);
  1903. return status;
  1904. }
  1905. side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
  1906. if (vtctx->a53_cc && side_data && side_data->size) {
  1907. sei = av_mallocz(sizeof(*sei));
  1908. if (!sei) {
  1909. av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
  1910. } else {
  1911. int ret = ff_alloc_a53_sei(frame, 0, &sei->data, &sei->size);
  1912. if (ret < 0) {
  1913. av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
  1914. av_free(sei);
  1915. sei = NULL;
  1916. }
  1917. }
  1918. }
  1919. time = CMTimeMake(frame->pts * avctx->time_base.num, avctx->time_base.den);
  1920. status = VTCompressionSessionEncodeFrame(
  1921. vtctx->session,
  1922. cv_img,
  1923. time,
  1924. kCMTimeInvalid,
  1925. frame_dict,
  1926. sei,
  1927. NULL
  1928. );
  1929. if (frame_dict) CFRelease(frame_dict);
  1930. CFRelease(cv_img);
  1931. if (status) {
  1932. av_log(avctx, AV_LOG_ERROR, "Error: cannot encode frame: %d\n", status);
  1933. return AVERROR_EXTERNAL;
  1934. }
  1935. return 0;
  1936. }
/**
 * encode2 callback: feed one frame to the encoder (or flush when frame is
 * NULL) and try to pop one finished packet from the output queue.
 *
 * Output is withheld until the B-frame pts delta is known (dts_delta >= 0)
 * or until flushing, so that dts values can be offset correctly downstream.
 *
 * Returns 0 with *got_packet set, or a negative AVERROR code.
 */
static av_cold int vtenc_frame(
    AVCodecContext *avctx,
    AVPacket       *pkt,
    const AVFrame  *frame,
    int            *got_packet)
{
    VTEncContext *vtctx = avctx->priv_data;
    bool get_frame;
    int status;
    CMSampleBufferRef buf = NULL;
    ExtraSEI *sei = NULL;

    if (frame) {
        status = vtenc_send_frame(avctx, vtctx, frame);

        if (status) {
            status = AVERROR_EXTERNAL;
            goto end_nopkt;
        }

        /* Record the first pts; with B-frames the pts of the second input
         * frame gives the reorder delay used to derive dts later. */
        if (vtctx->frame_ct_in == 0) {
            vtctx->first_pts = frame->pts;
        } else if(vtctx->frame_ct_in == 1 && vtctx->has_b_frames) {
            vtctx->dts_delta = frame->pts - vtctx->first_pts;
        }

        vtctx->frame_ct_in++;
    } else if(!vtctx->flushing) {
        /* NULL frame == end of stream: flush the session exactly once. */
        vtctx->flushing = true;

        status = VTCompressionSessionCompleteFrames(vtctx->session,
                                                    kCMTimeIndefinite);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error flushing frames: %d\n", status);
            status = AVERROR_EXTERNAL;
            goto end_nopkt;
        }
    }

    *got_packet = 0;
    /* Don't emit packets until the dts delta is known, unless flushing. */
    get_frame = vtctx->dts_delta >= 0 || !frame;
    if (!get_frame) {
        status = 0;
        goto end_nopkt;
    }

    /* Pop a finished sample; wait (block) only when flushing. */
    status = vtenc_q_pop(vtctx, !frame, &buf, &sei);
    if (status) goto end_nopkt;
    if (!buf)   goto end_nopkt;

    status = vtenc_cm_to_avpacket(avctx, buf, pkt, sei);
    if (sei) {
        /* Ownership of the SEI payload was transferred to us by the queue. */
        if (sei->data) av_free(sei->data);
        av_free(sei);
    }
    CFRelease(buf);
    if (status) goto end_nopkt;

    *got_packet = 1;
    return 0;

end_nopkt:
    av_packet_unref(pkt);
    return status;
}
  1992. static int vtenc_populate_extradata(AVCodecContext *avctx,
  1993. CMVideoCodecType codec_type,
  1994. CFStringRef profile_level,
  1995. CFNumberRef gamma_level,
  1996. CFDictionaryRef enc_info,
  1997. CFDictionaryRef pixel_buffer_info)
  1998. {
  1999. VTEncContext *vtctx = avctx->priv_data;
  2000. int status;
  2001. CVPixelBufferPoolRef pool = NULL;
  2002. CVPixelBufferRef pix_buf = NULL;
  2003. CMTime time;
  2004. CMSampleBufferRef buf = NULL;
  2005. status = vtenc_create_encoder(avctx,
  2006. codec_type,
  2007. profile_level,
  2008. gamma_level,
  2009. enc_info,
  2010. pixel_buffer_info,
  2011. &vtctx->session);
  2012. if (status)
  2013. goto pe_cleanup;
  2014. pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
  2015. if(!pool){
  2016. av_log(avctx, AV_LOG_ERROR, "Error getting pixel buffer pool.\n");
  2017. goto pe_cleanup;
  2018. }
  2019. status = CVPixelBufferPoolCreatePixelBuffer(NULL,
  2020. pool,
  2021. &pix_buf);
  2022. if(status != kCVReturnSuccess){
  2023. av_log(avctx, AV_LOG_ERROR, "Error creating frame from pool: %d\n", status);
  2024. goto pe_cleanup;
  2025. }
  2026. time = CMTimeMake(0, avctx->time_base.den);
  2027. status = VTCompressionSessionEncodeFrame(vtctx->session,
  2028. pix_buf,
  2029. time,
  2030. kCMTimeInvalid,
  2031. NULL,
  2032. NULL,
  2033. NULL);
  2034. if (status) {
  2035. av_log(avctx,
  2036. AV_LOG_ERROR,
  2037. "Error sending frame for extradata: %d\n",
  2038. status);
  2039. goto pe_cleanup;
  2040. }
  2041. //Populates extradata - output frames are flushed and param sets are available.
  2042. status = VTCompressionSessionCompleteFrames(vtctx->session,
  2043. kCMTimeIndefinite);
  2044. if (status)
  2045. goto pe_cleanup;
  2046. status = vtenc_q_pop(vtctx, 0, &buf, NULL);
  2047. if (status) {
  2048. av_log(avctx, AV_LOG_ERROR, "popping: %d\n", status);
  2049. goto pe_cleanup;
  2050. }
  2051. CFRelease(buf);
  2052. pe_cleanup:
  2053. if(vtctx->session)
  2054. CFRelease(vtctx->session);
  2055. vtctx->session = NULL;
  2056. vtctx->frame_ct_out = 0;
  2057. av_assert0(status != 0 || (avctx->extradata && avctx->extradata_size > 0));
  2058. return status;
  2059. }
  2060. static av_cold int vtenc_close(AVCodecContext *avctx)
  2061. {
  2062. VTEncContext *vtctx = avctx->priv_data;
  2063. if(!vtctx->session) {
  2064. pthread_cond_destroy(&vtctx->cv_sample_sent);
  2065. pthread_mutex_destroy(&vtctx->lock);
  2066. return 0;
  2067. }
  2068. VTCompressionSessionCompleteFrames(vtctx->session,
  2069. kCMTimeIndefinite);
  2070. clear_frame_queue(vtctx);
  2071. pthread_cond_destroy(&vtctx->cv_sample_sent);
  2072. pthread_mutex_destroy(&vtctx->lock);
  2073. CFRelease(vtctx->session);
  2074. vtctx->session = NULL;
  2075. if (vtctx->color_primaries) {
  2076. CFRelease(vtctx->color_primaries);
  2077. vtctx->color_primaries = NULL;
  2078. }
  2079. if (vtctx->transfer_function) {
  2080. CFRelease(vtctx->transfer_function);
  2081. vtctx->transfer_function = NULL;
  2082. }
  2083. if (vtctx->ycbcr_matrix) {
  2084. CFRelease(vtctx->ycbcr_matrix);
  2085. vtctx->ycbcr_matrix = NULL;
  2086. }
  2087. return 0;
  2088. }
/* Input pixel formats accepted by the H.264 encoder wrapper. */
static const enum AVPixelFormat avc_pix_fmts[] = {
    AV_PIX_FMT_VIDEOTOOLBOX,
    AV_PIX_FMT_NV12,
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_NONE
};
/* Input pixel formats accepted by the HEVC encoder wrapper; adds 10-bit
 * P010LE on top of the H.264 list. */
static const enum AVPixelFormat hevc_pix_fmts[] = {
    AV_PIX_FMT_VIDEOTOOLBOX,
    AV_PIX_FMT_NV12,
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_P010LE,
    AV_PIX_FMT_NONE
};
/* Shorthand for option flags common to every entry below. */
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

/* AVOption entries shared by the H.264 and HEVC encoder option tables. */
#define COMMON_OPTIONS \
    { "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL, \
        { .i64 = 0 }, 0, 1, VE }, \
    { "require_sw", "Require software encoding", OFFSET(require_sw), AV_OPT_TYPE_BOOL, \
        { .i64 = 0 }, 0, 1, VE }, \
    { "realtime", "Hint that encoding should happen in real-time if not faster (e.g. capturing from camera).", \
        OFFSET(realtime), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
    { "frames_before", "Other frames will come before the frames in this session. This helps smooth concatenation issues.", \
        OFFSET(frames_before), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
    { "frames_after", "Other frames will come after the frames in this session. This helps smooth concatenation issues.", \
        OFFSET(frames_after), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },

/* OFFSET may be defined after COMMON_OPTIONS: macro bodies expand at the
 * point of use (inside the option tables), not at definition. */
#define OFFSET(x) offsetof(VTEncContext, x)
/* Options for the H.264 encoder: profile/level selection, entropy coder,
 * A53 closed captions, plus the COMMON_OPTIONS shared with HEVC. */
static const AVOption h264_options[] = {
    { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = H264_PROF_AUTO }, H264_PROF_AUTO, H264_PROF_COUNT, VE, "profile" },
    { "baseline", "Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_BASELINE }, INT_MIN, INT_MAX, VE, "profile" },
    { "main",     "Main Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_MAIN     }, INT_MIN, INT_MAX, VE, "profile" },
    { "high",     "High Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_HIGH     }, INT_MIN, INT_MAX, VE, "profile" },
    { "extended", "Extend Profile",   0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_EXTENDED }, INT_MIN, INT_MAX, VE, "profile" },

    /* Level values are 10x the H.264 level number (e.g. 41 == level 4.1). */
    { "level", "Level", OFFSET(level), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, VE, "level" },
    { "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" },
    { "3.0", "Level 3.0", 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, INT_MIN, INT_MAX, VE, "level" },
    { "3.1", "Level 3.1", 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, INT_MIN, INT_MAX, VE, "level" },
    { "3.2", "Level 3.2", 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, INT_MIN, INT_MAX, VE, "level" },
    { "4.0", "Level 4.0", 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, INT_MIN, INT_MAX, VE, "level" },
    { "4.1", "Level 4.1", 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, INT_MIN, INT_MAX, VE, "level" },
    { "4.2", "Level 4.2", 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, INT_MIN, INT_MAX, VE, "level" },
    { "5.0", "Level 5.0", 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, INT_MIN, INT_MAX, VE, "level" },
    { "5.1", "Level 5.1", 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, INT_MIN, INT_MAX, VE, "level" },
    { "5.2", "Level 5.2", 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, INT_MIN, INT_MAX, VE, "level" },

    { "coder", "Entropy coding", OFFSET(entropy), AV_OPT_TYPE_INT, { .i64 = VT_ENTROPY_NOT_SET }, VT_ENTROPY_NOT_SET, VT_CABAC, VE, "coder" },
    { "cavlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
    { "vlc",   "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
    { "cabac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
    { "ac",    "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },

    { "a53cc", "Use A53 Closed Captions (if available)", OFFSET(a53_cc), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, VE },

    COMMON_OPTIONS
    { NULL },
};
/* AVClass exposing h264_options through the AVOption API. */
static const AVClass h264_videotoolbox_class = {
    .class_name = "h264_videotoolbox",
    .item_name  = av_default_item_name,
    .option     = h264_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
  2147. AVCodec ff_h264_videotoolbox_encoder = {
  2148. .name = "h264_videotoolbox",
  2149. .long_name = NULL_IF_CONFIG_SMALL("VideoToolbox H.264 Encoder"),
  2150. .type = AVMEDIA_TYPE_VIDEO,
  2151. .id = AV_CODEC_ID_H264,
  2152. .priv_data_size = sizeof(VTEncContext),
  2153. .pix_fmts = avc_pix_fmts,
  2154. .init = vtenc_init,
  2155. .encode2 = vtenc_frame,
  2156. .close = vtenc_close,
  2157. .capabilities = AV_CODEC_CAP_DELAY,
  2158. .priv_class = &h264_videotoolbox_class,
  2159. .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
  2160. FF_CODEC_CAP_INIT_CLEANUP,
  2161. };
/* Options for the HEVC encoder: profile selection plus the
 * COMMON_OPTIONS shared with H.264. */
static const AVOption hevc_options[] = {
    { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = HEVC_PROF_AUTO }, HEVC_PROF_AUTO, HEVC_PROF_COUNT, VE, "profile" },
    { "main",   "Main Profile",   0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN   }, INT_MIN, INT_MAX, VE, "profile" },
    { "main10", "Main10 Profile", 0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN10 }, INT_MIN, INT_MAX, VE, "profile" },

    COMMON_OPTIONS
    { NULL },
};
/* AVClass exposing hevc_options through the AVOption API. */
static const AVClass hevc_videotoolbox_class = {
    .class_name = "hevc_videotoolbox",
    .item_name  = av_default_item_name,
    .option     = hevc_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
/* HEVC (H.265) encoder backed by the macOS/iOS VideoToolbox framework. */
AVCodec ff_hevc_videotoolbox_encoder = {
    .name             = "hevc_videotoolbox",
    .long_name        = NULL_IF_CONFIG_SMALL("VideoToolbox H.265 Encoder"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_HEVC,
    .priv_data_size   = sizeof(VTEncContext),
    .pix_fmts         = hevc_pix_fmts,
    .init             = vtenc_init,
    .encode2          = vtenc_frame,
    .close            = vtenc_close,
    .capabilities     = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE,
    .priv_class       = &hevc_videotoolbox_class,
    .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
                        FF_CODEC_CAP_INIT_CLEANUP,
    .wrapper_name     = "videotoolbox",
};