You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

2004 lines
63KB

  1. /*
  2. * copyright (c) 2015 Rick Kern <kernrj@gmail.com>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include <VideoToolbox/VideoToolbox.h>
  21. #include <CoreVideo/CoreVideo.h>
  22. #include <CoreMedia/CoreMedia.h>
  23. #include <TargetConditionals.h>
  24. #include <Availability.h>
  25. #include "avcodec.h"
  26. #include "libavutil/opt.h"
  27. #include "libavutil/avassert.h"
  28. #include "libavutil/atomic.h"
  29. #include "libavutil/avstring.h"
  30. #include "libavcodec/avcodec.h"
  31. #include "libavutil/pixdesc.h"
  32. #include "internal.h"
  33. #include <pthread.h>
  34. #if !CONFIG_VT_BT2020
  35. # define kCVImageBufferColorPrimaries_ITU_R_2020 CFSTR("ITU_R_2020")
  36. # define kCVImageBufferTransferFunction_ITU_R_2020 CFSTR("ITU_R_2020")
  37. # define kCVImageBufferYCbCrMatrix_ITU_R_2020 CFSTR("ITU_R_2020")
  38. #endif
/*
 * H.264 profile selection. H264_PROF_AUTO lets get_vt_profile_level()
 * pick a profile (Main when B-frames are enabled, Baseline otherwise).
 */
typedef enum VT_H264Profile {
    H264_PROF_AUTO,
    H264_PROF_BASELINE,
    H264_PROF_MAIN,
    H264_PROF_HIGH,
    H264_PROF_COUNT  // number of valid entries; not a real profile
} VT_H264Profile;
/*
 * Entropy-coding mode. VT_ENTROPY_NOT_SET leaves the choice to
 * VideoToolbox; CABAC is rejected for baseline profile in vtenc_init().
 */
typedef enum VTH264Entropy{
    VT_ENTROPY_NOT_SET,
    VT_CAVLC,
    VT_CABAC
} VTH264Entropy;
/* Annex B start code prefixed to each NAL unit / parameter set. */
static const uint8_t start_code[] = { 0, 0, 0, 1 };

/* Node of the singly-linked FIFO of encoded sample buffers. */
typedef struct BufNode {
    CMSampleBufferRef cm_buffer;  // retained reference; released by the consumer
    struct BufNode* next;
    int error;
} BufNode;
/* Private codec context for the VideoToolbox H.264 encoder. */
typedef struct VTEncContext {
    AVClass *class;
    VTCompressionSessionRef session;

    // CoreVideo color tags applied to the compression session.
    CFStringRef ycbcr_matrix;
    CFStringRef color_primaries;
    CFStringRef transfer_function;

    // lock guards the output queue and async_error; cv_sample_sent is
    // signalled whenever the encoder callback enqueues a sample.
    pthread_mutex_t lock;
    pthread_cond_t cv_sample_sent;

    int async_error;   // sticky error set from the callback thread

    BufNode *q_head;   // FIFO of encoded samples awaiting output
    BufNode *q_tail;

    int64_t frame_ct_out;  // samples dequeued so far (see vtenc_q_pop)
    int64_t frame_ct_in;   // frames submitted for encoding — incremented outside this chunk
    int64_t first_pts;
    int64_t dts_delta;     // -1 when B-frames are enabled, else 0 (set in vtenc_init)

    // Values of the private AVOptions (option table not in this chunk).
    int64_t profile;
    int64_t level;
    int64_t entropy;
    int64_t realtime;
    int64_t frames_before;
    int64_t frames_after;
    int64_t allow_sw;

    bool flushing;           // draining: no more input will arrive
    bool has_b_frames;
    bool warned_color_range;
} VTEncContext;
  83. static int vtenc_populate_extradata(AVCodecContext *avctx,
  84. CMVideoCodecType codec_type,
  85. CFStringRef profile_level,
  86. CFNumberRef gamma_level,
  87. CFDictionaryRef enc_info,
  88. CFDictionaryRef pixel_buffer_info);
  89. /**
  90. * NULL-safe release of *refPtr, and sets value to NULL.
  91. */
  92. static void vt_release_num(CFNumberRef* refPtr){
  93. if (!*refPtr) {
  94. return;
  95. }
  96. CFRelease(*refPtr);
  97. *refPtr = NULL;
  98. }
  99. static void set_async_error(VTEncContext *vtctx, int err)
  100. {
  101. BufNode *info;
  102. pthread_mutex_lock(&vtctx->lock);
  103. vtctx->async_error = err;
  104. info = vtctx->q_head;
  105. vtctx->q_head = vtctx->q_tail = NULL;
  106. while (info) {
  107. BufNode *next = info->next;
  108. CFRelease(info->cm_buffer);
  109. av_free(info);
  110. info = next;
  111. }
  112. pthread_mutex_unlock(&vtctx->lock);
  113. }
/*
 * Dequeues one encoded sample buffer.
 * Ownership of *buf transfers to the caller, who must CFRelease it.
 * *buf is set to NULL (return 0) when the queue is empty and either
 * wait==false or the encoder is fully drained.
 * Returns the sticky async error, if any.
 */
static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf)
{
    BufNode *info;

    pthread_mutex_lock(&vtctx->lock);

    // A previous asynchronous failure aborts the whole stream.
    if (vtctx->async_error) {
        pthread_mutex_unlock(&vtctx->lock);
        return vtctx->async_error;
    }

    // Flushing and every submitted frame has already been returned: EOF.
    if (vtctx->flushing && vtctx->frame_ct_in == vtctx->frame_ct_out) {
        *buf = NULL;
        pthread_mutex_unlock(&vtctx->lock);
        return 0;
    }

    // Block until the callback signals a new sample (or an error is set).
    while (!vtctx->q_head && !vtctx->async_error && wait) {
        pthread_cond_wait(&vtctx->cv_sample_sent, &vtctx->lock);
    }

    if (!vtctx->q_head) {
        pthread_mutex_unlock(&vtctx->lock);
        *buf = NULL;
        return 0;
    }

    // Unlink the head node; keep tail consistent when the queue empties.
    info = vtctx->q_head;
    vtctx->q_head = vtctx->q_head->next;
    if (!vtctx->q_head) {
        vtctx->q_tail = NULL;
    }

    pthread_mutex_unlock(&vtctx->lock);

    *buf = info->cm_buffer;
    av_free(info);

    // NOTE(review): incremented outside the lock — presumably only the
    // single consumer thread writes frame_ct_out; confirm against callers.
    vtctx->frame_ct_out++;

    return 0;
}
  146. static void vtenc_q_push(VTEncContext *vtctx, CMSampleBufferRef buffer)
  147. {
  148. BufNode *info = av_malloc(sizeof(BufNode));
  149. if (!info) {
  150. set_async_error(vtctx, AVERROR(ENOMEM));
  151. return;
  152. }
  153. CFRetain(buffer);
  154. info->cm_buffer = buffer;
  155. info->next = NULL;
  156. pthread_mutex_lock(&vtctx->lock);
  157. pthread_cond_signal(&vtctx->cv_sample_sent);
  158. if (!vtctx->q_head) {
  159. vtctx->q_head = info;
  160. } else {
  161. vtctx->q_tail->next = info;
  162. }
  163. vtctx->q_tail = info;
  164. pthread_mutex_unlock(&vtctx->lock);
  165. }
  166. static int count_nalus(size_t length_code_size,
  167. CMSampleBufferRef sample_buffer,
  168. int *count)
  169. {
  170. size_t offset = 0;
  171. int status;
  172. int nalu_ct = 0;
  173. uint8_t size_buf[4];
  174. size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
  175. CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
  176. if (length_code_size > 4)
  177. return AVERROR_INVALIDDATA;
  178. while (offset < src_size) {
  179. size_t curr_src_len;
  180. size_t box_len = 0;
  181. size_t i;
  182. status = CMBlockBufferCopyDataBytes(block,
  183. offset,
  184. length_code_size,
  185. size_buf);
  186. for (i = 0; i < length_code_size; i++) {
  187. box_len <<= 8;
  188. box_len |= size_buf[i];
  189. }
  190. curr_src_len = box_len + length_code_size;
  191. offset += curr_src_len;
  192. nalu_ct++;
  193. }
  194. *count = nalu_ct;
  195. return 0;
  196. }
  197. static CMVideoCodecType get_cm_codec_type(enum AVCodecID id)
  198. {
  199. switch (id) {
  200. case AV_CODEC_ID_H264: return kCMVideoCodecType_H264;
  201. default: return 0;
  202. }
  203. }
/**
 * Computes the total byte count required to store all H.264 parameter
 * sets (SPS/PPS) of vid_fmt in Annex B form, i.e. each set prefixed
 * with a 4-byte start code.
 *
 * On some systems querying the parameter-set count itself fails; the
 * sets are then enumerated until the API reports an error.
 *
 * @param size Receives the total number of bytes on success.
 * @return 0 on success, AVERROR_EXTERNAL if the sizes cannot be read.
 */
static int get_params_size(
    AVCodecContext *avctx,
    CMVideoFormatDescriptionRef vid_fmt,
    size_t *size)
{
    size_t total_size = 0;
    size_t ps_count;
    int is_count_bad = 0;
    size_t i;
    int status;

    // First call only asks for the number of parameter sets.
    status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt,
                                                                0,
                                                                NULL,
                                                                NULL,
                                                                &ps_count,
                                                                NULL);
    if (status) {
        // Count query unsupported: probe indices until failure instead.
        is_count_bad = 1;
        ps_count = 0;
        status = 0;
    }

    for (i = 0; i < ps_count || is_count_bad; i++) {
        const uint8_t *ps;
        size_t ps_size;
        status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt,
                                                                    i,
                                                                    &ps,
                                                                    &ps_size,
                                                                    NULL,
                                                                    NULL);
        if (status) {
            /*
             * When ps_count is invalid, status != 0 ends the loop normally
             * unless we didn't get any parameter sets.
             */
            if (i > 0 && is_count_bad) status = 0;

            break;
        }

        // Each set is emitted as start code + payload.
        total_size += ps_size + sizeof(start_code);
    }

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error getting parameter set sizes: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    *size = total_size;
    return 0;
}
/*
 * Copies every H.264 parameter set of vid_fmt into dst, each prefixed
 * with an Annex B start code. dst must hold at least the size computed
 * by get_params_size(); uses the same count-probing fallback.
 *
 * @return 0 on success, AVERROR_BUFFER_TOO_SMALL if dst_size is
 *         insufficient, AVERROR_EXTERNAL on API failure.
 */
static int copy_param_sets(
    AVCodecContext *avctx,
    CMVideoFormatDescriptionRef vid_fmt,
    uint8_t *dst,
    size_t dst_size)
{
    size_t ps_count;
    int is_count_bad = 0;
    int status;
    size_t offset = 0;
    size_t i;

    // Query the parameter-set count (may be unsupported, see below).
    status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt,
                                                                0,
                                                                NULL,
                                                                NULL,
                                                                &ps_count,
                                                                NULL);
    if (status) {
        // Fall back to probing indices until the API reports an error.
        is_count_bad = 1;
        ps_count = 0;
        status = 0;
    }

    for (i = 0; i < ps_count || is_count_bad; i++) {
        const uint8_t *ps;
        size_t ps_size;
        size_t next_offset;

        status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt,
                                                                    i,
                                                                    &ps,
                                                                    &ps_size,
                                                                    NULL,
                                                                    NULL);
        if (status) {
            // Probing mode: failure after at least one set is the
            // normal loop terminator, not an error.
            if (i > 0 && is_count_bad) status = 0;

            break;
        }

        next_offset = offset + sizeof(start_code) + ps_size;
        if (dst_size < next_offset) {
            av_log(avctx, AV_LOG_ERROR, "Error: buffer too small for parameter sets.\n");
            return AVERROR_BUFFER_TOO_SMALL;
        }

        memcpy(dst + offset, start_code, sizeof(start_code));
        offset += sizeof(start_code);

        memcpy(dst + offset, ps, ps_size);
        offset = next_offset;
    }

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error getting parameter set data: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    return 0;
}
  311. static int set_extradata(AVCodecContext *avctx, CMSampleBufferRef sample_buffer)
  312. {
  313. CMVideoFormatDescriptionRef vid_fmt;
  314. size_t total_size;
  315. int status;
  316. vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
  317. if (!vid_fmt) {
  318. av_log(avctx, AV_LOG_ERROR, "No video format.\n");
  319. return AVERROR_EXTERNAL;
  320. }
  321. status = get_params_size(avctx, vid_fmt, &total_size);
  322. if (status) {
  323. av_log(avctx, AV_LOG_ERROR, "Could not get parameter sets.\n");
  324. return status;
  325. }
  326. avctx->extradata = av_mallocz(total_size + AV_INPUT_BUFFER_PADDING_SIZE);
  327. if (!avctx->extradata) {
  328. return AVERROR(ENOMEM);
  329. }
  330. avctx->extradata_size = total_size;
  331. status = copy_param_sets(avctx, vid_fmt, avctx->extradata, total_size);
  332. if (status) {
  333. av_log(avctx, AV_LOG_ERROR, "Could not copy param sets.\n");
  334. return status;
  335. }
  336. return 0;
  337. }
  338. static void vtenc_output_callback(
  339. void *ctx,
  340. void *sourceFrameCtx,
  341. OSStatus status,
  342. VTEncodeInfoFlags flags,
  343. CMSampleBufferRef sample_buffer)
  344. {
  345. AVCodecContext *avctx = ctx;
  346. VTEncContext *vtctx = avctx->priv_data;
  347. if (vtctx->async_error) {
  348. if(sample_buffer) CFRelease(sample_buffer);
  349. return;
  350. }
  351. if (status || !sample_buffer) {
  352. av_log(avctx, AV_LOG_ERROR, "Error encoding frame: %d\n", (int)status);
  353. set_async_error(vtctx, AVERROR_EXTERNAL);
  354. return;
  355. }
  356. if (!avctx->extradata && (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
  357. int set_status = set_extradata(avctx, sample_buffer);
  358. if (set_status) {
  359. set_async_error(vtctx, set_status);
  360. return;
  361. }
  362. }
  363. vtenc_q_push(vtctx, sample_buffer);
  364. }
  365. static int get_length_code_size(
  366. AVCodecContext *avctx,
  367. CMSampleBufferRef sample_buffer,
  368. size_t *size)
  369. {
  370. CMVideoFormatDescriptionRef vid_fmt;
  371. int isize;
  372. int status;
  373. vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
  374. if (!vid_fmt) {
  375. av_log(avctx, AV_LOG_ERROR, "Error getting buffer format description.\n");
  376. return AVERROR_EXTERNAL;
  377. }
  378. status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt,
  379. 0,
  380. NULL,
  381. NULL,
  382. NULL,
  383. &isize);
  384. if (status) {
  385. av_log(avctx, AV_LOG_ERROR, "Error getting length code size: %d\n", status);
  386. return AVERROR_EXTERNAL;
  387. }
  388. *size = isize;
  389. return 0;
  390. }
/*
 * Resolves the configured profile/level into a VideoToolbox
 * kVTProfileLevel_* constant.
 *
 * Returns true on success. If *profile_level_val is NULL and this
 * method returns true, don't specify the profile/level to the encoder
 * (full auto). Returns false for an invalid profile/level combination.
 */
static bool get_vt_profile_level(AVCodecContext *avctx,
                                 CFStringRef *profile_level_val)
{
    VTEncContext *vtctx = avctx->priv_data;
    int64_t profile = vtctx->profile;

    if (profile == H264_PROF_AUTO && vtctx->level) {
        //Need to pick a profile if level is not auto-selected.
        profile = vtctx->has_b_frames ? H264_PROF_MAIN : H264_PROF_BASELINE;
    }

    *profile_level_val = NULL;

    switch (profile) {
        case H264_PROF_AUTO:
            // Leave *profile_level_val NULL: encoder chooses everything.
            return true;

        case H264_PROF_BASELINE:
            // level is 10 * H.264 level number (0 = auto).
            switch (vtctx->level) {
                case  0: *profile_level_val = kVTProfileLevel_H264_Baseline_AutoLevel; break;
                case 13: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3;       break;
                case 30: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0;       break;
                case 31: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1;       break;
                case 32: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2;       break;
                case 40: *profile_level_val = kVTProfileLevel_H264_Baseline_4_0;       break;
                case 41: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1;       break;
                case 42: *profile_level_val = kVTProfileLevel_H264_Baseline_4_2;       break;
                case 50: *profile_level_val = kVTProfileLevel_H264_Baseline_5_0;       break;
                case 51: *profile_level_val = kVTProfileLevel_H264_Baseline_5_1;       break;
                case 52: *profile_level_val = kVTProfileLevel_H264_Baseline_5_2;       break;
            }
            break;

        case H264_PROF_MAIN:
            switch (vtctx->level) {
                case  0: *profile_level_val = kVTProfileLevel_H264_Main_AutoLevel; break;
                case 30: *profile_level_val = kVTProfileLevel_H264_Main_3_0;       break;
                case 31: *profile_level_val = kVTProfileLevel_H264_Main_3_1;       break;
                case 32: *profile_level_val = kVTProfileLevel_H264_Main_3_2;       break;
                case 40: *profile_level_val = kVTProfileLevel_H264_Main_4_0;       break;
                case 41: *profile_level_val = kVTProfileLevel_H264_Main_4_1;       break;
                case 42: *profile_level_val = kVTProfileLevel_H264_Main_4_2;       break;
                case 50: *profile_level_val = kVTProfileLevel_H264_Main_5_0;       break;
                case 51: *profile_level_val = kVTProfileLevel_H264_Main_5_1;       break;
                case 52: *profile_level_val = kVTProfileLevel_H264_Main_5_2;       break;
            }
            break;

        case H264_PROF_HIGH:
            switch (vtctx->level) {
                case  0: *profile_level_val = kVTProfileLevel_H264_High_AutoLevel; break;
                case 30: *profile_level_val = kVTProfileLevel_H264_High_3_0;       break;
                case 31: *profile_level_val = kVTProfileLevel_H264_High_3_1;       break;
                case 32: *profile_level_val = kVTProfileLevel_H264_High_3_2;       break;
                case 40: *profile_level_val = kVTProfileLevel_H264_High_4_0;       break;
                case 41: *profile_level_val = kVTProfileLevel_H264_High_4_1;       break;
                case 42: *profile_level_val = kVTProfileLevel_H264_High_4_2;       break;
                case 50: *profile_level_val = kVTProfileLevel_H264_High_5_0;       break;
                case 51: *profile_level_val = kVTProfileLevel_H264_High_5_1;       break;
                case 52: *profile_level_val = kVTProfileLevel_H264_High_5_2;       break;
            }
            break;
    }

    // Still NULL here means the level didn't match any case above.
    if (!*profile_level_val) {
        av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
        return false;
    }

    return true;
}
  460. static int get_cv_pixel_format(AVCodecContext* avctx,
  461. enum AVPixelFormat fmt,
  462. enum AVColorRange range,
  463. int* av_pixel_format,
  464. int* range_guessed)
  465. {
  466. if (range_guessed) *range_guessed = range != AVCOL_RANGE_MPEG &&
  467. range != AVCOL_RANGE_JPEG;
  468. //MPEG range is used when no range is set
  469. if (fmt == AV_PIX_FMT_NV12) {
  470. *av_pixel_format = range == AVCOL_RANGE_JPEG ?
  471. kCVPixelFormatType_420YpCbCr8BiPlanarFullRange :
  472. kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
  473. } else if (fmt == AV_PIX_FMT_YUV420P) {
  474. *av_pixel_format = range == AVCOL_RANGE_JPEG ?
  475. kCVPixelFormatType_420YpCbCr8PlanarFullRange :
  476. kCVPixelFormatType_420YpCbCr8Planar;
  477. } else {
  478. return AVERROR(EINVAL);
  479. }
  480. return 0;
  481. }
  482. static void add_color_attr(AVCodecContext *avctx, CFMutableDictionaryRef dict) {
  483. VTEncContext *vtctx = avctx->priv_data;
  484. if (vtctx->color_primaries) {
  485. CFDictionarySetValue(dict,
  486. kCVImageBufferColorPrimariesKey,
  487. vtctx->color_primaries);
  488. }
  489. if (vtctx->transfer_function) {
  490. CFDictionarySetValue(dict,
  491. kCVImageBufferTransferFunctionKey,
  492. vtctx->transfer_function);
  493. }
  494. if (vtctx->ycbcr_matrix) {
  495. CFDictionarySetValue(dict,
  496. kCVImageBufferYCbCrMatrixKey,
  497. vtctx->ycbcr_matrix);
  498. }
  499. }
  500. static int create_cv_pixel_buffer_info(AVCodecContext* avctx,
  501. CFMutableDictionaryRef* dict)
  502. {
  503. CFNumberRef cv_color_format_num = NULL;
  504. CFNumberRef width_num = NULL;
  505. CFNumberRef height_num = NULL;
  506. CFMutableDictionaryRef pixel_buffer_info = NULL;
  507. int cv_color_format;
  508. int status = get_cv_pixel_format(avctx,
  509. avctx->pix_fmt,
  510. avctx->color_range,
  511. &cv_color_format,
  512. NULL);
  513. if (status) return status;
  514. pixel_buffer_info = CFDictionaryCreateMutable(
  515. kCFAllocatorDefault,
  516. 20,
  517. &kCFCopyStringDictionaryKeyCallBacks,
  518. &kCFTypeDictionaryValueCallBacks);
  519. if (!pixel_buffer_info) goto pbinfo_nomem;
  520. cv_color_format_num = CFNumberCreate(kCFAllocatorDefault,
  521. kCFNumberSInt32Type,
  522. &cv_color_format);
  523. if (!cv_color_format_num) goto pbinfo_nomem;
  524. CFDictionarySetValue(pixel_buffer_info,
  525. kCVPixelBufferPixelFormatTypeKey,
  526. cv_color_format_num);
  527. vt_release_num(&cv_color_format_num);
  528. width_num = CFNumberCreate(kCFAllocatorDefault,
  529. kCFNumberSInt32Type,
  530. &avctx->width);
  531. if (!width_num) return AVERROR(ENOMEM);
  532. CFDictionarySetValue(pixel_buffer_info,
  533. kCVPixelBufferWidthKey,
  534. width_num);
  535. vt_release_num(&width_num);
  536. height_num = CFNumberCreate(kCFAllocatorDefault,
  537. kCFNumberSInt32Type,
  538. &avctx->height);
  539. if (!height_num) goto pbinfo_nomem;
  540. CFDictionarySetValue(pixel_buffer_info,
  541. kCVPixelBufferHeightKey,
  542. height_num);
  543. vt_release_num(&height_num);
  544. add_color_attr(avctx, pixel_buffer_info);
  545. *dict = pixel_buffer_info;
  546. return 0;
  547. pbinfo_nomem:
  548. vt_release_num(&cv_color_format_num);
  549. vt_release_num(&width_num);
  550. vt_release_num(&height_num);
  551. if (pixel_buffer_info) CFRelease(pixel_buffer_info);
  552. return AVERROR(ENOMEM);
  553. }
  554. static int get_cv_color_primaries(AVCodecContext *avctx,
  555. CFStringRef *primaries)
  556. {
  557. enum AVColorPrimaries pri = avctx->color_primaries;
  558. switch (pri) {
  559. case AVCOL_PRI_UNSPECIFIED:
  560. *primaries = NULL;
  561. break;
  562. case AVCOL_PRI_BT709:
  563. *primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
  564. break;
  565. case AVCOL_PRI_BT2020:
  566. *primaries = kCVImageBufferColorPrimaries_ITU_R_2020;
  567. break;
  568. default:
  569. av_log(avctx, AV_LOG_ERROR, "Color primaries %s is not supported.\n", av_color_primaries_name(pri));
  570. *primaries = NULL;
  571. return -1;
  572. }
  573. return 0;
  574. }
  575. static int get_cv_transfer_function(AVCodecContext *avctx,
  576. CFStringRef *transfer_fnc,
  577. CFNumberRef *gamma_level)
  578. {
  579. enum AVColorTransferCharacteristic trc = avctx->color_trc;
  580. Float32 gamma;
  581. *gamma_level = NULL;
  582. switch (trc) {
  583. case AVCOL_TRC_UNSPECIFIED:
  584. *transfer_fnc = NULL;
  585. break;
  586. case AVCOL_TRC_BT709:
  587. *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_709_2;
  588. break;
  589. case AVCOL_TRC_SMPTE240M:
  590. *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_240M_1995;
  591. break;
  592. case AVCOL_TRC_GAMMA22:
  593. gamma = 2.2;
  594. *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
  595. *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
  596. break;
  597. case AVCOL_TRC_GAMMA28:
  598. gamma = 2.8;
  599. *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
  600. *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
  601. break;
  602. case AVCOL_TRC_BT2020_10:
  603. case AVCOL_TRC_BT2020_12:
  604. *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_2020;
  605. break;
  606. default:
  607. av_log(avctx, AV_LOG_ERROR, "Transfer function %s is not supported.\n", av_color_transfer_name(trc));
  608. return -1;
  609. }
  610. return 0;
  611. }
  612. static int get_cv_ycbcr_matrix(AVCodecContext *avctx, CFStringRef *matrix) {
  613. switch(avctx->colorspace) {
  614. case AVCOL_SPC_BT709:
  615. *matrix = kCVImageBufferYCbCrMatrix_ITU_R_709_2;
  616. break;
  617. case AVCOL_SPC_UNSPECIFIED:
  618. *matrix = NULL;
  619. break;
  620. case AVCOL_SPC_BT470BG:
  621. case AVCOL_SPC_SMPTE170M:
  622. *matrix = kCVImageBufferYCbCrMatrix_ITU_R_601_4;
  623. break;
  624. case AVCOL_SPC_SMPTE240M:
  625. *matrix = kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
  626. break;
  627. case AVCOL_SPC_BT2020_NCL:
  628. *matrix = kCVImageBufferYCbCrMatrix_ITU_R_2020;
  629. break;
  630. default:
  631. av_log(avctx, AV_LOG_ERROR, "Color space %s is not supported.\n", av_color_space_name(avctx->colorspace));
  632. return -1;
  633. }
  634. return 0;
  635. }
/*
 * Creates and configures the VTCompressionSession: bitrate,
 * profile/level, keyframe interval, aspect ratio, color tags, frame
 * reordering, entropy mode and realtime hint, then prepares it for
 * encoding. Property-set failures that would change output are fatal
 * (AVERROR_EXTERNAL); cosmetic ones only log a warning.
 */
static int vtenc_create_encoder(AVCodecContext *avctx,
                                CMVideoCodecType codec_type,
                                CFStringRef profile_level,
                                CFNumberRef gamma_level,
                                CFDictionaryRef enc_info,
                                CFDictionaryRef pixel_buffer_info,
                                VTCompressionSessionRef *session)
{
    VTEncContext *vtctx = avctx->priv_data;
    SInt32 bit_rate = avctx->bit_rate;
    CFNumberRef bit_rate_num;

    int status = VTCompressionSessionCreate(kCFAllocatorDefault,
                                            avctx->width,
                                            avctx->height,
                                            codec_type,
                                            enc_info,
                                            pixel_buffer_info,
                                            kCFAllocatorDefault,
                                            vtenc_output_callback,
                                            avctx,
                                            session);

    // NOTE(review): tests vtctx->session rather than *session; the
    // visible caller passes &vtctx->session so they coincide — confirm
    // this also holds for the vtenc_populate_extradata path.
    if (status || !vtctx->session) {
        av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status);

#if !TARGET_OS_IPHONE
        if (!vtctx->allow_sw) {
            av_log(avctx, AV_LOG_ERROR, "Try -allow_sw 1. The hardware encoder may be busy, or not supported.\n");
        }
#endif

        return AVERROR_EXTERNAL;
    }

    bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
                                  kCFNumberSInt32Type,
                                  &bit_rate);
    if (!bit_rate_num) return AVERROR(ENOMEM);

    status = VTSessionSetProperty(vtctx->session,
                                  kVTCompressionPropertyKey_AverageBitRate,
                                  bit_rate_num);
    CFRelease(bit_rate_num);

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error setting bitrate property: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    // Only set profile/level when get_vt_profile_level produced one;
    // NULL means "let the encoder decide".
    if (profile_level) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_ProfileLevel,
                                      profile_level);
        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    if (avctx->gop_size > 0) {
        CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault,
                                              kCFNumberIntType,
                                              &avctx->gop_size);
        if (!interval) {
            return AVERROR(ENOMEM);
        }

        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_MaxKeyFrameInterval,
                                      interval);
        CFRelease(interval);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting 'max key-frame interval' property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    // The frames_before/frames_after hints are best-effort: unsupported
    // devices just log a warning.
    if (vtctx->frames_before) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_MoreFramesBeforeStart,
                                      kCFBooleanTrue);

        if (status == kVTPropertyNotSupportedErr) {
            av_log(avctx, AV_LOG_WARNING, "frames_before property is not supported on this device. Ignoring.\n");
        } else if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting frames_before property: %d\n", status);
        }
    }

    if (vtctx->frames_after) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_MoreFramesAfterEnd,
                                      kCFBooleanTrue);

        if (status == kVTPropertyNotSupportedErr) {
            av_log(avctx, AV_LOG_WARNING, "frames_after property is not supported on this device. Ignoring.\n");
        } else if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting frames_after property: %d\n", status);
        }
    }

    if (avctx->sample_aspect_ratio.num != 0) {
        CFNumberRef num;
        CFNumberRef den;
        CFMutableDictionaryRef par;
        AVRational *avpar = &avctx->sample_aspect_ratio;

        // Reduce the SAR in place before handing it to VideoToolbox.
        av_reduce(&avpar->num, &avpar->den,
                  avpar->num, avpar->den,
                  0xFFFFFFFF);

        num = CFNumberCreate(kCFAllocatorDefault,
                             kCFNumberIntType,
                             &avpar->num);

        den = CFNumberCreate(kCFAllocatorDefault,
                             kCFNumberIntType,
                             &avpar->den);

        par = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                        2,
                                        &kCFCopyStringDictionaryKeyCallBacks,
                                        &kCFTypeDictionaryValueCallBacks);

        if (!par || !num || !den) {
            if (par) CFRelease(par);
            if (num) CFRelease(num);
            if (den) CFRelease(den);

            return AVERROR(ENOMEM);
        }

        CFDictionarySetValue(
            par,
            kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing,
            num);

        CFDictionarySetValue(
            par,
            kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing,
            den);

        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_PixelAspectRatio,
                                      par);

        CFRelease(par);
        CFRelease(num);
        CFRelease(den);

        if (status) {
            av_log(avctx,
                   AV_LOG_ERROR,
                   "Error setting pixel aspect ratio to %d:%d: %d.\n",
                   avctx->sample_aspect_ratio.num,
                   avctx->sample_aspect_ratio.den,
                   status);

            return AVERROR_EXTERNAL;
        }
    }

    // Color tags are advisory: failures only warn.
    if (vtctx->transfer_function) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_TransferFunction,
                                      vtctx->transfer_function);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set transfer function: %d\n", status);
        }
    }

    if (vtctx->ycbcr_matrix) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_YCbCrMatrix,
                                      vtctx->ycbcr_matrix);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set ycbcr matrix: %d\n", status);
        }
    }

    if (vtctx->color_primaries) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_ColorPrimaries,
                                      vtctx->color_primaries);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set color primaries: %d\n", status);
        }
    }

    if (gamma_level) {
        status = VTSessionSetProperty(vtctx->session,
                                      kCVImageBufferGammaLevelKey,
                                      gamma_level);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set gamma level: %d\n", status);
        }
    }

    // Without B-frames, forbid reordering so DTS == PTS.
    if (!vtctx->has_b_frames) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_AllowFrameReordering,
                                      kCFBooleanFalse);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting 'allow frame reordering' property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    if (vtctx->entropy != VT_ENTROPY_NOT_SET) {
        CFStringRef entropy = vtctx->entropy == VT_CABAC ?
                                kVTH264EntropyMode_CABAC:
                                kVTH264EntropyMode_CAVLC;

        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_H264EntropyMode,
                                      entropy);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting entropy property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    // Realtime is best-effort; a failure here is only logged.
    if (vtctx->realtime) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_RealTime,
                                      kCFBooleanTrue);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting realtime property: %d\n", status);
        }
    }

    status = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error: cannot prepare encoder: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    return 0;
}
  839. static av_cold int vtenc_init(AVCodecContext *avctx)
  840. {
  841. CFMutableDictionaryRef enc_info;
  842. CFMutableDictionaryRef pixel_buffer_info;
  843. CMVideoCodecType codec_type;
  844. VTEncContext *vtctx = avctx->priv_data;
  845. CFStringRef profile_level;
  846. CFBooleanRef has_b_frames_cfbool;
  847. CFNumberRef gamma_level = NULL;
  848. int status;
  849. codec_type = get_cm_codec_type(avctx->codec_id);
  850. if (!codec_type) {
  851. av_log(avctx, AV_LOG_ERROR, "Error: no mapping for AVCodecID %d\n", avctx->codec_id);
  852. return AVERROR(EINVAL);
  853. }
  854. vtctx->has_b_frames = avctx->max_b_frames > 0;
  855. if(vtctx->has_b_frames && vtctx->profile == H264_PROF_BASELINE){
  856. av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n");
  857. vtctx->has_b_frames = false;
  858. }
  859. if (vtctx->entropy == VT_CABAC && vtctx->profile == H264_PROF_BASELINE) {
  860. av_log(avctx, AV_LOG_WARNING, "CABAC entropy requires 'main' or 'high' profile, but baseline was requested. Encode will not use CABAC entropy.\n");
  861. vtctx->entropy = VT_ENTROPY_NOT_SET;
  862. }
  863. if (!get_vt_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
  864. vtctx->session = NULL;
  865. enc_info = CFDictionaryCreateMutable(
  866. kCFAllocatorDefault,
  867. 20,
  868. &kCFCopyStringDictionaryKeyCallBacks,
  869. &kCFTypeDictionaryValueCallBacks
  870. );
  871. if (!enc_info) return AVERROR(ENOMEM);
  872. #if !TARGET_OS_IPHONE
  873. if (!vtctx->allow_sw) {
  874. CFDictionarySetValue(enc_info, kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder, kCFBooleanTrue);
  875. } else {
  876. CFDictionarySetValue(enc_info, kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder, kCFBooleanTrue);
  877. }
  878. #endif
  879. if (avctx->pix_fmt != AV_PIX_FMT_VIDEOTOOLBOX) {
  880. status = create_cv_pixel_buffer_info(avctx, &pixel_buffer_info);
  881. if (status)
  882. goto init_cleanup;
  883. } else {
  884. pixel_buffer_info = NULL;
  885. }
  886. pthread_mutex_init(&vtctx->lock, NULL);
  887. pthread_cond_init(&vtctx->cv_sample_sent, NULL);
  888. vtctx->dts_delta = vtctx->has_b_frames ? -1 : 0;
  889. get_cv_transfer_function(avctx, &vtctx->transfer_function, &gamma_level);
  890. get_cv_ycbcr_matrix(avctx, &vtctx->ycbcr_matrix);
  891. get_cv_color_primaries(avctx, &vtctx->color_primaries);
  892. if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
  893. status = vtenc_populate_extradata(avctx,
  894. codec_type,
  895. profile_level,
  896. gamma_level,
  897. enc_info,
  898. pixel_buffer_info);
  899. if (status)
  900. goto init_cleanup;
  901. }
  902. status = vtenc_create_encoder(avctx,
  903. codec_type,
  904. profile_level,
  905. gamma_level,
  906. enc_info,
  907. pixel_buffer_info,
  908. &vtctx->session);
  909. if (status < 0)
  910. goto init_cleanup;
  911. status = VTSessionCopyProperty(vtctx->session,
  912. kVTCompressionPropertyKey_AllowFrameReordering,
  913. kCFAllocatorDefault,
  914. &has_b_frames_cfbool);
  915. if (!status) {
  916. //Some devices don't output B-frames for main profile, even if requested.
  917. vtctx->has_b_frames = CFBooleanGetValue(has_b_frames_cfbool);
  918. CFRelease(has_b_frames_cfbool);
  919. }
  920. avctx->has_b_frames = vtctx->has_b_frames;
  921. init_cleanup:
  922. if (gamma_level)
  923. CFRelease(gamma_level);
  924. if (pixel_buffer_info)
  925. CFRelease(pixel_buffer_info);
  926. CFRelease(enc_info);
  927. return status;
  928. }
  929. static void vtenc_get_frame_info(CMSampleBufferRef buffer, bool *is_key_frame)
  930. {
  931. CFArrayRef attachments;
  932. CFDictionaryRef attachment;
  933. CFBooleanRef not_sync;
  934. CFIndex len;
  935. attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
  936. len = !attachments ? 0 : CFArrayGetCount(attachments);
  937. if (!len) {
  938. *is_key_frame = true;
  939. return;
  940. }
  941. attachment = CFArrayGetValueAtIndex(attachments, 0);
  942. if (CFDictionaryGetValueIfPresent(attachment,
  943. kCMSampleAttachmentKey_NotSync,
  944. (const void **)&not_sync))
  945. {
  946. *is_key_frame = !CFBooleanGetValue(not_sync);
  947. } else {
  948. *is_key_frame = true;
  949. }
  950. }
  951. /**
  952. * Copies NAL units and replaces length codes with
  953. * H.264 Annex B start codes. On failure, the contents of
  954. * dst_data may have been modified.
  955. *
  956. * @param length_code_size Byte length of each length code
  957. * @param src_data NAL units prefixed with length codes.
  958. * @param src_size Length of buffer, excluding any padding.
  959. * @param dst_data Must be zeroed before calling this function.
  960. * Contains the copied NAL units prefixed with
  961. * start codes when the function returns
  962. * successfully.
  963. * @param dst_size Length of dst_data
  964. * @return 0 on success
  965. * AVERROR_INVALIDDATA if length_code_size is invalid
  966. * AVERROR_BUFFER_TOO_SMALL if dst_data is too small
  967. * or if a length_code in src_data specifies data beyond
  968. * the end of its buffer.
  969. */
  970. static int copy_replace_length_codes(
  971. AVCodecContext *avctx,
  972. size_t length_code_size,
  973. CMSampleBufferRef sample_buffer,
  974. uint8_t *dst_data,
  975. size_t dst_size)
  976. {
  977. size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
  978. size_t remaining_src_size = src_size;
  979. size_t remaining_dst_size = dst_size;
  980. size_t src_offset = 0;
  981. int status;
  982. uint8_t size_buf[4];
  983. CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
  984. if (length_code_size > 4) {
  985. return AVERROR_INVALIDDATA;
  986. }
  987. while (remaining_src_size > 0) {
  988. size_t curr_src_len;
  989. size_t curr_dst_len;
  990. size_t box_len = 0;
  991. size_t i;
  992. uint8_t *dst_box;
  993. status = CMBlockBufferCopyDataBytes(block,
  994. src_offset,
  995. length_code_size,
  996. size_buf);
  997. if (status) {
  998. av_log(avctx, AV_LOG_ERROR, "Cannot copy length: %d\n", status);
  999. return AVERROR_EXTERNAL;
  1000. }
  1001. for (i = 0; i < length_code_size; i++) {
  1002. box_len <<= 8;
  1003. box_len |= size_buf[i];
  1004. }
  1005. curr_src_len = box_len + length_code_size;
  1006. curr_dst_len = box_len + sizeof(start_code);
  1007. if (remaining_src_size < curr_src_len) {
  1008. return AVERROR_BUFFER_TOO_SMALL;
  1009. }
  1010. if (remaining_dst_size < curr_dst_len) {
  1011. return AVERROR_BUFFER_TOO_SMALL;
  1012. }
  1013. dst_box = dst_data + sizeof(start_code);
  1014. memcpy(dst_data, start_code, sizeof(start_code));
  1015. status = CMBlockBufferCopyDataBytes(block,
  1016. src_offset + length_code_size,
  1017. box_len,
  1018. dst_box);
  1019. if (status) {
  1020. av_log(avctx, AV_LOG_ERROR, "Cannot copy data: %d\n", status);
  1021. return AVERROR_EXTERNAL;
  1022. }
  1023. src_offset += curr_src_len;
  1024. dst_data += curr_dst_len;
  1025. remaining_src_size -= curr_src_len;
  1026. remaining_dst_size -= curr_dst_len;
  1027. }
  1028. return 0;
  1029. }
  1030. static int vtenc_cm_to_avpacket(
  1031. AVCodecContext *avctx,
  1032. CMSampleBufferRef sample_buffer,
  1033. AVPacket *pkt)
  1034. {
  1035. VTEncContext *vtctx = avctx->priv_data;
  1036. int status;
  1037. bool is_key_frame;
  1038. bool add_header;
  1039. size_t length_code_size;
  1040. size_t header_size = 0;
  1041. size_t in_buf_size;
  1042. size_t out_buf_size;
  1043. int64_t dts_delta;
  1044. int64_t time_base_num;
  1045. int nalu_count;
  1046. CMTime pts;
  1047. CMTime dts;
  1048. CMVideoFormatDescriptionRef vid_fmt;
  1049. vtenc_get_frame_info(sample_buffer, &is_key_frame);
  1050. status = get_length_code_size(avctx, sample_buffer, &length_code_size);
  1051. if (status) return status;
  1052. add_header = is_key_frame && !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);
  1053. if (add_header) {
  1054. vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
  1055. if (!vid_fmt) {
  1056. av_log(avctx, AV_LOG_ERROR, "Cannot get format description.\n");
  1057. return AVERROR_EXTERNAL;
  1058. }
  1059. int status = get_params_size(avctx, vid_fmt, &header_size);
  1060. if (status) return status;
  1061. }
  1062. status = count_nalus(length_code_size, sample_buffer, &nalu_count);
  1063. if(status)
  1064. return status;
  1065. in_buf_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
  1066. out_buf_size = header_size +
  1067. in_buf_size +
  1068. nalu_count * ((int)sizeof(start_code) - (int)length_code_size);
  1069. status = ff_alloc_packet2(avctx, pkt, out_buf_size, out_buf_size);
  1070. if (status < 0)
  1071. return status;
  1072. if (add_header) {
  1073. status = copy_param_sets(avctx, vid_fmt, pkt->data, out_buf_size);
  1074. if(status) return status;
  1075. }
  1076. status = copy_replace_length_codes(
  1077. avctx,
  1078. length_code_size,
  1079. sample_buffer,
  1080. pkt->data + header_size,
  1081. pkt->size - header_size
  1082. );
  1083. if (status) {
  1084. av_log(avctx, AV_LOG_ERROR, "Error copying packet data: %d", status);
  1085. return status;
  1086. }
  1087. if (is_key_frame) {
  1088. pkt->flags |= AV_PKT_FLAG_KEY;
  1089. }
  1090. pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
  1091. dts = CMSampleBufferGetDecodeTimeStamp (sample_buffer);
  1092. if (CMTIME_IS_INVALID(dts)) {
  1093. if (!vtctx->has_b_frames) {
  1094. dts = pts;
  1095. } else {
  1096. av_log(avctx, AV_LOG_ERROR, "DTS is invalid.\n");
  1097. return AVERROR_EXTERNAL;
  1098. }
  1099. }
  1100. dts_delta = vtctx->dts_delta >= 0 ? vtctx->dts_delta : 0;
  1101. time_base_num = avctx->time_base.num;
  1102. pkt->pts = pts.value / time_base_num;
  1103. pkt->dts = dts.value / time_base_num - dts_delta;
  1104. pkt->size = out_buf_size;
  1105. return 0;
  1106. }
  1107. /*
  1108. * contiguous_buf_size is 0 if not contiguous, and the size of the buffer
  1109. * containing all planes if so.
  1110. */
  1111. static int get_cv_pixel_info(
  1112. AVCodecContext *avctx,
  1113. const AVFrame *frame,
  1114. int *color,
  1115. int *plane_count,
  1116. size_t *widths,
  1117. size_t *heights,
  1118. size_t *strides,
  1119. size_t *contiguous_buf_size)
  1120. {
  1121. VTEncContext *vtctx = avctx->priv_data;
  1122. int av_format = frame->format;
  1123. int av_color_range = av_frame_get_color_range(frame);
  1124. int i;
  1125. int range_guessed;
  1126. int status;
  1127. status = get_cv_pixel_format(avctx, av_format, av_color_range, color, &range_guessed);
  1128. if (status) {
  1129. av_log(avctx,
  1130. AV_LOG_ERROR,
  1131. "Could not get pixel format for color format '%s' range '%s'.\n",
  1132. av_get_pix_fmt_name(av_format),
  1133. av_color_range > AVCOL_RANGE_UNSPECIFIED &&
  1134. av_color_range < AVCOL_RANGE_NB ?
  1135. av_color_range_name(av_color_range) :
  1136. "Unknown");
  1137. return AVERROR(EINVAL);
  1138. }
  1139. if (range_guessed) {
  1140. if (!vtctx->warned_color_range) {
  1141. vtctx->warned_color_range = true;
  1142. av_log(avctx,
  1143. AV_LOG_WARNING,
  1144. "Color range not set for %s. Using MPEG range.\n",
  1145. av_get_pix_fmt_name(av_format));
  1146. }
  1147. av_log(avctx, AV_LOG_WARNING, "");
  1148. }
  1149. switch (av_format) {
  1150. case AV_PIX_FMT_NV12:
  1151. *plane_count = 2;
  1152. widths [0] = avctx->width;
  1153. heights[0] = avctx->height;
  1154. strides[0] = frame ? frame->linesize[0] : avctx->width;
  1155. widths [1] = (avctx->width + 1) / 2;
  1156. heights[1] = (avctx->height + 1) / 2;
  1157. strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) & -2;
  1158. break;
  1159. case AV_PIX_FMT_YUV420P:
  1160. *plane_count = 3;
  1161. widths [0] = avctx->width;
  1162. heights[0] = avctx->height;
  1163. strides[0] = frame ? frame->linesize[0] : avctx->width;
  1164. widths [1] = (avctx->width + 1) / 2;
  1165. heights[1] = (avctx->height + 1) / 2;
  1166. strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) / 2;
  1167. widths [2] = (avctx->width + 1) / 2;
  1168. heights[2] = (avctx->height + 1) / 2;
  1169. strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2;
  1170. break;
  1171. default:
  1172. av_log(
  1173. avctx,
  1174. AV_LOG_ERROR,
  1175. "Could not get frame format info for color %d range %d.\n",
  1176. av_format,
  1177. av_color_range);
  1178. return AVERROR(EINVAL);
  1179. }
  1180. *contiguous_buf_size = 0;
  1181. for (i = 0; i < *plane_count; i++) {
  1182. if (i < *plane_count - 1 &&
  1183. frame->data[i] + strides[i] * heights[i] != frame->data[i + 1]) {
  1184. *contiguous_buf_size = 0;
  1185. break;
  1186. }
  1187. *contiguous_buf_size += strides[i] * heights[i];
  1188. }
  1189. return 0;
  1190. }
  1191. #if !TARGET_OS_IPHONE
  1192. //Not used on iOS - frame is always copied.
  1193. static void free_avframe(
  1194. void *release_ctx,
  1195. const void *data,
  1196. size_t size,
  1197. size_t plane_count,
  1198. const void *plane_addresses[])
  1199. {
  1200. AVFrame *frame = release_ctx;
  1201. av_frame_free(&frame);
  1202. }
  1203. #else
  1204. //Not used on OSX - frame is never copied.
  1205. static int copy_avframe_to_pixel_buffer(AVCodecContext *avctx,
  1206. const AVFrame *frame,
  1207. CVPixelBufferRef cv_img,
  1208. const size_t *plane_strides,
  1209. const size_t *plane_rows)
  1210. {
  1211. int i, j;
  1212. size_t plane_count;
  1213. int status;
  1214. int rows;
  1215. int src_stride;
  1216. int dst_stride;
  1217. uint8_t *src_addr;
  1218. uint8_t *dst_addr;
  1219. size_t copy_bytes;
  1220. status = CVPixelBufferLockBaseAddress(cv_img, 0);
  1221. if (status) {
  1222. av_log(
  1223. avctx,
  1224. AV_LOG_ERROR,
  1225. "Error: Could not lock base address of CVPixelBuffer: %d.\n",
  1226. status
  1227. );
  1228. }
  1229. if (CVPixelBufferIsPlanar(cv_img)) {
  1230. plane_count = CVPixelBufferGetPlaneCount(cv_img);
  1231. for (i = 0; frame->data[i]; i++) {
  1232. if (i == plane_count) {
  1233. CVPixelBufferUnlockBaseAddress(cv_img, 0);
  1234. av_log(avctx,
  1235. AV_LOG_ERROR,
  1236. "Error: different number of planes in AVFrame and CVPixelBuffer.\n"
  1237. );
  1238. return AVERROR_EXTERNAL;
  1239. }
  1240. dst_addr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cv_img, i);
  1241. src_addr = (uint8_t*)frame->data[i];
  1242. dst_stride = CVPixelBufferGetBytesPerRowOfPlane(cv_img, i);
  1243. src_stride = plane_strides[i];
  1244. rows = plane_rows[i];
  1245. if (dst_stride == src_stride) {
  1246. memcpy(dst_addr, src_addr, src_stride * rows);
  1247. } else {
  1248. copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
  1249. for (j = 0; j < rows; j++) {
  1250. memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
  1251. }
  1252. }
  1253. }
  1254. } else {
  1255. if (frame->data[1]) {
  1256. CVPixelBufferUnlockBaseAddress(cv_img, 0);
  1257. av_log(avctx,
  1258. AV_LOG_ERROR,
  1259. "Error: different number of planes in AVFrame and non-planar CVPixelBuffer.\n"
  1260. );
  1261. return AVERROR_EXTERNAL;
  1262. }
  1263. dst_addr = (uint8_t*)CVPixelBufferGetBaseAddress(cv_img);
  1264. src_addr = (uint8_t*)frame->data[0];
  1265. dst_stride = CVPixelBufferGetBytesPerRow(cv_img);
  1266. src_stride = plane_strides[0];
  1267. rows = plane_rows[0];
  1268. if (dst_stride == src_stride) {
  1269. memcpy(dst_addr, src_addr, src_stride * rows);
  1270. } else {
  1271. copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
  1272. for (j = 0; j < rows; j++) {
  1273. memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
  1274. }
  1275. }
  1276. }
  1277. status = CVPixelBufferUnlockBaseAddress(cv_img, 0);
  1278. if (status) {
  1279. av_log(avctx, AV_LOG_ERROR, "Error: Could not unlock CVPixelBuffer base address: %d.\n", status);
  1280. return AVERROR_EXTERNAL;
  1281. }
  1282. return 0;
  1283. }
  1284. #endif //!TARGET_OS_IPHONE
  1285. static int create_cv_pixel_buffer(AVCodecContext *avctx,
  1286. const AVFrame *frame,
  1287. CVPixelBufferRef *cv_img)
  1288. {
  1289. int plane_count;
  1290. int color;
  1291. size_t widths [AV_NUM_DATA_POINTERS];
  1292. size_t heights[AV_NUM_DATA_POINTERS];
  1293. size_t strides[AV_NUM_DATA_POINTERS];
  1294. int status;
  1295. size_t contiguous_buf_size;
  1296. #if TARGET_OS_IPHONE
  1297. CVPixelBufferPoolRef pix_buf_pool;
  1298. VTEncContext* vtctx = avctx->priv_data;
  1299. #else
  1300. CFMutableDictionaryRef pix_buf_attachments = CFDictionaryCreateMutable(
  1301. kCFAllocatorDefault,
  1302. 10,
  1303. &kCFCopyStringDictionaryKeyCallBacks,
  1304. &kCFTypeDictionaryValueCallBacks);
  1305. if (!pix_buf_attachments) return AVERROR(ENOMEM);
  1306. #endif
  1307. if (avctx->pix_fmt == AV_PIX_FMT_VIDEOTOOLBOX) {
  1308. av_assert0(frame->format == AV_PIX_FMT_VIDEOTOOLBOX);
  1309. *cv_img = (CVPixelBufferRef)frame->data[3];
  1310. av_assert0(*cv_img);
  1311. CFRetain(*cv_img);
  1312. return 0;
  1313. }
  1314. memset(widths, 0, sizeof(widths));
  1315. memset(heights, 0, sizeof(heights));
  1316. memset(strides, 0, sizeof(strides));
  1317. status = get_cv_pixel_info(
  1318. avctx,
  1319. frame,
  1320. &color,
  1321. &plane_count,
  1322. widths,
  1323. heights,
  1324. strides,
  1325. &contiguous_buf_size
  1326. );
  1327. if (status) {
  1328. av_log(
  1329. avctx,
  1330. AV_LOG_ERROR,
  1331. "Error: Cannot convert format %d color_range %d: %d\n",
  1332. frame->format,
  1333. av_frame_get_color_range(frame),
  1334. status
  1335. );
  1336. return AVERROR_EXTERNAL;
  1337. }
  1338. #if TARGET_OS_IPHONE
  1339. pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
  1340. if (!pix_buf_pool) {
  1341. av_log(avctx, AV_LOG_ERROR, "Could not get pixel buffer pool.\n");
  1342. return AVERROR_EXTERNAL;
  1343. }
  1344. status = CVPixelBufferPoolCreatePixelBuffer(NULL,
  1345. pix_buf_pool,
  1346. cv_img);
  1347. if (status) {
  1348. av_log(avctx, AV_LOG_ERROR, "Could not create pixel buffer from pool: %d.\n", status);
  1349. return AVERROR_EXTERNAL;
  1350. }
  1351. status = copy_avframe_to_pixel_buffer(avctx, frame, *cv_img, strides, heights);
  1352. if (status) {
  1353. CFRelease(*cv_img);
  1354. *cv_img = NULL;
  1355. return status;
  1356. }
  1357. #else
  1358. AVFrame *enc_frame = av_frame_alloc();
  1359. if (!enc_frame) return AVERROR(ENOMEM);
  1360. status = av_frame_ref(enc_frame, frame);
  1361. if (status) {
  1362. av_frame_free(&enc_frame);
  1363. return status;
  1364. }
  1365. status = CVPixelBufferCreateWithPlanarBytes(
  1366. kCFAllocatorDefault,
  1367. enc_frame->width,
  1368. enc_frame->height,
  1369. color,
  1370. NULL,
  1371. contiguous_buf_size,
  1372. plane_count,
  1373. (void **)enc_frame->data,
  1374. widths,
  1375. heights,
  1376. strides,
  1377. free_avframe,
  1378. enc_frame,
  1379. NULL,
  1380. cv_img
  1381. );
  1382. add_color_attr(avctx, pix_buf_attachments);
  1383. CVBufferSetAttachments(*cv_img, pix_buf_attachments, kCVAttachmentMode_ShouldPropagate);
  1384. CFRelease(pix_buf_attachments);
  1385. if (status) {
  1386. av_log(avctx, AV_LOG_ERROR, "Error: Could not create CVPixelBuffer: %d\n", status);
  1387. return AVERROR_EXTERNAL;
  1388. }
  1389. #endif
  1390. return 0;
  1391. }
  1392. static int create_encoder_dict_h264(const AVFrame *frame,
  1393. CFDictionaryRef* dict_out)
  1394. {
  1395. CFDictionaryRef dict = NULL;
  1396. if (frame->pict_type == AV_PICTURE_TYPE_I) {
  1397. const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
  1398. const void *vals[] = { kCFBooleanTrue };
  1399. dict = CFDictionaryCreate(NULL, keys, vals, 1, NULL, NULL);
  1400. if(!dict) return AVERROR(ENOMEM);
  1401. }
  1402. *dict_out = dict;
  1403. return 0;
  1404. }
  1405. static int vtenc_send_frame(AVCodecContext *avctx,
  1406. VTEncContext *vtctx,
  1407. const AVFrame *frame)
  1408. {
  1409. CMTime time;
  1410. CFDictionaryRef frame_dict;
  1411. CVPixelBufferRef cv_img = NULL;
  1412. int status = create_cv_pixel_buffer(avctx, frame, &cv_img);
  1413. if (status) return status;
  1414. status = create_encoder_dict_h264(frame, &frame_dict);
  1415. if (status) {
  1416. CFRelease(cv_img);
  1417. return status;
  1418. }
  1419. time = CMTimeMake(frame->pts * avctx->time_base.num, avctx->time_base.den);
  1420. status = VTCompressionSessionEncodeFrame(
  1421. vtctx->session,
  1422. cv_img,
  1423. time,
  1424. kCMTimeInvalid,
  1425. frame_dict,
  1426. NULL,
  1427. NULL
  1428. );
  1429. if (frame_dict) CFRelease(frame_dict);
  1430. CFRelease(cv_img);
  1431. if (status) {
  1432. av_log(avctx, AV_LOG_ERROR, "Error: cannot encode frame: %d\n", status);
  1433. return AVERROR_EXTERNAL;
  1434. }
  1435. return 0;
  1436. }
  1437. static av_cold int vtenc_frame(
  1438. AVCodecContext *avctx,
  1439. AVPacket *pkt,
  1440. const AVFrame *frame,
  1441. int *got_packet)
  1442. {
  1443. VTEncContext *vtctx = avctx->priv_data;
  1444. bool get_frame;
  1445. int status;
  1446. CMSampleBufferRef buf = NULL;
  1447. if (frame) {
  1448. status = vtenc_send_frame(avctx, vtctx, frame);
  1449. if (status) {
  1450. status = AVERROR_EXTERNAL;
  1451. goto end_nopkt;
  1452. }
  1453. if (vtctx->frame_ct_in == 0) {
  1454. vtctx->first_pts = frame->pts;
  1455. } else if(vtctx->frame_ct_in == 1 && vtctx->has_b_frames) {
  1456. vtctx->dts_delta = frame->pts - vtctx->first_pts;
  1457. }
  1458. vtctx->frame_ct_in++;
  1459. } else if(!vtctx->flushing) {
  1460. vtctx->flushing = true;
  1461. status = VTCompressionSessionCompleteFrames(vtctx->session,
  1462. kCMTimeIndefinite);
  1463. if (status) {
  1464. av_log(avctx, AV_LOG_ERROR, "Error flushing frames: %d\n", status);
  1465. status = AVERROR_EXTERNAL;
  1466. goto end_nopkt;
  1467. }
  1468. }
  1469. *got_packet = 0;
  1470. get_frame = vtctx->dts_delta >= 0 || !frame;
  1471. if (!get_frame) {
  1472. status = 0;
  1473. goto end_nopkt;
  1474. }
  1475. status = vtenc_q_pop(vtctx, !frame, &buf);
  1476. if (status) goto end_nopkt;
  1477. if (!buf) goto end_nopkt;
  1478. status = vtenc_cm_to_avpacket(avctx, buf, pkt);
  1479. CFRelease(buf);
  1480. if (status) goto end_nopkt;
  1481. *got_packet = 1;
  1482. return 0;
  1483. end_nopkt:
  1484. av_packet_unref(pkt);
  1485. return status;
  1486. }
/**
 * Encodes a single solid dummy frame through a throwaway session so
 * VideoToolbox emits its parameter sets, which the output callback
 * stores in avctx->extradata. The temporary session is destroyed before
 * returning; vtenc_init then creates the real one.
 *
 * @return 0 on success, negative AVERROR otherwise.
 */
static int vtenc_populate_extradata(AVCodecContext *avctx,
                                    CMVideoCodecType codec_type,
                                    CFStringRef profile_level,
                                    CFNumberRef gamma_level,
                                    CFDictionaryRef enc_info,
                                    CFDictionaryRef pixel_buffer_info)
{
    VTEncContext *vtctx = avctx->priv_data;
    AVFrame *frame = av_frame_alloc();
    int y_size = avctx->width * avctx->height;
    //NOTE(review): truncates odd dimensions; (w+1)/2 * (h+1)/2 would match
    //the (avctx->width + 1) / 2 linesizes used below - confirm intended.
    int chroma_size = (avctx->width / 2) * (avctx->height / 2);
    CMSampleBufferRef buf = NULL;
    int status;

    if (!frame)
        return AVERROR(ENOMEM);

    //One luma plane plus two half-size chroma planes worth of bytes.
    frame->buf[0] = av_buffer_alloc(y_size + 2 * chroma_size);

    if(!frame->buf[0]){
        status = AVERROR(ENOMEM);
        goto pe_cleanup;
    }

    status = vtenc_create_encoder(avctx,
                                  codec_type,
                                  profile_level,
                                  gamma_level,
                                  enc_info,
                                  pixel_buffer_info,
                                  &vtctx->session);
    if (status)
        goto pe_cleanup;

    //Luma 0 / chroma 128 yields a neutral gray frame.
    frame->data[0] = frame->buf[0]->data;
    memset(frame->data[0], 0, y_size);

    frame->data[1] = frame->buf[0]->data + y_size;
    memset(frame->data[1], 128, chroma_size);

    if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
        frame->data[2] = frame->buf[0]->data + y_size + chroma_size;
        memset(frame->data[2], 128, chroma_size);
    }

    frame->linesize[0] = avctx->width;

    if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
        frame->linesize[1] =
        frame->linesize[2] = (avctx->width + 1) / 2;
    } else {
        //NOTE(review): the non-YUV420P case here is NV12, whose interleaved
        //UV plane is avctx->width bytes wide; (w+1)/2 looks too small and
        //data[1] is only half-filled above - verify against the NV12 path.
        frame->linesize[1] = (avctx->width + 1) / 2;
    }

    frame->format = avctx->pix_fmt;
    frame->width  = avctx->width;
    frame->height = avctx->height;
    av_frame_set_colorspace(frame, avctx->colorspace);
    av_frame_set_color_range(frame, avctx->color_range);
    frame->color_trc = avctx->color_trc;
    frame->color_primaries = avctx->color_primaries;

    frame->pts = 0;
    status = vtenc_send_frame(avctx, vtctx, frame);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error sending frame: %d\n", status);
        goto pe_cleanup;
    }

    //Populates extradata - output frames are flushed and param sets are available.
    status = VTCompressionSessionCompleteFrames(vtctx->session,
                                                kCMTimeIndefinite);

    if (status)
        goto pe_cleanup;

    //Drain the one encoded sample; its payload is discarded.
    status = vtenc_q_pop(vtctx, 0, &buf);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "popping: %d\n", status);
        goto pe_cleanup;
    }

    CFRelease(buf);

pe_cleanup:
    if(vtctx->session)
        CFRelease(vtctx->session);

    vtctx->session = NULL;
    vtctx->frame_ct_out = 0;

    av_frame_unref(frame);
    av_frame_free(&frame);

    //On success the output callback must have set extradata.
    av_assert0(status != 0 || (avctx->extradata && avctx->extradata_size > 0));

    return status;
}
  1565. static av_cold int vtenc_close(AVCodecContext *avctx)
  1566. {
  1567. VTEncContext *vtctx = avctx->priv_data;
  1568. if(!vtctx->session) return 0;
  1569. pthread_cond_destroy(&vtctx->cv_sample_sent);
  1570. pthread_mutex_destroy(&vtctx->lock);
  1571. CFRelease(vtctx->session);
  1572. vtctx->session = NULL;
  1573. if (vtctx->color_primaries) {
  1574. CFRelease(vtctx->color_primaries);
  1575. vtctx->color_primaries = NULL;
  1576. }
  1577. if (vtctx->transfer_function) {
  1578. CFRelease(vtctx->transfer_function);
  1579. vtctx->transfer_function = NULL;
  1580. }
  1581. if (vtctx->ycbcr_matrix) {
  1582. CFRelease(vtctx->ycbcr_matrix);
  1583. vtctx->ycbcr_matrix = NULL;
  1584. }
  1585. return 0;
  1586. }
//Input formats accepted by the encoder, in order of preference; a
//VideoToolbox surface avoids the copy entirely.
static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_VIDEOTOOLBOX,
    AV_PIX_FMT_NV12,
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_NONE
};
#define OFFSET(x) offsetof(VTEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
//Encoder private options; "profile", "level" and "coder" are named
//enums whose constants follow each parent entry.
static const AVOption options[] = {
    { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = H264_PROF_AUTO }, H264_PROF_AUTO, H264_PROF_COUNT, VE, "profile" },
    { "baseline", "Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_BASELINE }, INT_MIN, INT_MAX, VE, "profile" },
    { "main", "Main Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_MAIN }, INT_MIN, INT_MAX, VE, "profile" },
    { "high", "High Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_HIGH }, INT_MIN, INT_MAX, VE, "profile" },

    //Level values are ten times the H.264 level number (0 = auto).
    { "level", "Level", OFFSET(level), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, VE, "level" },
    { "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" },
    { "3.0", "Level 3.0", 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, INT_MIN, INT_MAX, VE, "level" },
    { "3.1", "Level 3.1", 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, INT_MIN, INT_MAX, VE, "level" },
    { "3.2", "Level 3.2", 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, INT_MIN, INT_MAX, VE, "level" },
    { "4.0", "Level 4.0", 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, INT_MIN, INT_MAX, VE, "level" },
    { "4.1", "Level 4.1", 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, INT_MIN, INT_MAX, VE, "level" },
    { "4.2", "Level 4.2", 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, INT_MIN, INT_MAX, VE, "level" },
    { "5.0", "Level 5.0", 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, INT_MIN, INT_MAX, VE, "level" },
    { "5.1", "Level 5.1", 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, INT_MIN, INT_MAX, VE, "level" },
    { "5.2", "Level 5.2", 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, INT_MIN, INT_MAX, VE, "level" },

    { "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL,
        { .i64 = 0 }, 0, 1, VE },

    { "coder", "Entropy coding", OFFSET(entropy), AV_OPT_TYPE_INT, { .i64 = VT_ENTROPY_NOT_SET }, VT_ENTROPY_NOT_SET, VT_CABAC, VE, "coder" },
    { "cavlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
    { "vlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
    { "cabac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
    { "ac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },

    { "realtime", "Hint that encoding should happen in real-time if not faster (e.g. capturing from camera).",
        OFFSET(realtime), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },

    { "frames_before", "Other frames will come before the frames in this session. This helps smooth concatenation issues.",
        OFFSET(frames_before), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "frames_after", "Other frames will come after the frames in this session. This helps smooth concatenation issues.",
        OFFSET(frames_after), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },

    { NULL },
};
//AVClass wiring so the private options above are visible to av_opt_*.
static const AVClass h264_videotoolbox_class = {
    .class_name = "h264_videotoolbox",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
//Encoder registration. AV_CODEC_CAP_DELAY: packets are produced
//asynchronously by the VT callback, so output lags input and the
//encoder must be drained with NULL frames.
AVCodec ff_h264_videotoolbox_encoder = {
    .name             = "h264_videotoolbox",
    .long_name        = NULL_IF_CONFIG_SMALL("VideoToolbox H.264 Encoder"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_H264,
    .priv_data_size   = sizeof(VTEncContext),
    .pix_fmts         = pix_fmts,
    .init             = vtenc_init,
    .encode2          = vtenc_frame,
    .close            = vtenc_close,
    .capabilities     = AV_CODEC_CAP_DELAY,
    .priv_class       = &h264_videotoolbox_class,
    .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
                        FF_CODEC_CAP_INIT_CLEANUP,
};