You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2445 lines
78KB

  1. /*
  2. * copyright (c) 2015 Rick Kern <kernrj@gmail.com>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include <VideoToolbox/VideoToolbox.h>
  21. #include <CoreVideo/CoreVideo.h>
  22. #include <CoreMedia/CoreMedia.h>
  23. #include <TargetConditionals.h>
  24. #include <Availability.h>
  25. #include "avcodec.h"
  26. #include "libavutil/opt.h"
  27. #include "libavutil/avassert.h"
  28. #include "libavutil/atomic.h"
  29. #include "libavutil/avstring.h"
  30. #include "libavcodec/avcodec.h"
  31. #include "libavutil/pixdesc.h"
  32. #include "internal.h"
  33. #include <pthread.h>
  34. #include "h264.h"
  35. #include "h264_sei.h"
  36. #include <dlfcn.h>
/*
 * These symbols may not be present at runtime on older OS releases.
 * They are resolved dynamically via dlsym (see GET_SYM/loadVTEncSymbols
 * below) and cached here instead of being referenced directly, so the
 * encoder can run on systems that predate a given constant.
 */
static struct{
    CFStringRef kCVImageBufferColorPrimaries_ITU_R_2020;
    CFStringRef kCVImageBufferTransferFunction_ITU_R_2020;
    CFStringRef kCVImageBufferYCbCrMatrix_ITU_R_2020;

    CFStringRef kVTCompressionPropertyKey_H264EntropyMode;
    CFStringRef kVTH264EntropyMode_CAVLC;
    CFStringRef kVTH264EntropyMode_CABAC;

    CFStringRef kVTProfileLevel_H264_Baseline_4_0;
    CFStringRef kVTProfileLevel_H264_Baseline_4_2;
    CFStringRef kVTProfileLevel_H264_Baseline_5_0;
    CFStringRef kVTProfileLevel_H264_Baseline_5_1;
    CFStringRef kVTProfileLevel_H264_Baseline_5_2;
    CFStringRef kVTProfileLevel_H264_Baseline_AutoLevel;
    CFStringRef kVTProfileLevel_H264_Main_4_2;
    CFStringRef kVTProfileLevel_H264_Main_5_1;
    CFStringRef kVTProfileLevel_H264_Main_5_2;
    CFStringRef kVTProfileLevel_H264_Main_AutoLevel;
    CFStringRef kVTProfileLevel_H264_High_3_0;
    CFStringRef kVTProfileLevel_H264_High_3_1;
    CFStringRef kVTProfileLevel_H264_High_3_2;
    CFStringRef kVTProfileLevel_H264_High_4_0;
    CFStringRef kVTProfileLevel_H264_High_4_1;
    CFStringRef kVTProfileLevel_H264_High_4_2;
    CFStringRef kVTProfileLevel_H264_High_5_1;
    CFStringRef kVTProfileLevel_H264_High_5_2;
    CFStringRef kVTProfileLevel_H264_High_AutoLevel;

    CFStringRef kVTCompressionPropertyKey_RealTime;

    CFStringRef kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder;
    CFStringRef kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder;
} compat_keys;
  68. #define GET_SYM(symbol, defaultVal) \
  69. do{ \
  70. CFStringRef cfstr = dlsym(RTLD_DEFAULT, #symbol); \
  71. if(!cfstr) \
  72. compat_keys.symbol = CFSTR(defaultVal); \
  73. else \
  74. compat_keys.symbol = symbol; \
  75. }while(0)
/* Guard so the compat_keys table is populated exactly once per process. */
static pthread_once_t once_ctrl = PTHREAD_ONCE_INIT;

/*
 * Populate compat_keys: each entry resolves to the real system constant when
 * the running OS provides it, otherwise to a CFSTR holding the same literal
 * string value. Intended to be invoked through pthread_once(&once_ctrl, ...).
 */
static void loadVTEncSymbols(){
    GET_SYM(kCVImageBufferColorPrimaries_ITU_R_2020,   "ITU_R_2020");
    GET_SYM(kCVImageBufferTransferFunction_ITU_R_2020, "ITU_R_2020");
    GET_SYM(kCVImageBufferYCbCrMatrix_ITU_R_2020,      "ITU_R_2020");

    GET_SYM(kVTCompressionPropertyKey_H264EntropyMode, "H264EntropyMode");
    GET_SYM(kVTH264EntropyMode_CAVLC, "CAVLC");
    GET_SYM(kVTH264EntropyMode_CABAC, "CABAC");

    GET_SYM(kVTProfileLevel_H264_Baseline_4_0,       "H264_Baseline_4_0");
    GET_SYM(kVTProfileLevel_H264_Baseline_4_2,       "H264_Baseline_4_2");
    GET_SYM(kVTProfileLevel_H264_Baseline_5_0,       "H264_Baseline_5_0");
    GET_SYM(kVTProfileLevel_H264_Baseline_5_1,       "H264_Baseline_5_1");
    GET_SYM(kVTProfileLevel_H264_Baseline_5_2,       "H264_Baseline_5_2");
    GET_SYM(kVTProfileLevel_H264_Baseline_AutoLevel, "H264_Baseline_AutoLevel");
    GET_SYM(kVTProfileLevel_H264_Main_4_2,           "H264_Main_4_2");
    GET_SYM(kVTProfileLevel_H264_Main_5_1,           "H264_Main_5_1");
    GET_SYM(kVTProfileLevel_H264_Main_5_2,           "H264_Main_5_2");
    GET_SYM(kVTProfileLevel_H264_Main_AutoLevel,     "H264_Main_AutoLevel");
    GET_SYM(kVTProfileLevel_H264_High_3_0,           "H264_High_3_0");
    GET_SYM(kVTProfileLevel_H264_High_3_1,           "H264_High_3_1");
    GET_SYM(kVTProfileLevel_H264_High_3_2,           "H264_High_3_2");
    GET_SYM(kVTProfileLevel_H264_High_4_0,           "H264_High_4_0");
    GET_SYM(kVTProfileLevel_H264_High_4_1,           "H264_High_4_1");
    GET_SYM(kVTProfileLevel_H264_High_4_2,           "H264_High_4_2");
    GET_SYM(kVTProfileLevel_H264_High_5_1,           "H264_High_5_1");
    GET_SYM(kVTProfileLevel_H264_High_5_2,           "H264_High_5_2");
    GET_SYM(kVTProfileLevel_H264_High_AutoLevel,     "H264_High_AutoLevel");

    GET_SYM(kVTCompressionPropertyKey_RealTime, "RealTime");

    GET_SYM(kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
            "EnableHardwareAcceleratedVideoEncoder");
    GET_SYM(kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
            "RequireHardwareAcceleratedVideoEncoder");
}
/* H.264 profile selection — values of the "profile" private option.
 * AUTO lets the encoder (or the level logic) pick; COUNT is a sentinel. */
typedef enum VT_H264Profile {
    H264_PROF_AUTO,
    H264_PROF_BASELINE,
    H264_PROF_MAIN,
    H264_PROF_HIGH,
    H264_PROF_COUNT
} VT_H264Profile;
/* Entropy coding mode — NOT_SET leaves the choice to VideoToolbox. */
typedef enum VTH264Entropy{
    VT_ENTROPY_NOT_SET,
    VT_CAVLC,
    VT_CABAC
} VTH264Entropy;
/* Annex B start code prepended to parameter sets and NAL units on output. */
static const uint8_t start_code[] = { 0, 0, 0, 1 };

/* Heap-allocated SEI side-data payload that travels with an encoded frame;
 * ownership passes to whoever consumes (or discards) the queue entry. */
typedef struct ExtraSEI {
    void *data;
    size_t size;
} ExtraSEI;

/* Node of the singly-linked FIFO of encoded samples (see q_head/q_tail). */
typedef struct BufNode {
    CMSampleBufferRef cm_buffer; // retained sample buffer from the callback
    ExtraSEI *sei;               // optional side data, may be NULL
    struct BufNode* next;
    int error;
} BufNode;
/* Private context for the VideoToolbox H.264 encoder. */
typedef struct VTEncContext {
    AVClass *class;
    VTCompressionSessionRef session;  // underlying VideoToolbox session

    /* CoreVideo color tags derived from the AVCodecContext color fields. */
    CFStringRef ycbcr_matrix;
    CFStringRef color_primaries;
    CFStringRef transfer_function;

    /* lock protects the output queue, counters and async_error;
     * cv_sample_sent wakes blocked vtenc_q_pop() calls. */
    pthread_mutex_t lock;
    pthread_cond_t  cv_sample_sent;

    int async_error;     // sticky error recorded by the output callback

    BufNode *q_head;     // FIFO of encoded samples; head is oldest
    BufNode *q_tail;

    int64_t frame_ct_out;  // frames dequeued so far
    int64_t frame_ct_in;   // frames submitted so far

    /* presumably used for DTS generation from PTS — TODO confirm against
     * the submit/receive code later in the file */
    int64_t first_pts;
    int64_t dts_delta;

    /* Values of the private AVOptions. */
    int64_t profile;
    int64_t level;
    int64_t entropy;
    int64_t realtime;
    int64_t frames_before;
    int64_t frames_after;

    int64_t allow_sw;    // permit software encoding fallback

    bool flushing;            // no more input; drain remaining output
    bool has_b_frames;
    bool warned_color_range;
    bool a53_cc;              // emit A/53 closed-caption SEI
} VTEncContext;
/* Defined later in the file; forward-declared here. Presumably performs a
 * throwaway encode during init to obtain extradata — TODO(review): confirm
 * against the definition below this chunk. */
static int vtenc_populate_extradata(AVCodecContext *avctx,
                                    CMVideoCodecType codec_type,
                                    CFStringRef profile_level,
                                    CFNumberRef gamma_level,
                                    CFDictionaryRef enc_info,
                                    CFDictionaryRef pixel_buffer_info);
  165. /**
  166. * NULL-safe release of *refPtr, and sets value to NULL.
  167. */
  168. static void vt_release_num(CFNumberRef* refPtr){
  169. if (!*refPtr) {
  170. return;
  171. }
  172. CFRelease(*refPtr);
  173. *refPtr = NULL;
  174. }
  175. static void set_async_error(VTEncContext *vtctx, int err)
  176. {
  177. BufNode *info;
  178. pthread_mutex_lock(&vtctx->lock);
  179. vtctx->async_error = err;
  180. info = vtctx->q_head;
  181. vtctx->q_head = vtctx->q_tail = NULL;
  182. while (info) {
  183. BufNode *next = info->next;
  184. CFRelease(info->cm_buffer);
  185. av_free(info);
  186. info = next;
  187. }
  188. pthread_mutex_unlock(&vtctx->lock);
  189. }
/* Drop every queued sample without recording an error (err == 0). */
static void clear_frame_queue(VTEncContext *vtctx)
{
    set_async_error(vtctx, 0);
}
/**
 * Dequeue the oldest encoded sample.
 *
 * @param wait block on cv_sample_sent until a sample or an async error
 *             arrives; when false, return immediately with *buf = NULL if
 *             the queue is empty.
 * @param buf  receives the sample buffer (ownership transferred to the
 *             caller), or NULL when nothing is pending.
 * @param sei  optional; receives the node's side data (ownership
 *             transferred). When NULL, the side data is freed here.
 * @return 0, or the recorded async error code.
 */
static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf, ExtraSEI **sei)
{
    BufNode *info;

    pthread_mutex_lock(&vtctx->lock);

    /* A recorded asynchronous error aborts all further pops. */
    if (vtctx->async_error) {
        pthread_mutex_unlock(&vtctx->lock);
        return vtctx->async_error;
    }

    /* While flushing, once every submitted frame has been handed back there
     * is nothing left to wait for. */
    if (vtctx->flushing && vtctx->frame_ct_in == vtctx->frame_ct_out) {
        *buf = NULL;
        pthread_mutex_unlock(&vtctx->lock);
        return 0;
    }

    /* Re-check async_error after each wakeup: the callback may have failed
     * while we slept. */
    while (!vtctx->q_head && !vtctx->async_error && wait) {
        pthread_cond_wait(&vtctx->cv_sample_sent, &vtctx->lock);
    }

    if (!vtctx->q_head) {
        pthread_mutex_unlock(&vtctx->lock);
        *buf = NULL;
        return 0;
    }

    /* Unlink the head node; keep q_tail consistent when the queue empties. */
    info = vtctx->q_head;
    vtctx->q_head = vtctx->q_head->next;
    if (!vtctx->q_head) {
        vtctx->q_tail = NULL;
    }

    pthread_mutex_unlock(&vtctx->lock);

    *buf = info->cm_buffer;
    if (sei && *buf) {
        *sei = info->sei;
    } else if (info->sei) {
        /* Caller did not ask for side data: free it so it does not leak. */
        if (info->sei->data) av_free(info->sei->data);
        av_free(info->sei);
    }
    av_free(info);

    vtctx->frame_ct_out++;

    return 0;
}
  232. static void vtenc_q_push(VTEncContext *vtctx, CMSampleBufferRef buffer, ExtraSEI *sei)
  233. {
  234. BufNode *info = av_malloc(sizeof(BufNode));
  235. if (!info) {
  236. set_async_error(vtctx, AVERROR(ENOMEM));
  237. return;
  238. }
  239. CFRetain(buffer);
  240. info->cm_buffer = buffer;
  241. info->sei = sei;
  242. info->next = NULL;
  243. pthread_mutex_lock(&vtctx->lock);
  244. pthread_cond_signal(&vtctx->cv_sample_sent);
  245. if (!vtctx->q_head) {
  246. vtctx->q_head = info;
  247. } else {
  248. vtctx->q_tail->next = info;
  249. }
  250. vtctx->q_tail = info;
  251. pthread_mutex_unlock(&vtctx->lock);
  252. }
  253. static int count_nalus(size_t length_code_size,
  254. CMSampleBufferRef sample_buffer,
  255. int *count)
  256. {
  257. size_t offset = 0;
  258. int status;
  259. int nalu_ct = 0;
  260. uint8_t size_buf[4];
  261. size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
  262. CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
  263. if (length_code_size > 4)
  264. return AVERROR_INVALIDDATA;
  265. while (offset < src_size) {
  266. size_t curr_src_len;
  267. size_t box_len = 0;
  268. size_t i;
  269. status = CMBlockBufferCopyDataBytes(block,
  270. offset,
  271. length_code_size,
  272. size_buf);
  273. for (i = 0; i < length_code_size; i++) {
  274. box_len <<= 8;
  275. box_len |= size_buf[i];
  276. }
  277. curr_src_len = box_len + length_code_size;
  278. offset += curr_src_len;
  279. nalu_ct++;
  280. }
  281. *count = nalu_ct;
  282. return 0;
  283. }
  284. static CMVideoCodecType get_cm_codec_type(enum AVCodecID id)
  285. {
  286. switch (id) {
  287. case AV_CODEC_ID_H264: return kCMVideoCodecType_H264;
  288. default: return 0;
  289. }
  290. }
/**
 * Compute the total number of bytes needed to store every H.264 parameter
 * set (SPS/PPS) of vid_fmt, each prefixed with a 4-byte Annex B start code.
 * (The original comment here described a `dst` parameter that belongs to
 * copy_param_sets() below.)
 *
 * @param size receives the computed total on success.
 * @return 0 on success, AVERROR_EXTERNAL if the parameter sets cannot be
 *         queried.
 */
static int get_params_size(
    AVCodecContext *avctx,
    CMVideoFormatDescriptionRef vid_fmt,
    size_t *size)
{
    size_t total_size = 0;
    size_t ps_count;
    int is_count_bad = 0;
    size_t i;
    int status;

    /* First query only the parameter-set count. */
    status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt,
                                                                0,
                                                                NULL,
                                                                NULL,
                                                                &ps_count,
                                                                NULL);
    if (status) {
        /* Count query failed: fall back to iterating until indexing fails. */
        is_count_bad = 1;
        ps_count     = 0;
        status       = 0;
    }

    for (i = 0; i < ps_count || is_count_bad; i++) {
        const uint8_t *ps;
        size_t ps_size;
        status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt,
                                                                    i,
                                                                    &ps,
                                                                    &ps_size,
                                                                    NULL,
                                                                    NULL);
        if (status) {
            /*
             * When ps_count is invalid, status != 0 ends the loop normally
             * unless we didn't get any parameter sets.
             */
            if (i > 0 && is_count_bad) status = 0;

            break;
        }

        total_size += ps_size + sizeof(start_code);
    }

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error getting parameter set sizes: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    *size = total_size;
    return 0;
}
/**
 * Copy every H.264 parameter set of vid_fmt into dst, each preceded by a
 * 4-byte Annex B start code. dst must hold at least the number of bytes
 * reported by get_params_size() for the same vid_fmt.
 *
 * @param dst      destination buffer (pre-allocated by the caller).
 * @param dst_size capacity of dst in bytes.
 * @return 0 on success, AVERROR_BUFFER_TOO_SMALL if dst cannot hold the
 *         data, AVERROR_EXTERNAL if querying the parameter sets fails.
 */
static int copy_param_sets(
    AVCodecContext *avctx,
    CMVideoFormatDescriptionRef vid_fmt,
    uint8_t *dst,
    size_t dst_size)
{
    size_t ps_count;
    int is_count_bad = 0;
    int status;
    size_t offset = 0;
    size_t i;

    /* Same count-then-iterate strategy as get_params_size(). */
    status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt,
                                                                0,
                                                                NULL,
                                                                NULL,
                                                                &ps_count,
                                                                NULL);
    if (status) {
        is_count_bad = 1;
        ps_count     = 0;
        status       = 0;
    }

    for (i = 0; i < ps_count || is_count_bad; i++) {
        const uint8_t *ps;
        size_t ps_size;
        size_t next_offset;

        status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt,
                                                                    i,
                                                                    &ps,
                                                                    &ps_size,
                                                                    NULL,
                                                                    NULL);
        if (status) {
            /* With an invalid count, a failed index simply ends the loop —
             * unless no parameter set was retrieved at all. */
            if (i > 0 && is_count_bad) status = 0;

            break;
        }

        next_offset = offset + sizeof(start_code) + ps_size;
        if (dst_size < next_offset) {
            av_log(avctx, AV_LOG_ERROR, "Error: buffer too small for parameter sets.\n");
            return AVERROR_BUFFER_TOO_SMALL;
        }

        memcpy(dst + offset, start_code, sizeof(start_code));
        offset += sizeof(start_code);

        memcpy(dst + offset, ps, ps_size);
        offset = next_offset;
    }

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error getting parameter set data: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    return 0;
}
  398. static int set_extradata(AVCodecContext *avctx, CMSampleBufferRef sample_buffer)
  399. {
  400. CMVideoFormatDescriptionRef vid_fmt;
  401. size_t total_size;
  402. int status;
  403. vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
  404. if (!vid_fmt) {
  405. av_log(avctx, AV_LOG_ERROR, "No video format.\n");
  406. return AVERROR_EXTERNAL;
  407. }
  408. status = get_params_size(avctx, vid_fmt, &total_size);
  409. if (status) {
  410. av_log(avctx, AV_LOG_ERROR, "Could not get parameter sets.\n");
  411. return status;
  412. }
  413. avctx->extradata = av_mallocz(total_size + AV_INPUT_BUFFER_PADDING_SIZE);
  414. if (!avctx->extradata) {
  415. return AVERROR(ENOMEM);
  416. }
  417. avctx->extradata_size = total_size;
  418. status = copy_param_sets(avctx, vid_fmt, avctx->extradata, total_size);
  419. if (status) {
  420. av_log(avctx, AV_LOG_ERROR, "Could not copy param sets.\n");
  421. return status;
  422. }
  423. return 0;
  424. }
  425. static void vtenc_output_callback(
  426. void *ctx,
  427. void *sourceFrameCtx,
  428. OSStatus status,
  429. VTEncodeInfoFlags flags,
  430. CMSampleBufferRef sample_buffer)
  431. {
  432. AVCodecContext *avctx = ctx;
  433. VTEncContext *vtctx = avctx->priv_data;
  434. ExtraSEI *sei = sourceFrameCtx;
  435. if (vtctx->async_error) {
  436. if(sample_buffer) CFRelease(sample_buffer);
  437. return;
  438. }
  439. if (status || !sample_buffer) {
  440. av_log(avctx, AV_LOG_ERROR, "Error encoding frame: %d\n", (int)status);
  441. set_async_error(vtctx, AVERROR_EXTERNAL);
  442. return;
  443. }
  444. if (!avctx->extradata && (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
  445. int set_status = set_extradata(avctx, sample_buffer);
  446. if (set_status) {
  447. set_async_error(vtctx, set_status);
  448. return;
  449. }
  450. }
  451. vtenc_q_push(vtctx, sample_buffer, sei);
  452. }
  453. static int get_length_code_size(
  454. AVCodecContext *avctx,
  455. CMSampleBufferRef sample_buffer,
  456. size_t *size)
  457. {
  458. CMVideoFormatDescriptionRef vid_fmt;
  459. int isize;
  460. int status;
  461. vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
  462. if (!vid_fmt) {
  463. av_log(avctx, AV_LOG_ERROR, "Error getting buffer format description.\n");
  464. return AVERROR_EXTERNAL;
  465. }
  466. status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt,
  467. 0,
  468. NULL,
  469. NULL,
  470. NULL,
  471. &isize);
  472. if (status) {
  473. av_log(avctx, AV_LOG_ERROR, "Error getting length code size: %d\n", status);
  474. return AVERROR_EXTERNAL;
  475. }
  476. *size = isize;
  477. return 0;
  478. }
/*
 * Returns true on success.
 *
 * If profile_level_val is NULL and this method returns true, don't specify the
 * profile/level to the encoder.
 *
 * Level values are the AVOption encoding: 10 * H.264 level (e.g. 41 = 4.1),
 * with 0 meaning auto. Constants without guaranteed availability on older
 * OS releases are taken from compat_keys (see loadVTEncSymbols).
 */
static bool get_vt_profile_level(AVCodecContext *avctx,
                                 CFStringRef *profile_level_val)
{
    VTEncContext *vtctx = avctx->priv_data;
    int64_t profile = vtctx->profile;

    if (profile == H264_PROF_AUTO && vtctx->level) {
        //Need to pick a profile if level is not auto-selected.
        profile = vtctx->has_b_frames ? H264_PROF_MAIN : H264_PROF_BASELINE;
    }

    *profile_level_val = NULL;

    switch (profile) {
        case H264_PROF_AUTO:
            return true;

        case H264_PROF_BASELINE:
            switch (vtctx->level) {
                case  0: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_AutoLevel; break;
                case 13: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3;       break;
                case 30: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0;       break;
                case 31: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1;       break;
                case 32: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2;       break;
                case 40: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_4_0;       break;
                case 41: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1;       break;
                case 42: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_4_2;       break;
                case 50: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_5_0;       break;
                case 51: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_5_1;       break;
                case 52: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_5_2;       break;
            }
            break;

        case H264_PROF_MAIN:
            switch (vtctx->level) {
                case  0: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Main_AutoLevel; break;
                case 30: *profile_level_val = kVTProfileLevel_H264_Main_3_0;       break;
                case 31: *profile_level_val = kVTProfileLevel_H264_Main_3_1;       break;
                case 32: *profile_level_val = kVTProfileLevel_H264_Main_3_2;       break;
                case 40: *profile_level_val = kVTProfileLevel_H264_Main_4_0;       break;
                case 41: *profile_level_val = kVTProfileLevel_H264_Main_4_1;       break;
                case 42: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Main_4_2;       break;
                case 50: *profile_level_val = kVTProfileLevel_H264_Main_5_0;       break;
                case 51: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Main_5_1;       break;
                case 52: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Main_5_2;       break;
            }
            break;

        case H264_PROF_HIGH:
            switch (vtctx->level) {
                case  0: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_AutoLevel; break;
                case 30: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_3_0;       break;
                case 31: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_3_1;       break;
                case 32: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_3_2;       break;
                case 40: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_4_0;       break;
                case 41: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_4_1;       break;
                case 42: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_4_2;       break;
                case 50: *profile_level_val = kVTProfileLevel_H264_High_5_0;       break;
                case 51: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_5_1;       break;
                case 52: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_5_2;       break;
            }
            break;
    }

    if (!*profile_level_val) {
        av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
        return false;
    }

    return true;
}
  567. static int get_cv_pixel_format(AVCodecContext* avctx,
  568. enum AVPixelFormat fmt,
  569. enum AVColorRange range,
  570. int* av_pixel_format,
  571. int* range_guessed)
  572. {
  573. if (range_guessed) *range_guessed = range != AVCOL_RANGE_MPEG &&
  574. range != AVCOL_RANGE_JPEG;
  575. //MPEG range is used when no range is set
  576. if (fmt == AV_PIX_FMT_NV12) {
  577. *av_pixel_format = range == AVCOL_RANGE_JPEG ?
  578. kCVPixelFormatType_420YpCbCr8BiPlanarFullRange :
  579. kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
  580. } else if (fmt == AV_PIX_FMT_YUV420P) {
  581. *av_pixel_format = range == AVCOL_RANGE_JPEG ?
  582. kCVPixelFormatType_420YpCbCr8PlanarFullRange :
  583. kCVPixelFormatType_420YpCbCr8Planar;
  584. } else {
  585. return AVERROR(EINVAL);
  586. }
  587. return 0;
  588. }
  589. static void add_color_attr(AVCodecContext *avctx, CFMutableDictionaryRef dict) {
  590. VTEncContext *vtctx = avctx->priv_data;
  591. if (vtctx->color_primaries) {
  592. CFDictionarySetValue(dict,
  593. kCVImageBufferColorPrimariesKey,
  594. vtctx->color_primaries);
  595. }
  596. if (vtctx->transfer_function) {
  597. CFDictionarySetValue(dict,
  598. kCVImageBufferTransferFunctionKey,
  599. vtctx->transfer_function);
  600. }
  601. if (vtctx->ycbcr_matrix) {
  602. CFDictionarySetValue(dict,
  603. kCVImageBufferYCbCrMatrixKey,
  604. vtctx->ycbcr_matrix);
  605. }
  606. }
  607. static int create_cv_pixel_buffer_info(AVCodecContext* avctx,
  608. CFMutableDictionaryRef* dict)
  609. {
  610. CFNumberRef cv_color_format_num = NULL;
  611. CFNumberRef width_num = NULL;
  612. CFNumberRef height_num = NULL;
  613. CFMutableDictionaryRef pixel_buffer_info = NULL;
  614. int cv_color_format;
  615. int status = get_cv_pixel_format(avctx,
  616. avctx->pix_fmt,
  617. avctx->color_range,
  618. &cv_color_format,
  619. NULL);
  620. if (status) return status;
  621. pixel_buffer_info = CFDictionaryCreateMutable(
  622. kCFAllocatorDefault,
  623. 20,
  624. &kCFCopyStringDictionaryKeyCallBacks,
  625. &kCFTypeDictionaryValueCallBacks);
  626. if (!pixel_buffer_info) goto pbinfo_nomem;
  627. cv_color_format_num = CFNumberCreate(kCFAllocatorDefault,
  628. kCFNumberSInt32Type,
  629. &cv_color_format);
  630. if (!cv_color_format_num) goto pbinfo_nomem;
  631. CFDictionarySetValue(pixel_buffer_info,
  632. kCVPixelBufferPixelFormatTypeKey,
  633. cv_color_format_num);
  634. vt_release_num(&cv_color_format_num);
  635. width_num = CFNumberCreate(kCFAllocatorDefault,
  636. kCFNumberSInt32Type,
  637. &avctx->width);
  638. if (!width_num) return AVERROR(ENOMEM);
  639. CFDictionarySetValue(pixel_buffer_info,
  640. kCVPixelBufferWidthKey,
  641. width_num);
  642. vt_release_num(&width_num);
  643. height_num = CFNumberCreate(kCFAllocatorDefault,
  644. kCFNumberSInt32Type,
  645. &avctx->height);
  646. if (!height_num) goto pbinfo_nomem;
  647. CFDictionarySetValue(pixel_buffer_info,
  648. kCVPixelBufferHeightKey,
  649. height_num);
  650. vt_release_num(&height_num);
  651. add_color_attr(avctx, pixel_buffer_info);
  652. *dict = pixel_buffer_info;
  653. return 0;
  654. pbinfo_nomem:
  655. vt_release_num(&cv_color_format_num);
  656. vt_release_num(&width_num);
  657. vt_release_num(&height_num);
  658. if (pixel_buffer_info) CFRelease(pixel_buffer_info);
  659. return AVERROR(ENOMEM);
  660. }
  661. static int get_cv_color_primaries(AVCodecContext *avctx,
  662. CFStringRef *primaries)
  663. {
  664. enum AVColorPrimaries pri = avctx->color_primaries;
  665. switch (pri) {
  666. case AVCOL_PRI_UNSPECIFIED:
  667. *primaries = NULL;
  668. break;
  669. case AVCOL_PRI_BT709:
  670. *primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
  671. break;
  672. case AVCOL_PRI_BT2020:
  673. *primaries = compat_keys.kCVImageBufferColorPrimaries_ITU_R_2020;
  674. break;
  675. default:
  676. av_log(avctx, AV_LOG_ERROR, "Color primaries %s is not supported.\n", av_color_primaries_name(pri));
  677. *primaries = NULL;
  678. return -1;
  679. }
  680. return 0;
  681. }
  682. static int get_cv_transfer_function(AVCodecContext *avctx,
  683. CFStringRef *transfer_fnc,
  684. CFNumberRef *gamma_level)
  685. {
  686. enum AVColorTransferCharacteristic trc = avctx->color_trc;
  687. Float32 gamma;
  688. *gamma_level = NULL;
  689. switch (trc) {
  690. case AVCOL_TRC_UNSPECIFIED:
  691. *transfer_fnc = NULL;
  692. break;
  693. case AVCOL_TRC_BT709:
  694. *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_709_2;
  695. break;
  696. case AVCOL_TRC_SMPTE240M:
  697. *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_240M_1995;
  698. break;
  699. case AVCOL_TRC_GAMMA22:
  700. gamma = 2.2;
  701. *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
  702. *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
  703. break;
  704. case AVCOL_TRC_GAMMA28:
  705. gamma = 2.8;
  706. *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
  707. *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
  708. break;
  709. case AVCOL_TRC_BT2020_10:
  710. case AVCOL_TRC_BT2020_12:
  711. *transfer_fnc = compat_keys.kCVImageBufferTransferFunction_ITU_R_2020;
  712. break;
  713. default:
  714. av_log(avctx, AV_LOG_ERROR, "Transfer function %s is not supported.\n", av_color_transfer_name(trc));
  715. return -1;
  716. }
  717. return 0;
  718. }
  719. static int get_cv_ycbcr_matrix(AVCodecContext *avctx, CFStringRef *matrix) {
  720. switch(avctx->colorspace) {
  721. case AVCOL_SPC_BT709:
  722. *matrix = kCVImageBufferYCbCrMatrix_ITU_R_709_2;
  723. break;
  724. case AVCOL_SPC_UNSPECIFIED:
  725. *matrix = NULL;
  726. break;
  727. case AVCOL_SPC_BT470BG:
  728. case AVCOL_SPC_SMPTE170M:
  729. *matrix = kCVImageBufferYCbCrMatrix_ITU_R_601_4;
  730. break;
  731. case AVCOL_SPC_SMPTE240M:
  732. *matrix = kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
  733. break;
  734. case AVCOL_SPC_BT2020_NCL:
  735. *matrix = compat_keys.kCVImageBufferYCbCrMatrix_ITU_R_2020;
  736. break;
  737. default:
  738. av_log(avctx, AV_LOG_ERROR, "Color space %s is not supported.\n", av_color_space_name(avctx->colorspace));
  739. return -1;
  740. }
  741. return 0;
  742. }
/**
 * Create a VTCompressionSession and apply every configured property
 * (bitrate, profile/level, GOP, SAR, color tags, entropy mode, realtime),
 * then prepare it for encoding.
 *
 * Property-set failures are fatal (AVERROR_EXTERNAL) for bitrate, key-frame
 * interval, SAR, frame reordering and session preparation; the remaining
 * properties only log a warning/error and continue.
 *
 * NOTE(review): after creation, all checks and VTSessionSetProperty calls
 * use vtctx->session rather than *session — this only works if the caller
 * passes &vtctx->session (or an alias); confirm at the call sites.
 */
static int vtenc_create_encoder(AVCodecContext *avctx,
                                CMVideoCodecType codec_type,
                                CFStringRef profile_level,
                                CFNumberRef gamma_level,
                                CFDictionaryRef enc_info,
                                CFDictionaryRef pixel_buffer_info,
                                VTCompressionSessionRef *session)
{
    VTEncContext *vtctx = avctx->priv_data;
    SInt32 bit_rate = avctx->bit_rate;
    CFNumberRef bit_rate_num;

    int status = VTCompressionSessionCreate(kCFAllocatorDefault,
                                            avctx->width,
                                            avctx->height,
                                            codec_type,
                                            enc_info,
                                            pixel_buffer_info,
                                            kCFAllocatorDefault,
                                            vtenc_output_callback,
                                            avctx,
                                            session);

    if (status || !vtctx->session) {
        av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status);

#if !TARGET_OS_IPHONE
        if (!vtctx->allow_sw) {
            av_log(avctx, AV_LOG_ERROR, "Try -allow_sw 1. The hardware encoder may be busy, or not supported.\n");
        }
#endif

        return AVERROR_EXTERNAL;
    }

    /* Average bitrate (mandatory). */
    bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
                                  kCFNumberSInt32Type,
                                  &bit_rate);
    if (!bit_rate_num) return AVERROR(ENOMEM);

    status = VTSessionSetProperty(vtctx->session,
                                  kVTCompressionPropertyKey_AverageBitRate,
                                  bit_rate_num);
    CFRelease(bit_rate_num);

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error setting bitrate property: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    /* Profile/level, when requested (failure is non-fatal). */
    if (profile_level) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_ProfileLevel,
                                      profile_level);
        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d\n", status);
        }
    }

    /* Maximum key-frame interval from gop_size. */
    if (avctx->gop_size > 0) {
        CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault,
                                              kCFNumberIntType,
                                              &avctx->gop_size);
        if (!interval) {
            return AVERROR(ENOMEM);
        }

        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_MaxKeyFrameInterval,
                                      interval);
        CFRelease(interval);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting 'max key-frame interval' property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    /* Optional hints that more frames exist before/after this stream;
     * unsupported on some devices, in which case they are ignored. */
    if (vtctx->frames_before) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_MoreFramesBeforeStart,
                                      kCFBooleanTrue);

        if (status == kVTPropertyNotSupportedErr) {
            av_log(avctx, AV_LOG_WARNING, "frames_before property is not supported on this device. Ignoring.\n");
        } else if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting frames_before property: %d\n", status);
        }
    }

    if (vtctx->frames_after) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_MoreFramesAfterEnd,
                                      kCFBooleanTrue);

        if (status == kVTPropertyNotSupportedErr) {
            av_log(avctx, AV_LOG_WARNING, "frames_after property is not supported on this device. Ignoring.\n");
        } else if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting frames_after property: %d\n", status);
        }
    }

    /* Sample aspect ratio, reduced to fit 32-bit numerator/denominator. */
    if (avctx->sample_aspect_ratio.num != 0) {
        CFNumberRef num;
        CFNumberRef den;
        CFMutableDictionaryRef par;
        AVRational *avpar = &avctx->sample_aspect_ratio;

        av_reduce(&avpar->num, &avpar->den,
                   avpar->num,  avpar->den,
                  0xFFFFFFFF);

        num = CFNumberCreate(kCFAllocatorDefault,
                             kCFNumberIntType,
                             &avpar->num);

        den = CFNumberCreate(kCFAllocatorDefault,
                             kCFNumberIntType,
                             &avpar->den);

        par = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                        2,
                                        &kCFCopyStringDictionaryKeyCallBacks,
                                        &kCFTypeDictionaryValueCallBacks);

        if (!par || !num || !den) {
            if (par) CFRelease(par);
            if (num) CFRelease(num);
            if (den) CFRelease(den);

            return AVERROR(ENOMEM);
        }

        CFDictionarySetValue(
            par,
            kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing,
            num);

        CFDictionarySetValue(
            par,
            kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing,
            den);

        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_PixelAspectRatio,
                                      par);

        CFRelease(par);
        CFRelease(num);
        CFRelease(den);

        if (status) {
            av_log(avctx,
                   AV_LOG_ERROR,
                   "Error setting pixel aspect ratio to %d:%d: %d.\n",
                   avctx->sample_aspect_ratio.num,
                   avctx->sample_aspect_ratio.den,
                   status);

            return AVERROR_EXTERNAL;
        }
    }

    /* Color signaling (warn-only on failure). */
    if (vtctx->transfer_function) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_TransferFunction,
                                      vtctx->transfer_function);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set transfer function: %d\n", status);
        }
    }

    if (vtctx->ycbcr_matrix) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_YCbCrMatrix,
                                      vtctx->ycbcr_matrix);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set ycbcr matrix: %d\n", status);
        }
    }

    if (vtctx->color_primaries) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_ColorPrimaries,
                                      vtctx->color_primaries);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set color primaries: %d\n", status);
        }
    }

    if (gamma_level) {
        status = VTSessionSetProperty(vtctx->session,
                                      kCVImageBufferGammaLevelKey,
                                      gamma_level);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set gamma level: %d\n", status);
        }
    }

    /* Disable frame reordering when B-frames are not in use. */
    if (!vtctx->has_b_frames) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_AllowFrameReordering,
                                      kCFBooleanFalse);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting 'allow frame reordering' property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    /* Explicit entropy mode, when the user selected one. */
    if (vtctx->entropy != VT_ENTROPY_NOT_SET) {
        CFStringRef entropy = vtctx->entropy == VT_CABAC ?
                                compat_keys.kVTH264EntropyMode_CABAC:
                                compat_keys.kVTH264EntropyMode_CAVLC;

        status = VTSessionSetProperty(vtctx->session,
                                      compat_keys.kVTCompressionPropertyKey_H264EntropyMode,
                                      entropy);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting entropy property: %d\n", status);
        }
    }

    if (vtctx->realtime) {
        status = VTSessionSetProperty(vtctx->session,
                                      compat_keys.kVTCompressionPropertyKey_RealTime,
                                      kCFBooleanTrue);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting realtime property: %d\n", status);
        }
    }

    status = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error: cannot prepare encoder: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    return 0;
}
  944. static av_cold int vtenc_init(AVCodecContext *avctx)
  945. {
  946. CFMutableDictionaryRef enc_info;
  947. CFMutableDictionaryRef pixel_buffer_info;
  948. CMVideoCodecType codec_type;
  949. VTEncContext *vtctx = avctx->priv_data;
  950. CFStringRef profile_level;
  951. CFBooleanRef has_b_frames_cfbool;
  952. CFNumberRef gamma_level = NULL;
  953. int status;
  954. pthread_once(&once_ctrl, loadVTEncSymbols);
  955. codec_type = get_cm_codec_type(avctx->codec_id);
  956. if (!codec_type) {
  957. av_log(avctx, AV_LOG_ERROR, "Error: no mapping for AVCodecID %d\n", avctx->codec_id);
  958. return AVERROR(EINVAL);
  959. }
  960. vtctx->has_b_frames = avctx->max_b_frames > 0;
  961. if(vtctx->has_b_frames && vtctx->profile == H264_PROF_BASELINE){
  962. av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n");
  963. vtctx->has_b_frames = false;
  964. }
  965. if (vtctx->entropy == VT_CABAC && vtctx->profile == H264_PROF_BASELINE) {
  966. av_log(avctx, AV_LOG_WARNING, "CABAC entropy requires 'main' or 'high' profile, but baseline was requested. Encode will not use CABAC entropy.\n");
  967. vtctx->entropy = VT_ENTROPY_NOT_SET;
  968. }
  969. if (!get_vt_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
  970. vtctx->session = NULL;
  971. enc_info = CFDictionaryCreateMutable(
  972. kCFAllocatorDefault,
  973. 20,
  974. &kCFCopyStringDictionaryKeyCallBacks,
  975. &kCFTypeDictionaryValueCallBacks
  976. );
  977. if (!enc_info) return AVERROR(ENOMEM);
  978. #if !TARGET_OS_IPHONE
  979. if (!vtctx->allow_sw) {
  980. CFDictionarySetValue(enc_info,
  981. compat_keys.kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
  982. kCFBooleanTrue);
  983. } else {
  984. CFDictionarySetValue(enc_info,
  985. compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
  986. kCFBooleanTrue);
  987. }
  988. #endif
  989. if (avctx->pix_fmt != AV_PIX_FMT_VIDEOTOOLBOX) {
  990. status = create_cv_pixel_buffer_info(avctx, &pixel_buffer_info);
  991. if (status)
  992. goto init_cleanup;
  993. } else {
  994. pixel_buffer_info = NULL;
  995. }
  996. pthread_mutex_init(&vtctx->lock, NULL);
  997. pthread_cond_init(&vtctx->cv_sample_sent, NULL);
  998. vtctx->dts_delta = vtctx->has_b_frames ? -1 : 0;
  999. get_cv_transfer_function(avctx, &vtctx->transfer_function, &gamma_level);
  1000. get_cv_ycbcr_matrix(avctx, &vtctx->ycbcr_matrix);
  1001. get_cv_color_primaries(avctx, &vtctx->color_primaries);
  1002. if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
  1003. status = vtenc_populate_extradata(avctx,
  1004. codec_type,
  1005. profile_level,
  1006. gamma_level,
  1007. enc_info,
  1008. pixel_buffer_info);
  1009. if (status)
  1010. goto init_cleanup;
  1011. }
  1012. status = vtenc_create_encoder(avctx,
  1013. codec_type,
  1014. profile_level,
  1015. gamma_level,
  1016. enc_info,
  1017. pixel_buffer_info,
  1018. &vtctx->session);
  1019. if (status < 0)
  1020. goto init_cleanup;
  1021. status = VTSessionCopyProperty(vtctx->session,
  1022. kVTCompressionPropertyKey_AllowFrameReordering,
  1023. kCFAllocatorDefault,
  1024. &has_b_frames_cfbool);
  1025. if (!status) {
  1026. //Some devices don't output B-frames for main profile, even if requested.
  1027. vtctx->has_b_frames = CFBooleanGetValue(has_b_frames_cfbool);
  1028. CFRelease(has_b_frames_cfbool);
  1029. }
  1030. avctx->has_b_frames = vtctx->has_b_frames;
  1031. init_cleanup:
  1032. if (gamma_level)
  1033. CFRelease(gamma_level);
  1034. if (pixel_buffer_info)
  1035. CFRelease(pixel_buffer_info);
  1036. CFRelease(enc_info);
  1037. return status;
  1038. }
  1039. static void vtenc_get_frame_info(CMSampleBufferRef buffer, bool *is_key_frame)
  1040. {
  1041. CFArrayRef attachments;
  1042. CFDictionaryRef attachment;
  1043. CFBooleanRef not_sync;
  1044. CFIndex len;
  1045. attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
  1046. len = !attachments ? 0 : CFArrayGetCount(attachments);
  1047. if (!len) {
  1048. *is_key_frame = true;
  1049. return;
  1050. }
  1051. attachment = CFArrayGetValueAtIndex(attachments, 0);
  1052. if (CFDictionaryGetValueIfPresent(attachment,
  1053. kCMSampleAttachmentKey_NotSync,
  1054. (const void **)&not_sync))
  1055. {
  1056. *is_key_frame = !CFBooleanGetValue(not_sync);
  1057. } else {
  1058. *is_key_frame = true;
  1059. }
  1060. }
  1061. static int is_post_sei_nal_type(int nal_type){
  1062. return nal_type != H264_NAL_SEI &&
  1063. nal_type != H264_NAL_SPS &&
  1064. nal_type != H264_NAL_PPS &&
  1065. nal_type != H264_NAL_AUD;
  1066. }
  1067. /*
  1068. * Finds the sei message start/size of type find_sei_type.
  1069. * If more than one of that type exists, the last one is returned.
  1070. */
  1071. static int find_sei_end(AVCodecContext *avctx,
  1072. uint8_t *nal_data,
  1073. size_t nal_size,
  1074. uint8_t **sei_end)
  1075. {
  1076. int nal_type;
  1077. size_t sei_payload_size = 0;
  1078. int sei_payload_type = 0;
  1079. *sei_end = NULL;
  1080. uint8_t *nal_start = nal_data;
  1081. if (!nal_size)
  1082. return 0;
  1083. nal_type = *nal_data & 0x1F;
  1084. if (nal_type != H264_NAL_SEI)
  1085. return 0;
  1086. nal_data++;
  1087. nal_size--;
  1088. if (nal_data[nal_size - 1] == 0x80)
  1089. nal_size--;
  1090. while (nal_size > 0 && *nal_data > 0) {
  1091. do{
  1092. sei_payload_type += *nal_data;
  1093. nal_data++;
  1094. nal_size--;
  1095. } while (nal_size > 0 && *nal_data == 0xFF);
  1096. if (!nal_size) {
  1097. av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing type.\n");
  1098. return AVERROR_INVALIDDATA;
  1099. }
  1100. do{
  1101. sei_payload_size += *nal_data;
  1102. nal_data++;
  1103. nal_size--;
  1104. } while (nal_size > 0 && *nal_data == 0xFF);
  1105. if (nal_size < sei_payload_size) {
  1106. av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing size.\n");
  1107. return AVERROR_INVALIDDATA;
  1108. }
  1109. nal_data += sei_payload_size;
  1110. nal_size -= sei_payload_size;
  1111. }
  1112. *sei_end = nal_data;
  1113. return nal_data - nal_start + 1;
  1114. }
  1115. /**
  1116. * Copies the data inserting emulation prevention bytes as needed.
  1117. * Existing data in the destination can be taken into account by providing
  1118. * dst with a dst_offset > 0.
  1119. *
  1120. * @return The number of bytes copied on success. On failure, the negative of
  1121. * the number of bytes needed to copy src is returned.
  1122. */
  1123. static int copy_emulation_prev(const uint8_t *src,
  1124. size_t src_size,
  1125. uint8_t *dst,
  1126. ssize_t dst_offset,
  1127. size_t dst_size)
  1128. {
  1129. int zeros = 0;
  1130. int wrote_bytes;
  1131. uint8_t* dst_start;
  1132. uint8_t* dst_end = dst + dst_size;
  1133. const uint8_t* src_end = src + src_size;
  1134. int start_at = dst_offset > 2 ? dst_offset - 2 : 0;
  1135. int i;
  1136. for (i = start_at; i < dst_offset && i < dst_size; i++) {
  1137. if (!dst[i])
  1138. zeros++;
  1139. else
  1140. zeros = 0;
  1141. }
  1142. dst += dst_offset;
  1143. dst_start = dst;
  1144. for (; src < src_end; src++, dst++) {
  1145. if (zeros == 2) {
  1146. int insert_ep3_byte = *src <= 3;
  1147. if (insert_ep3_byte) {
  1148. if (dst < dst_end)
  1149. *dst = 3;
  1150. dst++;
  1151. }
  1152. zeros = 0;
  1153. }
  1154. if (dst < dst_end)
  1155. *dst = *src;
  1156. if (!*src)
  1157. zeros++;
  1158. else
  1159. zeros = 0;
  1160. }
  1161. wrote_bytes = dst - dst_start;
  1162. if (dst > dst_end)
  1163. return -wrote_bytes;
  1164. return wrote_bytes;
  1165. }
  1166. static int write_sei(const ExtraSEI *sei,
  1167. int sei_type,
  1168. uint8_t *dst,
  1169. size_t dst_size)
  1170. {
  1171. uint8_t *sei_start = dst;
  1172. size_t remaining_sei_size = sei->size;
  1173. size_t remaining_dst_size = dst_size;
  1174. int header_bytes;
  1175. int bytes_written;
  1176. ssize_t offset;
  1177. if (!remaining_dst_size)
  1178. return AVERROR_BUFFER_TOO_SMALL;
  1179. while (sei_type && remaining_dst_size != 0) {
  1180. int sei_byte = sei_type > 255 ? 255 : sei_type;
  1181. *dst = sei_byte;
  1182. sei_type -= sei_byte;
  1183. dst++;
  1184. remaining_dst_size--;
  1185. }
  1186. if (!dst_size)
  1187. return AVERROR_BUFFER_TOO_SMALL;
  1188. while (remaining_sei_size && remaining_dst_size != 0) {
  1189. int size_byte = remaining_sei_size > 255 ? 255 : remaining_sei_size;
  1190. *dst = size_byte;
  1191. remaining_sei_size -= size_byte;
  1192. dst++;
  1193. remaining_dst_size--;
  1194. }
  1195. if (remaining_dst_size < sei->size)
  1196. return AVERROR_BUFFER_TOO_SMALL;
  1197. header_bytes = dst - sei_start;
  1198. offset = header_bytes;
  1199. bytes_written = copy_emulation_prev(sei->data,
  1200. sei->size,
  1201. sei_start,
  1202. offset,
  1203. dst_size);
  1204. if (bytes_written < 0)
  1205. return AVERROR_BUFFER_TOO_SMALL;
  1206. bytes_written += header_bytes;
  1207. return bytes_written;
  1208. }
  1209. /**
  1210. * Copies NAL units and replaces length codes with
  1211. * H.264 Annex B start codes. On failure, the contents of
  1212. * dst_data may have been modified.
  1213. *
  1214. * @param length_code_size Byte length of each length code
  1215. * @param sample_buffer NAL units prefixed with length codes.
  1216. * @param sei Optional A53 closed captions SEI data.
  1217. * @param dst_data Must be zeroed before calling this function.
  1218. * Contains the copied NAL units prefixed with
  1219. * start codes when the function returns
  1220. * successfully.
  1221. * @param dst_size Length of dst_data
  1222. * @return 0 on success
  1223. * AVERROR_INVALIDDATA if length_code_size is invalid
  1224. * AVERROR_BUFFER_TOO_SMALL if dst_data is too small
  1225. * or if a length_code in src_data specifies data beyond
  1226. * the end of its buffer.
  1227. */
  1228. static int copy_replace_length_codes(
  1229. AVCodecContext *avctx,
  1230. size_t length_code_size,
  1231. CMSampleBufferRef sample_buffer,
  1232. ExtraSEI *sei,
  1233. uint8_t *dst_data,
  1234. size_t dst_size)
  1235. {
  1236. size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
  1237. size_t remaining_src_size = src_size;
  1238. size_t remaining_dst_size = dst_size;
  1239. size_t src_offset = 0;
  1240. int wrote_sei = 0;
  1241. int status;
  1242. uint8_t size_buf[4];
  1243. uint8_t nal_type;
  1244. CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
  1245. if (length_code_size > 4) {
  1246. return AVERROR_INVALIDDATA;
  1247. }
  1248. while (remaining_src_size > 0) {
  1249. size_t curr_src_len;
  1250. size_t curr_dst_len;
  1251. size_t box_len = 0;
  1252. size_t i;
  1253. uint8_t *dst_box;
  1254. status = CMBlockBufferCopyDataBytes(block,
  1255. src_offset,
  1256. length_code_size,
  1257. size_buf);
  1258. if (status) {
  1259. av_log(avctx, AV_LOG_ERROR, "Cannot copy length: %d\n", status);
  1260. return AVERROR_EXTERNAL;
  1261. }
  1262. status = CMBlockBufferCopyDataBytes(block,
  1263. src_offset + length_code_size,
  1264. 1,
  1265. &nal_type);
  1266. if (status) {
  1267. av_log(avctx, AV_LOG_ERROR, "Cannot copy type: %d\n", status);
  1268. return AVERROR_EXTERNAL;
  1269. }
  1270. nal_type &= 0x1F;
  1271. for (i = 0; i < length_code_size; i++) {
  1272. box_len <<= 8;
  1273. box_len |= size_buf[i];
  1274. }
  1275. if (sei && !wrote_sei && is_post_sei_nal_type(nal_type)) {
  1276. //No SEI NAL unit - insert.
  1277. int wrote_bytes;
  1278. memcpy(dst_data, start_code, sizeof(start_code));
  1279. dst_data += sizeof(start_code);
  1280. remaining_dst_size -= sizeof(start_code);
  1281. *dst_data = H264_NAL_SEI;
  1282. dst_data++;
  1283. remaining_dst_size--;
  1284. wrote_bytes = write_sei(sei,
  1285. SEI_TYPE_USER_DATA_REGISTERED,
  1286. dst_data,
  1287. remaining_dst_size);
  1288. if (wrote_bytes < 0)
  1289. return wrote_bytes;
  1290. remaining_dst_size -= wrote_bytes;
  1291. dst_data += wrote_bytes;
  1292. if (remaining_dst_size <= 0)
  1293. return AVERROR_BUFFER_TOO_SMALL;
  1294. *dst_data = 0x80;
  1295. dst_data++;
  1296. remaining_dst_size--;
  1297. wrote_sei = 1;
  1298. }
  1299. curr_src_len = box_len + length_code_size;
  1300. curr_dst_len = box_len + sizeof(start_code);
  1301. if (remaining_src_size < curr_src_len) {
  1302. return AVERROR_BUFFER_TOO_SMALL;
  1303. }
  1304. if (remaining_dst_size < curr_dst_len) {
  1305. return AVERROR_BUFFER_TOO_SMALL;
  1306. }
  1307. dst_box = dst_data + sizeof(start_code);
  1308. memcpy(dst_data, start_code, sizeof(start_code));
  1309. status = CMBlockBufferCopyDataBytes(block,
  1310. src_offset + length_code_size,
  1311. box_len,
  1312. dst_box);
  1313. if (status) {
  1314. av_log(avctx, AV_LOG_ERROR, "Cannot copy data: %d\n", status);
  1315. return AVERROR_EXTERNAL;
  1316. }
  1317. if (sei && !wrote_sei && nal_type == H264_NAL_SEI) {
  1318. //Found SEI NAL unit - append.
  1319. int wrote_bytes;
  1320. int old_sei_length;
  1321. int extra_bytes;
  1322. uint8_t *new_sei;
  1323. old_sei_length = find_sei_end(avctx, dst_box, box_len, &new_sei);
  1324. if (old_sei_length < 0)
  1325. return status;
  1326. wrote_bytes = write_sei(sei,
  1327. SEI_TYPE_USER_DATA_REGISTERED,
  1328. new_sei,
  1329. remaining_dst_size - old_sei_length);
  1330. if (wrote_bytes < 0)
  1331. return wrote_bytes;
  1332. if (new_sei + wrote_bytes >= dst_data + remaining_dst_size)
  1333. return AVERROR_BUFFER_TOO_SMALL;
  1334. new_sei[wrote_bytes++] = 0x80;
  1335. extra_bytes = wrote_bytes - (dst_box + box_len - new_sei);
  1336. dst_data += extra_bytes;
  1337. remaining_dst_size -= extra_bytes;
  1338. wrote_sei = 1;
  1339. }
  1340. src_offset += curr_src_len;
  1341. dst_data += curr_dst_len;
  1342. remaining_src_size -= curr_src_len;
  1343. remaining_dst_size -= curr_dst_len;
  1344. }
  1345. return 0;
  1346. }
  1347. /**
  1348. * Returns a sufficient number of bytes to contain the sei data.
  1349. * It may be greater than the minimum required.
  1350. */
  1351. static int get_sei_msg_bytes(const ExtraSEI* sei, int type){
  1352. int copied_size;
  1353. if (sei->size == 0)
  1354. return 0;
  1355. copied_size = -copy_emulation_prev(sei->data,
  1356. sei->size,
  1357. NULL,
  1358. 0,
  1359. 0);
  1360. if ((sei->size % 255) == 0) //may result in an extra byte
  1361. copied_size++;
  1362. return copied_size + sei->size / 255 + 1 + type / 255 + 1;
  1363. }
/*
 * Converts one encoded CMSampleBuffer into an AVPacket: sizes the output,
 * optionally prepends parameter sets on key frames, converts AVCC length
 * prefixes to Annex B start codes (inserting the optional CC SEI), and
 * derives pts/dts from the CoreMedia timestamps.
 */
static int vtenc_cm_to_avpacket(
    AVCodecContext    *avctx,
    CMSampleBufferRef sample_buffer,
    AVPacket          *pkt,
    ExtraSEI          *sei)
{
    VTEncContext *vtctx = avctx->priv_data;

    int     status;
    bool    is_key_frame;
    bool    add_header;
    size_t  length_code_size;
    size_t  header_size = 0;
    size_t  in_buf_size;
    size_t  out_buf_size;
    size_t  sei_nalu_size = 0;
    int64_t dts_delta;
    int64_t time_base_num;
    int nalu_count;
    CMTime  pts;
    CMTime  dts;
    CMVideoFormatDescriptionRef vid_fmt;

    vtenc_get_frame_info(sample_buffer, &is_key_frame);

    status = get_length_code_size(avctx, sample_buffer, &length_code_size);
    if (status) return status;

    /* Parameter sets are prepended to key frames only when they are not
     * delivered out-of-band via extradata. */
    add_header = is_key_frame && !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);

    if (add_header) {
        vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
        if (!vid_fmt) {
            av_log(avctx, AV_LOG_ERROR, "Cannot get format description.\n");
            return AVERROR_EXTERNAL;
        }

        /* NOTE(review): this inner 'status' shadows the outer one. */
        int status = get_params_size(avctx, vid_fmt, &header_size);
        if (status) return status;
    }

    status = count_nalus(length_code_size, sample_buffer, &nalu_count);
    if(status)
        return status;

    if (sei) {
        /* start code + NAL header byte + SEI message + rbsp stop byte */
        size_t msg_size = get_sei_msg_bytes(sei,
                                            SEI_TYPE_USER_DATA_REGISTERED);

        sei_nalu_size = sizeof(start_code) + 1 + msg_size + 1;
    }

    in_buf_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
    /* Each NAL unit trades its length code for a start code; account for
     * the per-unit size difference (may be negative). */
    out_buf_size = header_size +
                   in_buf_size +
                   sei_nalu_size +
                   nalu_count * ((int)sizeof(start_code) - (int)length_code_size);

    status = ff_alloc_packet2(avctx, pkt, out_buf_size, out_buf_size);
    if (status < 0)
        return status;

    if (add_header) {
        status = copy_param_sets(avctx, vid_fmt, pkt->data, out_buf_size);
        if(status) return status;
    }

    status = copy_replace_length_codes(
        avctx,
        length_code_size,
        sample_buffer,
        sei,
        pkt->data + header_size,
        pkt->size - header_size
    );

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error copying packet data: %d\n", status);
        return status;
    }

    if (is_key_frame) {
        pkt->flags |= AV_PKT_FLAG_KEY;
    }

    pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
    dts = CMSampleBufferGetDecodeTimeStamp      (sample_buffer);

    /* Without frame reordering the encoder may omit DTS; it equals PTS. */
    if (CMTIME_IS_INVALID(dts)) {
        if (!vtctx->has_b_frames) {
            dts = pts;
        } else {
            av_log(avctx, AV_LOG_ERROR, "DTS is invalid.\n");
            return AVERROR_EXTERNAL;
        }
    }

    /* Timestamps were scaled by time_base.num on submission (see
     * vtenc_send_frame), so the division here is exact. dts is shifted back
     * by the measured first-two-frames PTS delta so dts <= pts holds with
     * reordering.
     * NOTE(review): assumes avctx->time_base.num != 0 — confirm lavc
     * guarantees this for encoders. */
    dts_delta = vtctx->dts_delta >= 0 ? vtctx->dts_delta : 0;
    time_base_num = avctx->time_base.num;
    pkt->pts = pts.value / time_base_num;
    pkt->dts = dts.value / time_base_num - dts_delta;
    pkt->size = out_buf_size;

    return 0;
}
  1450. /*
  1451. * contiguous_buf_size is 0 if not contiguous, and the size of the buffer
  1452. * containing all planes if so.
  1453. */
  1454. static int get_cv_pixel_info(
  1455. AVCodecContext *avctx,
  1456. const AVFrame *frame,
  1457. int *color,
  1458. int *plane_count,
  1459. size_t *widths,
  1460. size_t *heights,
  1461. size_t *strides,
  1462. size_t *contiguous_buf_size)
  1463. {
  1464. VTEncContext *vtctx = avctx->priv_data;
  1465. int av_format = frame->format;
  1466. int av_color_range = av_frame_get_color_range(frame);
  1467. int i;
  1468. int range_guessed;
  1469. int status;
  1470. status = get_cv_pixel_format(avctx, av_format, av_color_range, color, &range_guessed);
  1471. if (status) {
  1472. av_log(avctx,
  1473. AV_LOG_ERROR,
  1474. "Could not get pixel format for color format '%s' range '%s'.\n",
  1475. av_get_pix_fmt_name(av_format),
  1476. av_color_range > AVCOL_RANGE_UNSPECIFIED &&
  1477. av_color_range < AVCOL_RANGE_NB ?
  1478. av_color_range_name(av_color_range) :
  1479. "Unknown");
  1480. return AVERROR(EINVAL);
  1481. }
  1482. if (range_guessed) {
  1483. if (!vtctx->warned_color_range) {
  1484. vtctx->warned_color_range = true;
  1485. av_log(avctx,
  1486. AV_LOG_WARNING,
  1487. "Color range not set for %s. Using MPEG range.\n",
  1488. av_get_pix_fmt_name(av_format));
  1489. }
  1490. av_log(avctx, AV_LOG_WARNING, "");
  1491. }
  1492. switch (av_format) {
  1493. case AV_PIX_FMT_NV12:
  1494. *plane_count = 2;
  1495. widths [0] = avctx->width;
  1496. heights[0] = avctx->height;
  1497. strides[0] = frame ? frame->linesize[0] : avctx->width;
  1498. widths [1] = (avctx->width + 1) / 2;
  1499. heights[1] = (avctx->height + 1) / 2;
  1500. strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) & -2;
  1501. break;
  1502. case AV_PIX_FMT_YUV420P:
  1503. *plane_count = 3;
  1504. widths [0] = avctx->width;
  1505. heights[0] = avctx->height;
  1506. strides[0] = frame ? frame->linesize[0] : avctx->width;
  1507. widths [1] = (avctx->width + 1) / 2;
  1508. heights[1] = (avctx->height + 1) / 2;
  1509. strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) / 2;
  1510. widths [2] = (avctx->width + 1) / 2;
  1511. heights[2] = (avctx->height + 1) / 2;
  1512. strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2;
  1513. break;
  1514. default:
  1515. av_log(
  1516. avctx,
  1517. AV_LOG_ERROR,
  1518. "Could not get frame format info for color %d range %d.\n",
  1519. av_format,
  1520. av_color_range);
  1521. return AVERROR(EINVAL);
  1522. }
  1523. *contiguous_buf_size = 0;
  1524. for (i = 0; i < *plane_count; i++) {
  1525. if (i < *plane_count - 1 &&
  1526. frame->data[i] + strides[i] * heights[i] != frame->data[i + 1]) {
  1527. *contiguous_buf_size = 0;
  1528. break;
  1529. }
  1530. *contiguous_buf_size += strides[i] * heights[i];
  1531. }
  1532. return 0;
  1533. }
  1534. #if !TARGET_OS_IPHONE
  1535. //Not used on iOS - frame is always copied.
  1536. static void free_avframe(
  1537. void *release_ctx,
  1538. const void *data,
  1539. size_t size,
  1540. size_t plane_count,
  1541. const void *plane_addresses[])
  1542. {
  1543. AVFrame *frame = release_ctx;
  1544. av_frame_free(&frame);
  1545. }
  1546. #else
  1547. //Not used on OSX - frame is never copied.
  1548. static int copy_avframe_to_pixel_buffer(AVCodecContext *avctx,
  1549. const AVFrame *frame,
  1550. CVPixelBufferRef cv_img,
  1551. const size_t *plane_strides,
  1552. const size_t *plane_rows)
  1553. {
  1554. int i, j;
  1555. size_t plane_count;
  1556. int status;
  1557. int rows;
  1558. int src_stride;
  1559. int dst_stride;
  1560. uint8_t *src_addr;
  1561. uint8_t *dst_addr;
  1562. size_t copy_bytes;
  1563. status = CVPixelBufferLockBaseAddress(cv_img, 0);
  1564. if (status) {
  1565. av_log(
  1566. avctx,
  1567. AV_LOG_ERROR,
  1568. "Error: Could not lock base address of CVPixelBuffer: %d.\n",
  1569. status
  1570. );
  1571. }
  1572. if (CVPixelBufferIsPlanar(cv_img)) {
  1573. plane_count = CVPixelBufferGetPlaneCount(cv_img);
  1574. for (i = 0; frame->data[i]; i++) {
  1575. if (i == plane_count) {
  1576. CVPixelBufferUnlockBaseAddress(cv_img, 0);
  1577. av_log(avctx,
  1578. AV_LOG_ERROR,
  1579. "Error: different number of planes in AVFrame and CVPixelBuffer.\n"
  1580. );
  1581. return AVERROR_EXTERNAL;
  1582. }
  1583. dst_addr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cv_img, i);
  1584. src_addr = (uint8_t*)frame->data[i];
  1585. dst_stride = CVPixelBufferGetBytesPerRowOfPlane(cv_img, i);
  1586. src_stride = plane_strides[i];
  1587. rows = plane_rows[i];
  1588. if (dst_stride == src_stride) {
  1589. memcpy(dst_addr, src_addr, src_stride * rows);
  1590. } else {
  1591. copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
  1592. for (j = 0; j < rows; j++) {
  1593. memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
  1594. }
  1595. }
  1596. }
  1597. } else {
  1598. if (frame->data[1]) {
  1599. CVPixelBufferUnlockBaseAddress(cv_img, 0);
  1600. av_log(avctx,
  1601. AV_LOG_ERROR,
  1602. "Error: different number of planes in AVFrame and non-planar CVPixelBuffer.\n"
  1603. );
  1604. return AVERROR_EXTERNAL;
  1605. }
  1606. dst_addr = (uint8_t*)CVPixelBufferGetBaseAddress(cv_img);
  1607. src_addr = (uint8_t*)frame->data[0];
  1608. dst_stride = CVPixelBufferGetBytesPerRow(cv_img);
  1609. src_stride = plane_strides[0];
  1610. rows = plane_rows[0];
  1611. if (dst_stride == src_stride) {
  1612. memcpy(dst_addr, src_addr, src_stride * rows);
  1613. } else {
  1614. copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
  1615. for (j = 0; j < rows; j++) {
  1616. memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
  1617. }
  1618. }
  1619. }
  1620. status = CVPixelBufferUnlockBaseAddress(cv_img, 0);
  1621. if (status) {
  1622. av_log(avctx, AV_LOG_ERROR, "Error: Could not unlock CVPixelBuffer base address: %d.\n", status);
  1623. return AVERROR_EXTERNAL;
  1624. }
  1625. return 0;
  1626. }
  1627. #endif //!TARGET_OS_IPHONE
  1628. static int create_cv_pixel_buffer(AVCodecContext *avctx,
  1629. const AVFrame *frame,
  1630. CVPixelBufferRef *cv_img)
  1631. {
  1632. int plane_count;
  1633. int color;
  1634. size_t widths [AV_NUM_DATA_POINTERS];
  1635. size_t heights[AV_NUM_DATA_POINTERS];
  1636. size_t strides[AV_NUM_DATA_POINTERS];
  1637. int status;
  1638. size_t contiguous_buf_size;
  1639. #if TARGET_OS_IPHONE
  1640. CVPixelBufferPoolRef pix_buf_pool;
  1641. VTEncContext* vtctx = avctx->priv_data;
  1642. #else
  1643. CFMutableDictionaryRef pix_buf_attachments = CFDictionaryCreateMutable(
  1644. kCFAllocatorDefault,
  1645. 10,
  1646. &kCFCopyStringDictionaryKeyCallBacks,
  1647. &kCFTypeDictionaryValueCallBacks);
  1648. if (!pix_buf_attachments) return AVERROR(ENOMEM);
  1649. #endif
  1650. if (avctx->pix_fmt == AV_PIX_FMT_VIDEOTOOLBOX) {
  1651. av_assert0(frame->format == AV_PIX_FMT_VIDEOTOOLBOX);
  1652. *cv_img = (CVPixelBufferRef)frame->data[3];
  1653. av_assert0(*cv_img);
  1654. CFRetain(*cv_img);
  1655. return 0;
  1656. }
  1657. memset(widths, 0, sizeof(widths));
  1658. memset(heights, 0, sizeof(heights));
  1659. memset(strides, 0, sizeof(strides));
  1660. status = get_cv_pixel_info(
  1661. avctx,
  1662. frame,
  1663. &color,
  1664. &plane_count,
  1665. widths,
  1666. heights,
  1667. strides,
  1668. &contiguous_buf_size
  1669. );
  1670. if (status) {
  1671. av_log(
  1672. avctx,
  1673. AV_LOG_ERROR,
  1674. "Error: Cannot convert format %d color_range %d: %d\n",
  1675. frame->format,
  1676. av_frame_get_color_range(frame),
  1677. status
  1678. );
  1679. return AVERROR_EXTERNAL;
  1680. }
  1681. #if TARGET_OS_IPHONE
  1682. pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
  1683. if (!pix_buf_pool) {
  1684. av_log(avctx, AV_LOG_ERROR, "Could not get pixel buffer pool.\n");
  1685. return AVERROR_EXTERNAL;
  1686. }
  1687. status = CVPixelBufferPoolCreatePixelBuffer(NULL,
  1688. pix_buf_pool,
  1689. cv_img);
  1690. if (status) {
  1691. av_log(avctx, AV_LOG_ERROR, "Could not create pixel buffer from pool: %d.\n", status);
  1692. return AVERROR_EXTERNAL;
  1693. }
  1694. status = copy_avframe_to_pixel_buffer(avctx, frame, *cv_img, strides, heights);
  1695. if (status) {
  1696. CFRelease(*cv_img);
  1697. *cv_img = NULL;
  1698. return status;
  1699. }
  1700. #else
  1701. AVFrame *enc_frame = av_frame_alloc();
  1702. if (!enc_frame) return AVERROR(ENOMEM);
  1703. status = av_frame_ref(enc_frame, frame);
  1704. if (status) {
  1705. av_frame_free(&enc_frame);
  1706. return status;
  1707. }
  1708. status = CVPixelBufferCreateWithPlanarBytes(
  1709. kCFAllocatorDefault,
  1710. enc_frame->width,
  1711. enc_frame->height,
  1712. color,
  1713. NULL,
  1714. contiguous_buf_size,
  1715. plane_count,
  1716. (void **)enc_frame->data,
  1717. widths,
  1718. heights,
  1719. strides,
  1720. free_avframe,
  1721. enc_frame,
  1722. NULL,
  1723. cv_img
  1724. );
  1725. add_color_attr(avctx, pix_buf_attachments);
  1726. CVBufferSetAttachments(*cv_img, pix_buf_attachments, kCVAttachmentMode_ShouldPropagate);
  1727. CFRelease(pix_buf_attachments);
  1728. if (status) {
  1729. av_log(avctx, AV_LOG_ERROR, "Error: Could not create CVPixelBuffer: %d\n", status);
  1730. return AVERROR_EXTERNAL;
  1731. }
  1732. #endif
  1733. return 0;
  1734. }
  1735. static int create_encoder_dict_h264(const AVFrame *frame,
  1736. CFDictionaryRef* dict_out)
  1737. {
  1738. CFDictionaryRef dict = NULL;
  1739. if (frame->pict_type == AV_PICTURE_TYPE_I) {
  1740. const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
  1741. const void *vals[] = { kCFBooleanTrue };
  1742. dict = CFDictionaryCreate(NULL, keys, vals, 1, NULL, NULL);
  1743. if(!dict) return AVERROR(ENOMEM);
  1744. }
  1745. *dict_out = dict;
  1746. return 0;
  1747. }
/*
 * Submits one frame to the compression session: wraps/copies it into a
 * CVPixelBuffer, attaches per-frame options, and hands any A53 closed
 * caption SEI to VideoToolbox as the frame refcon (freed by the output
 * path in vtenc_frame).
 */
static int vtenc_send_frame(AVCodecContext *avctx,
                            VTEncContext   *vtctx,
                            const AVFrame  *frame)
{
    CMTime time;
    CFDictionaryRef frame_dict;
    CVPixelBufferRef cv_img = NULL;
    AVFrameSideData *side_data = NULL;
    ExtraSEI *sei = NULL;
    int status = create_cv_pixel_buffer(avctx, frame, &cv_img);

    if (status) return status;

    status = create_encoder_dict_h264(frame, &frame_dict);
    if (status) {
        CFRelease(cv_img);
        return status;
    }

    /* Closed captions are best-effort: allocation failures only drop the
     * captions, never the frame. */
    side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
    if (vtctx->a53_cc && side_data && side_data->size) {
        sei = av_mallocz(sizeof(*sei));
        if (!sei) {
            av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
        } else {
            int ret = ff_alloc_a53_sei(frame, 0, &sei->data, &sei->size);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
                av_free(sei);
                sei = NULL;
            }
        }
    }

    /* Scale PTS by time_base.num so the division on output is exact (see
     * vtenc_cm_to_avpacket). */
    time = CMTimeMake(frame->pts * avctx->time_base.num, avctx->time_base.den);
    status = VTCompressionSessionEncodeFrame(
        vtctx->session,
        cv_img,
        time,
        kCMTimeInvalid,
        frame_dict,
        sei,
        NULL
    );

    if (frame_dict) CFRelease(frame_dict);
    CFRelease(cv_img);

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error: cannot encode frame: %d\n", status);
        /* NOTE(review): 'sei' likely leaks here since the output callback
         * will not run for this frame — confirm and free it if so. */
        return AVERROR_EXTERNAL;
    }

    return 0;
}
/*
 * Encode entry point: submits 'frame' (or starts a blocking flush when
 * frame is NULL), then pops at most one encoded sample from the output
 * queue and converts it to a packet.
 */
static av_cold int vtenc_frame(
    AVCodecContext *avctx,
    AVPacket       *pkt,
    const AVFrame  *frame,
    int            *got_packet)
{
    VTEncContext *vtctx = avctx->priv_data;
    bool get_frame;
    int status;
    CMSampleBufferRef buf = NULL;
    ExtraSEI *sei = NULL;

    if (frame) {
        status = vtenc_send_frame(avctx, vtctx, frame);

        if (status) {
            status = AVERROR_EXTERNAL;
            goto end_nopkt;
        }

        /* With B-frames, the PTS gap between the first two input frames
         * becomes the DTS shift applied on output (vtenc_cm_to_avpacket). */
        if (vtctx->frame_ct_in == 0) {
            vtctx->first_pts = frame->pts;
        } else if(vtctx->frame_ct_in == 1 && vtctx->has_b_frames) {
            vtctx->dts_delta = frame->pts - vtctx->first_pts;
        }

        vtctx->frame_ct_in++;
    } else if(!vtctx->flushing) {
        /* NULL frame: drain — block until the session has emitted all
         * pending samples. */
        vtctx->flushing = true;

        status = VTCompressionSessionCompleteFrames(vtctx->session,
                                                    kCMTimeIndefinite);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error flushing frames: %d\n", status);
            status = AVERROR_EXTERNAL;
            goto end_nopkt;
        }
    }

    *got_packet = 0;

    /* Hold output back until dts_delta is known (two frames in), unless we
     * are draining. */
    get_frame = vtctx->dts_delta >= 0 || !frame;
    if (!get_frame) {
        status = 0;
        goto end_nopkt;
    }

    /* Wait (only while draining) for the next encoded sample; the SEI
     * refcon attached in vtenc_send_frame is consumed and freed here. */
    status = vtenc_q_pop(vtctx, !frame, &buf, &sei);
    if (status) goto end_nopkt;
    if (!buf)   goto end_nopkt;

    status = vtenc_cm_to_avpacket(avctx, buf, pkt, sei);
    if (sei) {
        if (sei->data) av_free(sei->data);
        av_free(sei);
    }
    CFRelease(buf);
    if (status) goto end_nopkt;

    *got_packet = 1;
    return 0;

end_nopkt:
    av_packet_unref(pkt);
    return status;
}
  1851. static int vtenc_populate_extradata(AVCodecContext *avctx,
  1852. CMVideoCodecType codec_type,
  1853. CFStringRef profile_level,
  1854. CFNumberRef gamma_level,
  1855. CFDictionaryRef enc_info,
  1856. CFDictionaryRef pixel_buffer_info)
  1857. {
  1858. VTEncContext *vtctx = avctx->priv_data;
  1859. AVFrame *frame = av_frame_alloc();
  1860. int y_size = avctx->width * avctx->height;
  1861. int chroma_size = (avctx->width / 2) * (avctx->height / 2);
  1862. CMSampleBufferRef buf = NULL;
  1863. int status;
  1864. if (!frame)
  1865. return AVERROR(ENOMEM);
  1866. frame->buf[0] = av_buffer_alloc(y_size + 2 * chroma_size);
  1867. if(!frame->buf[0]){
  1868. status = AVERROR(ENOMEM);
  1869. goto pe_cleanup;
  1870. }
  1871. status = vtenc_create_encoder(avctx,
  1872. codec_type,
  1873. profile_level,
  1874. gamma_level,
  1875. enc_info,
  1876. pixel_buffer_info,
  1877. &vtctx->session);
  1878. if (status)
  1879. goto pe_cleanup;
  1880. frame->data[0] = frame->buf[0]->data;
  1881. memset(frame->data[0], 0, y_size);
  1882. frame->data[1] = frame->buf[0]->data + y_size;
  1883. memset(frame->data[1], 128, chroma_size);
  1884. if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
  1885. frame->data[2] = frame->buf[0]->data + y_size + chroma_size;
  1886. memset(frame->data[2], 128, chroma_size);
  1887. }
  1888. frame->linesize[0] = avctx->width;
  1889. if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
  1890. frame->linesize[1] =
  1891. frame->linesize[2] = (avctx->width + 1) / 2;
  1892. } else {
  1893. frame->linesize[1] = (avctx->width + 1) / 2;
  1894. }
  1895. frame->format = avctx->pix_fmt;
  1896. frame->width = avctx->width;
  1897. frame->height = avctx->height;
  1898. av_frame_set_colorspace(frame, avctx->colorspace);
  1899. av_frame_set_color_range(frame, avctx->color_range);
  1900. frame->color_trc = avctx->color_trc;
  1901. frame->color_primaries = avctx->color_primaries;
  1902. frame->pts = 0;
  1903. status = vtenc_send_frame(avctx, vtctx, frame);
  1904. if (status) {
  1905. av_log(avctx, AV_LOG_ERROR, "Error sending frame: %d\n", status);
  1906. goto pe_cleanup;
  1907. }
  1908. //Populates extradata - output frames are flushed and param sets are available.
  1909. status = VTCompressionSessionCompleteFrames(vtctx->session,
  1910. kCMTimeIndefinite);
  1911. if (status)
  1912. goto pe_cleanup;
  1913. status = vtenc_q_pop(vtctx, 0, &buf, NULL);
  1914. if (status) {
  1915. av_log(avctx, AV_LOG_ERROR, "popping: %d\n", status);
  1916. goto pe_cleanup;
  1917. }
  1918. CFRelease(buf);
  1919. pe_cleanup:
  1920. if(vtctx->session)
  1921. CFRelease(vtctx->session);
  1922. vtctx->session = NULL;
  1923. vtctx->frame_ct_out = 0;
  1924. av_frame_unref(frame);
  1925. av_frame_free(&frame);
  1926. av_assert0(status != 0 || (avctx->extradata && avctx->extradata_size > 0));
  1927. return status;
  1928. }
  1929. static av_cold int vtenc_close(AVCodecContext *avctx)
  1930. {
  1931. VTEncContext *vtctx = avctx->priv_data;
  1932. if(!vtctx->session) return 0;
  1933. VTCompressionSessionCompleteFrames(vtctx->session,
  1934. kCMTimeIndefinite);
  1935. clear_frame_queue(vtctx);
  1936. pthread_cond_destroy(&vtctx->cv_sample_sent);
  1937. pthread_mutex_destroy(&vtctx->lock);
  1938. CFRelease(vtctx->session);
  1939. vtctx->session = NULL;
  1940. if (vtctx->color_primaries) {
  1941. CFRelease(vtctx->color_primaries);
  1942. vtctx->color_primaries = NULL;
  1943. }
  1944. if (vtctx->transfer_function) {
  1945. CFRelease(vtctx->transfer_function);
  1946. vtctx->transfer_function = NULL;
  1947. }
  1948. if (vtctx->ycbcr_matrix) {
  1949. CFRelease(vtctx->ycbcr_matrix);
  1950. vtctx->ycbcr_matrix = NULL;
  1951. }
  1952. return 0;
  1953. }
/* Input pixel formats accepted by the encoder. AV_PIX_FMT_VIDEOTOOLBOX lets
 * callers hand in CVPixelBuffers directly; NV12 and YUV420P frames are copied
 * into pixel buffers by the send path. Terminated by AV_PIX_FMT_NONE. */
static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_VIDEOTOOLBOX,
    AV_PIX_FMT_NV12,
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_NONE
};
/* Byte offset of an option's backing field within VTEncContext. */
#define OFFSET(x) offsetof(VTEncContext, x)
/* Common option flags: applies to video, set at encoding time. */
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* User-visible AVOptions for the h264_videotoolbox encoder. */
static const AVOption options[] = {
    /* H.264 profile; AUTO lets VideoToolbox pick. */
    { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = H264_PROF_AUTO }, H264_PROF_AUTO, H264_PROF_COUNT, VE, "profile" },
    { "baseline", "Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_BASELINE }, INT_MIN, INT_MAX, VE, "profile" },
    { "main", "Main Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_MAIN }, INT_MIN, INT_MAX, VE, "profile" },
    { "high", "High Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_HIGH }, INT_MIN, INT_MAX, VE, "profile" },

    /* H.264 level encoded as major*10 + minor (e.g. 41 = 4.1); 0 = auto. */
    { "level", "Level", OFFSET(level), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, VE, "level" },
    { "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" },
    { "3.0", "Level 3.0", 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, INT_MIN, INT_MAX, VE, "level" },
    { "3.1", "Level 3.1", 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, INT_MIN, INT_MAX, VE, "level" },
    { "3.2", "Level 3.2", 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, INT_MIN, INT_MAX, VE, "level" },
    { "4.0", "Level 4.0", 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, INT_MIN, INT_MAX, VE, "level" },
    { "4.1", "Level 4.1", 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, INT_MIN, INT_MAX, VE, "level" },
    { "4.2", "Level 4.2", 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, INT_MIN, INT_MAX, VE, "level" },
    { "5.0", "Level 5.0", 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, INT_MIN, INT_MAX, VE, "level" },
    { "5.1", "Level 5.1", 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, INT_MIN, INT_MAX, VE, "level" },
    { "5.2", "Level 5.2", 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, INT_MIN, INT_MAX, VE, "level" },

    { "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL,
        { .i64 = 0 }, 0, 1, VE },

    /* Entropy coder selection; the CABAC/CAVLC keys are only applied when
     * the runtime VideoToolbox exposes the entropy-mode property. */
    { "coder", "Entropy coding", OFFSET(entropy), AV_OPT_TYPE_INT, { .i64 = VT_ENTROPY_NOT_SET }, VT_ENTROPY_NOT_SET, VT_CABAC, VE, "coder" },
    { "cavlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
    { "vlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
    { "cabac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
    { "ac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },

    { "realtime", "Hint that encoding should happen in real-time if not faster (e.g. capturing from camera).",
        OFFSET(realtime), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "frames_before", "Other frames will come before the frames in this session. This helps smooth concatenation issues.",
        OFFSET(frames_before), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "frames_after", "Other frames will come after the frames in this session. This helps smooth concatenation issues.",
        OFFSET(frames_after), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },

    /* Pass through A53 closed captions as SEI when present on input frames. */
    { "a53cc", "Use A53 Closed Captions (if available)", OFFSET(a53_cc), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, VE },

    { NULL },
};
/* AVClass exposing the options table above through the AVOptions API. */
static const AVClass h264_videotoolbox_class = {
    .class_name = "h264_videotoolbox",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
/* Encoder registration. AV_CODEC_CAP_DELAY: the encoder buffers frames and
 * must be flushed with NULL input; init is thread-safe and close is called
 * even when init fails (FF_CODEC_CAP_INIT_CLEANUP). */
AVCodec ff_h264_videotoolbox_encoder = {
    .name             = "h264_videotoolbox",
    .long_name        = NULL_IF_CONFIG_SMALL("VideoToolbox H.264 Encoder"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_H264,
    .priv_data_size   = sizeof(VTEncContext),
    .pix_fmts         = pix_fmts,
    .init             = vtenc_init,
    .encode2          = vtenc_frame,
    .close            = vtenc_close,
    .capabilities     = AV_CODEC_CAP_DELAY,
    .priv_class       = &h264_videotoolbox_class,
    .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
                        FF_CODEC_CAP_INIT_CLEANUP,
};