/*
 * copyright (c) 2015 Rick Kern <kernrj@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <VideoToolbox/VideoToolbox.h>
#include <CoreVideo/CoreVideo.h>
#include <CoreMedia/CoreMedia.h>
#include <TargetConditionals.h>
#include <Availability.h>
#include "avcodec.h"
#include "libavutil/opt.h"
#include "libavutil/avassert.h"
#include "libavutil/atomic.h"
#include "libavutil/avstring.h"
#include "libavcodec/avcodec.h"
#include "libavutil/pixdesc.h"
#include "internal.h"
#include <pthread.h>
#include "h264.h"
#include "h264_sei.h"
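
/*
 * On SDKs that predate the BT.2020 CoreVideo constants (CONFIG_VT_BT2020
 * unset), define them locally so the code below compiles; the string values
 * match what newer SDKs use for these keys.
 */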
#if !CONFIG_VT_BT2020
# define kCVImageBufferColorPrimaries_ITU_R_2020   CFSTR("ITU_R_2020")
# define kCVImageBufferTransferFunction_ITU_R_2020 CFSTR("ITU_R_2020")
# define kCVImageBufferYCbCrMatrix_ITU_R_2020      CFSTR("ITU_R_2020")
#endif

typedef enum VT_H264Profile {
    H264_PROF_AUTO,
    H264_PROF_BASELINE,
    H264_PROF_MAIN,
    H264_PROF_HIGH,
    H264_PROF_COUNT
} VT_H264Profile;

typedef enum VTH264Entropy {
    VT_ENTROPY_NOT_SET,
    VT_CAVLC,
    VT_CABAC
} VTH264Entropy;

static const uint8_t start_code[] = { 0, 0, 0, 1 };

typedef struct ExtraSEI {
    void *data;
    size_t size;
} ExtraSEI;

typedef struct BufNode {
    CMSampleBufferRef cm_buffer;
    ExtraSEI *sei;
    struct BufNode *next;
    int error;
} BufNode;

typedef struct VTEncContext {
    AVClass *class;
    VTCompressionSessionRef session;
    CFStringRef ycbcr_matrix;
    CFStringRef color_primaries;
    CFStringRef transfer_function;

    pthread_mutex_t lock;
    pthread_cond_t  cv_sample_sent;

    int async_error;

    BufNode *q_head;
    BufNode *q_tail;

    int64_t frame_ct_out;
    int64_t frame_ct_in;

    int64_t first_pts;
    int64_t dts_delta;

    int64_t profile;
    int64_t level;
    int64_t entropy;
    int64_t realtime;
    int64_t frames_before;
    int64_t frames_after;

    int64_t allow_sw;

    bool flushing;
    bool has_b_frames;
    bool warned_color_range;
    bool a53_cc;
} VTEncContext;

static int vtenc_populate_extradata(AVCodecContext   *avctx,
                                    CMVideoCodecType codec_type,
                                    CFStringRef      profile_level,
                                    CFNumberRef      gamma_level,
                                    CFDictionaryRef  enc_info,
                                    CFDictionaryRef  pixel_buffer_info);

/**
 * NULL-safe release of *refPtr, and sets value to NULL.
 */
static void vt_release_num(CFNumberRef* refPtr)
{
    if (!*refPtr) {
        return;
    }

    CFRelease(*refPtr);
    *refPtr = NULL;
}

static void set_async_error(VTEncContext *vtctx, int err)
{
    BufNode *info;

    pthread_mutex_lock(&vtctx->lock);

    vtctx->async_error = err;

    info = vtctx->q_head;
    vtctx->q_head = vtctx->q_tail = NULL;

    while (info) {
        BufNode *next = info->next;
        CFRelease(info->cm_buffer);
        av_free(info);
        info = next;
    }

    pthread_mutex_unlock(&vtctx->lock);
}

static void clear_frame_queue(VTEncContext *vtctx)
{
    set_async_error(vtctx, 0);
}
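
/*
 * Dequeue one encoded sample. The VideoToolbox callback pushes finished
 * samples from its own thread; vtenc_frame() pops them here. With wait set,
 * this blocks on cv_sample_sent until a sample (or an async error) arrives.
 */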
static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf, ExtraSEI **sei)
{
    BufNode *info;

    pthread_mutex_lock(&vtctx->lock);

    if (vtctx->async_error) {
        pthread_mutex_unlock(&vtctx->lock);
        return vtctx->async_error;
    }

    if (vtctx->flushing && vtctx->frame_ct_in == vtctx->frame_ct_out) {
        *buf = NULL;

        pthread_mutex_unlock(&vtctx->lock);
        return 0;
    }

    while (!vtctx->q_head && !vtctx->async_error && wait) {
        pthread_cond_wait(&vtctx->cv_sample_sent, &vtctx->lock);
    }

    if (!vtctx->q_head) {
        pthread_mutex_unlock(&vtctx->lock);
        *buf = NULL;
        return 0;
    }

    info = vtctx->q_head;
    vtctx->q_head = vtctx->q_head->next;
    if (!vtctx->q_head) {
        vtctx->q_tail = NULL;
    }

    pthread_mutex_unlock(&vtctx->lock);

    *buf = info->cm_buffer;
    if (sei && *buf) {
        *sei = info->sei;
    } else if (info->sei) {
        if (info->sei->data) av_free(info->sei->data);
        av_free(info->sei);
    }
    av_free(info);

    vtctx->frame_ct_out++;

    return 0;
}

static void vtenc_q_push(VTEncContext *vtctx, CMSampleBufferRef buffer, ExtraSEI *sei)
{
    BufNode *info = av_malloc(sizeof(BufNode));
    if (!info) {
        set_async_error(vtctx, AVERROR(ENOMEM));
        return;
    }

    CFRetain(buffer);
    info->cm_buffer = buffer;
    info->sei = sei;
    info->next = NULL;

    pthread_mutex_lock(&vtctx->lock);
    pthread_cond_signal(&vtctx->cv_sample_sent);

    if (!vtctx->q_head) {
        vtctx->q_head = info;
    } else {
        vtctx->q_tail->next = info;
    }

    vtctx->q_tail = info;

    pthread_mutex_unlock(&vtctx->lock);
}
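
/*
 * The session emits NAL units in AVCC form: each unit is prefixed with a
 * big-endian length code (e.g. 00 00 00 2A for a 42-byte NALU with a 4-byte
 * code) instead of an Annex B start code. count_nalus() walks these length
 * prefixes to learn how many units a sample contains.
 */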
static int count_nalus(size_t length_code_size,
                       CMSampleBufferRef sample_buffer,
                       int *count)
{
    size_t offset = 0;
    int status;
    int nalu_ct = 0;
    uint8_t size_buf[4];
    size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
    CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);

    if (length_code_size > 4)
        return AVERROR_INVALIDDATA;

    while (offset < src_size) {
        size_t curr_src_len;
        size_t box_len = 0;
        size_t i;

        status = CMBlockBufferCopyDataBytes(block,
                                            offset,
                                            length_code_size,
                                            size_buf);

        for (i = 0; i < length_code_size; i++) {
            box_len <<= 8;
            box_len |= size_buf[i];
        }

        curr_src_len = box_len + length_code_size;
        offset += curr_src_len;

        nalu_ct++;
    }

    *count = nalu_ct;
    return 0;
}

static CMVideoCodecType get_cm_codec_type(enum AVCodecID id)
{
    switch (id) {
    case AV_CODEC_ID_H264: return kCMVideoCodecType_H264;
    default:               return 0;
    }
}

/**
 * Get the parameter sets from a CMSampleBufferRef.
 * @param dst If *dst isn't NULL, the parameters are copied into existing
 *            memory. *dst_size must be set accordingly when *dst != NULL.
 *            If *dst is NULL, it will be allocated.
 *            In all cases, *dst_size is set to the number of bytes used starting
 *            at *dst.
 */
static int get_params_size(
    AVCodecContext              *avctx,
    CMVideoFormatDescriptionRef vid_fmt,
    size_t                      *size)
{
    size_t total_size = 0;
    size_t ps_count;
    int is_count_bad = 0;
    size_t i;
    int status;
    status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt,
                                                                0,
                                                                NULL,
                                                                NULL,
                                                                &ps_count,
                                                                NULL);
    if (status) {
        is_count_bad = 1;
        ps_count     = 0;
        status       = 0;
    }

    for (i = 0; i < ps_count || is_count_bad; i++) {
        const uint8_t *ps;
        size_t ps_size;
        status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt,
                                                                    i,
                                                                    &ps,
                                                                    &ps_size,
                                                                    NULL,
                                                                    NULL);
        if (status) {
            /*
             * When ps_count is invalid, status != 0 ends the loop normally
             * unless we didn't get any parameter sets.
             */
            if (i > 0 && is_count_bad) status = 0;

            break;
        }

        total_size += ps_size + sizeof(start_code);
    }

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error getting parameter set sizes: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    *size = total_size;
    return 0;
}

static int copy_param_sets(
    AVCodecContext              *avctx,
    CMVideoFormatDescriptionRef vid_fmt,
    uint8_t                     *dst,
    size_t                      dst_size)
{
    size_t ps_count;
    int is_count_bad = 0;
    int status;
    size_t offset = 0;
    size_t i;

    status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt,
                                                                0,
                                                                NULL,
                                                                NULL,
                                                                &ps_count,
                                                                NULL);
    if (status) {
        is_count_bad = 1;
        ps_count     = 0;
        status       = 0;
    }

    for (i = 0; i < ps_count || is_count_bad; i++) {
        const uint8_t *ps;
        size_t ps_size;
        size_t next_offset;

        status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt,
                                                                    i,
                                                                    &ps,
                                                                    &ps_size,
                                                                    NULL,
                                                                    NULL);
        if (status) {
            if (i > 0 && is_count_bad) status = 0;

            break;
        }

        next_offset = offset + sizeof(start_code) + ps_size;
        if (dst_size < next_offset) {
            av_log(avctx, AV_LOG_ERROR, "Error: buffer too small for parameter sets.\n");
            return AVERROR_BUFFER_TOO_SMALL;
        }

        memcpy(dst + offset, start_code, sizeof(start_code));
        offset += sizeof(start_code);

        memcpy(dst + offset, ps, ps_size);
        offset = next_offset;
    }

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error getting parameter set data: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    return 0;
}

static int set_extradata(AVCodecContext *avctx, CMSampleBufferRef sample_buffer)
{
    CMVideoFormatDescriptionRef vid_fmt;
    size_t total_size;
    int status;

    vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
    if (!vid_fmt) {
        av_log(avctx, AV_LOG_ERROR, "No video format.\n");
        return AVERROR_EXTERNAL;
    }

    status = get_params_size(avctx, vid_fmt, &total_size);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Could not get parameter sets.\n");
        return status;
    }

    avctx->extradata = av_mallocz(total_size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!avctx->extradata) {
        return AVERROR(ENOMEM);
    }
    avctx->extradata_size = total_size;

    status = copy_param_sets(avctx, vid_fmt, avctx->extradata, total_size);

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Could not copy param sets.\n");
        return status;
    }

    return 0;
}
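
/*
 * Called by VideoToolbox on one of its worker threads each time a frame
 * finishes encoding. sourceFrameCtx carries the ExtraSEI pointer handed to
 * VTCompressionSessionEncodeFrame(); errors are latched via set_async_error()
 * so the next pop on the consumer side reports them.
 */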
static void vtenc_output_callback(
    void *ctx,
    void *sourceFrameCtx,
    OSStatus status,
    VTEncodeInfoFlags flags,
    CMSampleBufferRef sample_buffer)
{
    AVCodecContext *avctx = ctx;
    VTEncContext   *vtctx = avctx->priv_data;
    ExtraSEI *sei = sourceFrameCtx;

    if (vtctx->async_error) {
        if (sample_buffer) CFRelease(sample_buffer);
        return;
    }

    if (status || !sample_buffer) {
        av_log(avctx, AV_LOG_ERROR, "Error encoding frame: %d\n", (int)status);
        set_async_error(vtctx, AVERROR_EXTERNAL);
        return;
    }

    if (!avctx->extradata && (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
        int set_status = set_extradata(avctx, sample_buffer);
        if (set_status) {
            set_async_error(vtctx, set_status);
            return;
        }
    }

    vtenc_q_push(vtctx, sample_buffer, sei);
}

static int get_length_code_size(
    AVCodecContext    *avctx,
    CMSampleBufferRef sample_buffer,
    size_t            *size)
{
    CMVideoFormatDescriptionRef vid_fmt;
    int isize;
    int status;

    vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
    if (!vid_fmt) {
        av_log(avctx, AV_LOG_ERROR, "Error getting buffer format description.\n");
        return AVERROR_EXTERNAL;
    }

    status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt,
                                                                0,
                                                                NULL,
                                                                NULL,
                                                                NULL,
                                                                &isize);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error getting length code size: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    *size = isize;
    return 0;
}

/*
 * Returns true on success.
 *
 * If profile_level_val is NULL and this method returns true, don't specify the
 * profile/level to the encoder.
 */
static bool get_vt_profile_level(AVCodecContext *avctx,
                                 CFStringRef    *profile_level_val)
{
    VTEncContext *vtctx = avctx->priv_data;
    int64_t profile = vtctx->profile;

    if (profile == H264_PROF_AUTO && vtctx->level) {
        //Need to pick a profile if level is not auto-selected.
        profile = vtctx->has_b_frames ? H264_PROF_MAIN : H264_PROF_BASELINE;
    }

    *profile_level_val = NULL;

    switch (profile) {
    case H264_PROF_AUTO:
        return true;

    case H264_PROF_BASELINE:
        switch (vtctx->level) {
        case  0: *profile_level_val = kVTProfileLevel_H264_Baseline_AutoLevel; break;
        case 13: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3;       break;
        case 30: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0;       break;
        case 31: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1;       break;
        case 32: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2;       break;
        case 40: *profile_level_val = kVTProfileLevel_H264_Baseline_4_0;       break;
        case 41: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1;       break;
        case 42: *profile_level_val = kVTProfileLevel_H264_Baseline_4_2;       break;
        case 50: *profile_level_val = kVTProfileLevel_H264_Baseline_5_0;       break;
        case 51: *profile_level_val = kVTProfileLevel_H264_Baseline_5_1;       break;
        case 52: *profile_level_val = kVTProfileLevel_H264_Baseline_5_2;       break;
        }
        break;

    case H264_PROF_MAIN:
        switch (vtctx->level) {
        case  0: *profile_level_val = kVTProfileLevel_H264_Main_AutoLevel; break;
        case 30: *profile_level_val = kVTProfileLevel_H264_Main_3_0;       break;
        case 31: *profile_level_val = kVTProfileLevel_H264_Main_3_1;       break;
        case 32: *profile_level_val = kVTProfileLevel_H264_Main_3_2;       break;
        case 40: *profile_level_val = kVTProfileLevel_H264_Main_4_0;       break;
        case 41: *profile_level_val = kVTProfileLevel_H264_Main_4_1;       break;
        case 42: *profile_level_val = kVTProfileLevel_H264_Main_4_2;       break;
        case 50: *profile_level_val = kVTProfileLevel_H264_Main_5_0;       break;
        case 51: *profile_level_val = kVTProfileLevel_H264_Main_5_1;       break;
        case 52: *profile_level_val = kVTProfileLevel_H264_Main_5_2;       break;
        }
        break;

    case H264_PROF_HIGH:
        switch (vtctx->level) {
        case  0: *profile_level_val = kVTProfileLevel_H264_High_AutoLevel; break;
        case 30: *profile_level_val = kVTProfileLevel_H264_High_3_0;       break;
        case 31: *profile_level_val = kVTProfileLevel_H264_High_3_1;       break;
        case 32: *profile_level_val = kVTProfileLevel_H264_High_3_2;       break;
        case 40: *profile_level_val = kVTProfileLevel_H264_High_4_0;       break;
        case 41: *profile_level_val = kVTProfileLevel_H264_High_4_1;       break;
        case 42: *profile_level_val = kVTProfileLevel_H264_High_4_2;       break;
        case 50: *profile_level_val = kVTProfileLevel_H264_High_5_0;       break;
        case 51: *profile_level_val = kVTProfileLevel_H264_High_5_1;       break;
        case 52: *profile_level_val = kVTProfileLevel_H264_High_5_2;       break;
        }
        break;
    }

    if (!*profile_level_val) {
        av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
        return false;
    }

    return true;
}

static int get_cv_pixel_format(AVCodecContext* avctx,
                               enum AVPixelFormat fmt,
                               enum AVColorRange range,
                               int* av_pixel_format,
                               int* range_guessed)
{
    if (range_guessed) *range_guessed = range != AVCOL_RANGE_MPEG &&
                                        range != AVCOL_RANGE_JPEG;

    //MPEG range is used when no range is set
    if (fmt == AV_PIX_FMT_NV12) {
        *av_pixel_format = range == AVCOL_RANGE_JPEG ?
                               kCVPixelFormatType_420YpCbCr8BiPlanarFullRange :
                               kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
    } else if (fmt == AV_PIX_FMT_YUV420P) {
        *av_pixel_format = range == AVCOL_RANGE_JPEG ?
                               kCVPixelFormatType_420YpCbCr8PlanarFullRange :
                               kCVPixelFormatType_420YpCbCr8Planar;
    } else {
        return AVERROR(EINVAL);
    }

    return 0;
}
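
/*
 * Color metadata is attached in two places: as CVImageBuffer attachments on
 * the pixel buffers (below) and as properties on the compression session (in
 * vtenc_create_encoder()), so the signalled primaries/transfer/matrix stay
 * consistent between the input buffers and the bitstream.
 */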
static void add_color_attr(AVCodecContext *avctx, CFMutableDictionaryRef dict)
{
    VTEncContext *vtctx = avctx->priv_data;

    if (vtctx->color_primaries) {
        CFDictionarySetValue(dict,
                             kCVImageBufferColorPrimariesKey,
                             vtctx->color_primaries);
    }

    if (vtctx->transfer_function) {
        CFDictionarySetValue(dict,
                             kCVImageBufferTransferFunctionKey,
                             vtctx->transfer_function);
    }

    if (vtctx->ycbcr_matrix) {
        CFDictionarySetValue(dict,
                             kCVImageBufferYCbCrMatrixKey,
                             vtctx->ycbcr_matrix);
    }
}

static int create_cv_pixel_buffer_info(AVCodecContext* avctx,
                                       CFMutableDictionaryRef* dict)
{
    CFNumberRef cv_color_format_num = NULL;
    CFNumberRef width_num = NULL;
    CFNumberRef height_num = NULL;
    CFMutableDictionaryRef pixel_buffer_info = NULL;
    int cv_color_format;
    int status = get_cv_pixel_format(avctx,
                                     avctx->pix_fmt,
                                     avctx->color_range,
                                     &cv_color_format,
                                     NULL);
    if (status) return status;

    pixel_buffer_info = CFDictionaryCreateMutable(
        kCFAllocatorDefault,
        20,
        &kCFCopyStringDictionaryKeyCallBacks,
        &kCFTypeDictionaryValueCallBacks);

    if (!pixel_buffer_info) goto pbinfo_nomem;

    cv_color_format_num = CFNumberCreate(kCFAllocatorDefault,
                                         kCFNumberSInt32Type,
                                         &cv_color_format);
    if (!cv_color_format_num) goto pbinfo_nomem;

    CFDictionarySetValue(pixel_buffer_info,
                         kCVPixelBufferPixelFormatTypeKey,
                         cv_color_format_num);
    vt_release_num(&cv_color_format_num);

    width_num = CFNumberCreate(kCFAllocatorDefault,
                               kCFNumberSInt32Type,
                               &avctx->width);
    if (!width_num) goto pbinfo_nomem;

    CFDictionarySetValue(pixel_buffer_info,
                         kCVPixelBufferWidthKey,
                         width_num);
    vt_release_num(&width_num);

    height_num = CFNumberCreate(kCFAllocatorDefault,
                                kCFNumberSInt32Type,
                                &avctx->height);
    if (!height_num) goto pbinfo_nomem;

    CFDictionarySetValue(pixel_buffer_info,
                         kCVPixelBufferHeightKey,
                         height_num);
    vt_release_num(&height_num);

    add_color_attr(avctx, pixel_buffer_info);

    *dict = pixel_buffer_info;
    return 0;

pbinfo_nomem:
    vt_release_num(&cv_color_format_num);
    vt_release_num(&width_num);
    vt_release_num(&height_num);
    if (pixel_buffer_info) CFRelease(pixel_buffer_info);

    return AVERROR(ENOMEM);
}

static int get_cv_color_primaries(AVCodecContext *avctx,
                                  CFStringRef *primaries)
{
    enum AVColorPrimaries pri = avctx->color_primaries;
    switch (pri) {
    case AVCOL_PRI_UNSPECIFIED:
        *primaries = NULL;
        break;

    case AVCOL_PRI_BT709:
        *primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
        break;

    case AVCOL_PRI_BT2020:
        *primaries = kCVImageBufferColorPrimaries_ITU_R_2020;
        break;

    default:
        av_log(avctx, AV_LOG_ERROR, "Color primaries %s is not supported.\n", av_color_primaries_name(pri));
        *primaries = NULL;
        return -1;
    }

    return 0;
}

static int get_cv_transfer_function(AVCodecContext *avctx,
                                    CFStringRef *transfer_fnc,
                                    CFNumberRef *gamma_level)
{
    enum AVColorTransferCharacteristic trc = avctx->color_trc;
    Float32 gamma;
    *gamma_level = NULL;

    switch (trc) {
    case AVCOL_TRC_UNSPECIFIED:
        *transfer_fnc = NULL;
        break;

    case AVCOL_TRC_BT709:
        *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_709_2;
        break;

    case AVCOL_TRC_SMPTE240M:
        *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_240M_1995;
        break;

    case AVCOL_TRC_GAMMA22:
        gamma = 2.2;
        *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
        *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
        break;

    case AVCOL_TRC_GAMMA28:
        gamma = 2.8;
        *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
        *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
        break;

    case AVCOL_TRC_BT2020_10:
    case AVCOL_TRC_BT2020_12:
        *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_2020;
        break;

    default:
        av_log(avctx, AV_LOG_ERROR, "Transfer function %s is not supported.\n", av_color_transfer_name(trc));
        return -1;
    }

    return 0;
}

static int get_cv_ycbcr_matrix(AVCodecContext *avctx, CFStringRef *matrix)
{
    switch (avctx->colorspace) {
    case AVCOL_SPC_BT709:
        *matrix = kCVImageBufferYCbCrMatrix_ITU_R_709_2;
        break;

    case AVCOL_SPC_UNSPECIFIED:
        *matrix = NULL;
        break;

    case AVCOL_SPC_BT470BG:
    case AVCOL_SPC_SMPTE170M:
        *matrix = kCVImageBufferYCbCrMatrix_ITU_R_601_4;
        break;

    case AVCOL_SPC_SMPTE240M:
        *matrix = kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
        break;

    case AVCOL_SPC_BT2020_NCL:
        *matrix = kCVImageBufferYCbCrMatrix_ITU_R_2020;
        break;

    default:
        av_log(avctx, AV_LOG_ERROR, "Color space %s is not supported.\n", av_color_space_name(avctx->colorspace));
        return -1;
    }

    return 0;
}
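
/*
 * Creates the VTCompressionSession and applies all configured properties:
 * average bitrate, profile/level, keyframe interval, pixel aspect ratio,
 * color metadata, entropy mode and realtime flag. Properties a given encoder
 * does not support produce either a warning or a hard error, depending on how
 * much they affect the output.
 */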
static int vtenc_create_encoder(AVCodecContext   *avctx,
                                CMVideoCodecType codec_type,
                                CFStringRef      profile_level,
                                CFNumberRef      gamma_level,
                                CFDictionaryRef  enc_info,
                                CFDictionaryRef  pixel_buffer_info,
                                VTCompressionSessionRef *session)
{
    VTEncContext *vtctx = avctx->priv_data;
    SInt32       bit_rate = avctx->bit_rate;
    CFNumberRef  bit_rate_num;

    int status = VTCompressionSessionCreate(kCFAllocatorDefault,
                                            avctx->width,
                                            avctx->height,
                                            codec_type,
                                            enc_info,
                                            pixel_buffer_info,
                                            kCFAllocatorDefault,
                                            vtenc_output_callback,
                                            avctx,
                                            session);

    if (status || !vtctx->session) {
        av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status);

#if !TARGET_OS_IPHONE
        if (!vtctx->allow_sw) {
            av_log(avctx, AV_LOG_ERROR, "Try -allow_sw 1. The hardware encoder may be busy, or not supported.\n");
        }
#endif

        return AVERROR_EXTERNAL;
    }

    bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
                                  kCFNumberSInt32Type,
                                  &bit_rate);
    if (!bit_rate_num) return AVERROR(ENOMEM);

    status = VTSessionSetProperty(vtctx->session,
                                  kVTCompressionPropertyKey_AverageBitRate,
                                  bit_rate_num);
    CFRelease(bit_rate_num);

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error setting bitrate property: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    if (profile_level) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_ProfileLevel,
                                      profile_level);
        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    if (avctx->gop_size > 0) {
        CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault,
                                              kCFNumberIntType,
                                              &avctx->gop_size);
        if (!interval) {
            return AVERROR(ENOMEM);
        }

        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_MaxKeyFrameInterval,
                                      interval);
        CFRelease(interval);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting 'max key-frame interval' property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    if (vtctx->frames_before) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_MoreFramesBeforeStart,
                                      kCFBooleanTrue);

        if (status == kVTPropertyNotSupportedErr) {
            av_log(avctx, AV_LOG_WARNING, "frames_before property is not supported on this device. Ignoring.\n");
        } else if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting frames_before property: %d\n", status);
        }
    }

    if (vtctx->frames_after) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_MoreFramesAfterEnd,
                                      kCFBooleanTrue);

        if (status == kVTPropertyNotSupportedErr) {
            av_log(avctx, AV_LOG_WARNING, "frames_after property is not supported on this device. Ignoring.\n");
        } else if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting frames_after property: %d\n", status);
        }
    }

    if (avctx->sample_aspect_ratio.num != 0) {
        CFNumberRef num;
        CFNumberRef den;
        CFMutableDictionaryRef par;
        AVRational *avpar = &avctx->sample_aspect_ratio;

        av_reduce(&avpar->num, &avpar->den,
                  avpar->num, avpar->den,
                  0xFFFFFFFF);

        num = CFNumberCreate(kCFAllocatorDefault,
                             kCFNumberIntType,
                             &avpar->num);

        den = CFNumberCreate(kCFAllocatorDefault,
                             kCFNumberIntType,
                             &avpar->den);

        par = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                        2,
                                        &kCFCopyStringDictionaryKeyCallBacks,
                                        &kCFTypeDictionaryValueCallBacks);

        if (!par || !num || !den) {
            if (par) CFRelease(par);
            if (num) CFRelease(num);
            if (den) CFRelease(den);

            return AVERROR(ENOMEM);
        }

        CFDictionarySetValue(
            par,
            kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing,
            num);

        CFDictionarySetValue(
            par,
            kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing,
            den);

        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_PixelAspectRatio,
                                      par);

        CFRelease(par);
        CFRelease(num);
        CFRelease(den);

        if (status) {
            av_log(avctx,
                   AV_LOG_ERROR,
                   "Error setting pixel aspect ratio to %d:%d: %d.\n",
                   avctx->sample_aspect_ratio.num,
                   avctx->sample_aspect_ratio.den,
                   status);

            return AVERROR_EXTERNAL;
        }
    }

    if (vtctx->transfer_function) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_TransferFunction,
                                      vtctx->transfer_function);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set transfer function: %d\n", status);
        }
    }

    if (vtctx->ycbcr_matrix) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_YCbCrMatrix,
                                      vtctx->ycbcr_matrix);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set ycbcr matrix: %d\n", status);
        }
    }

    if (vtctx->color_primaries) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_ColorPrimaries,
                                      vtctx->color_primaries);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set color primaries: %d\n", status);
        }
    }

    if (gamma_level) {
        status = VTSessionSetProperty(vtctx->session,
                                      kCVImageBufferGammaLevelKey,
                                      gamma_level);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set gamma level: %d\n", status);
        }
    }

    if (!vtctx->has_b_frames) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_AllowFrameReordering,
                                      kCFBooleanFalse);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting 'allow frame reordering' property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    if (vtctx->entropy != VT_ENTROPY_NOT_SET) {
        CFStringRef entropy = vtctx->entropy == VT_CABAC ?
                                  kVTH264EntropyMode_CABAC :
                                  kVTH264EntropyMode_CAVLC;

        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_H264EntropyMode,
                                      entropy);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting entropy property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    if (vtctx->realtime) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_RealTime,
                                      kCFBooleanTrue);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting realtime property: %d\n", status);
        }
    }

    status = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error: cannot prepare encoder: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    return 0;
}
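
/*
 * Encoder init: map the AVCodecID to a CoreMedia codec type, resolve the
 * profile/level and color options, optionally pre-populate global-header
 * extradata, then create the real session. has_b_frames is re-read from the
 * session afterwards because some devices silently refuse frame reordering.
 */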
static av_cold int vtenc_init(AVCodecContext *avctx)
{
    CFMutableDictionaryRef enc_info;
    CFMutableDictionaryRef pixel_buffer_info;
    CMVideoCodecType       codec_type;
    VTEncContext           *vtctx = avctx->priv_data;
    CFStringRef            profile_level;
    CFBooleanRef           has_b_frames_cfbool;
    CFNumberRef            gamma_level = NULL;
    int                    status;

    codec_type = get_cm_codec_type(avctx->codec_id);
    if (!codec_type) {
        av_log(avctx, AV_LOG_ERROR, "Error: no mapping for AVCodecID %d\n", avctx->codec_id);
        return AVERROR(EINVAL);
    }

    vtctx->has_b_frames = avctx->max_b_frames > 0;
    if (vtctx->has_b_frames && vtctx->profile == H264_PROF_BASELINE) {
        av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n");
        vtctx->has_b_frames = false;
    }

    if (vtctx->entropy == VT_CABAC && vtctx->profile == H264_PROF_BASELINE) {
        av_log(avctx, AV_LOG_WARNING, "CABAC entropy requires 'main' or 'high' profile, but baseline was requested. Encode will not use CABAC entropy.\n");
        vtctx->entropy = VT_ENTROPY_NOT_SET;
    }

    if (!get_vt_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);

    vtctx->session = NULL;

    enc_info = CFDictionaryCreateMutable(
        kCFAllocatorDefault,
        20,
        &kCFCopyStringDictionaryKeyCallBacks,
        &kCFTypeDictionaryValueCallBacks
    );

    if (!enc_info) return AVERROR(ENOMEM);

#if !TARGET_OS_IPHONE
    if (!vtctx->allow_sw) {
        CFDictionarySetValue(enc_info, kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder, kCFBooleanTrue);
    } else {
        CFDictionarySetValue(enc_info, kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder, kCFBooleanTrue);
    }
#endif

    if (avctx->pix_fmt != AV_PIX_FMT_VIDEOTOOLBOX) {
        status = create_cv_pixel_buffer_info(avctx, &pixel_buffer_info);
        if (status)
            goto init_cleanup;
    } else {
        pixel_buffer_info = NULL;
    }

    pthread_mutex_init(&vtctx->lock, NULL);
    pthread_cond_init(&vtctx->cv_sample_sent, NULL);
    vtctx->dts_delta = vtctx->has_b_frames ? -1 : 0;

    get_cv_transfer_function(avctx, &vtctx->transfer_function, &gamma_level);
    get_cv_ycbcr_matrix(avctx, &vtctx->ycbcr_matrix);
    get_cv_color_primaries(avctx, &vtctx->color_primaries);

    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        status = vtenc_populate_extradata(avctx,
                                          codec_type,
                                          profile_level,
                                          gamma_level,
                                          enc_info,
                                          pixel_buffer_info);
        if (status)
            goto init_cleanup;
    }

    status = vtenc_create_encoder(avctx,
                                  codec_type,
                                  profile_level,
                                  gamma_level,
                                  enc_info,
                                  pixel_buffer_info,
                                  &vtctx->session);

    if (status < 0)
        goto init_cleanup;

    status = VTSessionCopyProperty(vtctx->session,
                                   kVTCompressionPropertyKey_AllowFrameReordering,
                                   kCFAllocatorDefault,
                                   &has_b_frames_cfbool);

    if (!status) {
        //Some devices don't output B-frames for main profile, even if requested.
        vtctx->has_b_frames = CFBooleanGetValue(has_b_frames_cfbool);
        CFRelease(has_b_frames_cfbool);
    }
    avctx->has_b_frames = vtctx->has_b_frames;

init_cleanup:
    if (gamma_level)
        CFRelease(gamma_level);

    if (pixel_buffer_info)
        CFRelease(pixel_buffer_info);

    CFRelease(enc_info);

    return status;
}

static void vtenc_get_frame_info(CMSampleBufferRef buffer, bool *is_key_frame)
{
    CFArrayRef      attachments;
    CFDictionaryRef attachment;
    CFBooleanRef    not_sync;
    CFIndex         len;

    attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
    len = !attachments ? 0 : CFArrayGetCount(attachments);

    if (!len) {
        *is_key_frame = true;
        return;
    }

    attachment = CFArrayGetValueAtIndex(attachments, 0);

    if (CFDictionaryGetValueIfPresent(attachment,
                                      kCMSampleAttachmentKey_NotSync,
                                      (const void **)&not_sync))
    {
        *is_key_frame = !CFBooleanGetValue(not_sync);
    } else {
        *is_key_frame = true;
    }
}

static int is_post_sei_nal_type(int nal_type)
{
    return nal_type != H264_NAL_SEI &&
           nal_type != H264_NAL_SPS &&
           nal_type != H264_NAL_PPS &&
           nal_type != H264_NAL_AUD;
}

/*
 * Walks the SEI messages in an SEI NAL unit and returns the position just
 * past the last message, so an additional message can be appended there.
 */
static int find_sei_end(AVCodecContext *avctx,
                        uint8_t        *nal_data,
                        size_t         nal_size,
                        uint8_t        **sei_end)
{
    int nal_type;
    size_t sei_payload_size = 0;
    int sei_payload_type = 0;
    uint8_t *nal_start = nal_data;
    *sei_end = NULL;

    if (!nal_size)
        return 0;

    nal_type = *nal_data & 0x1F;
    if (nal_type != H264_NAL_SEI)
        return 0;

    nal_data++;
    nal_size--;

    if (nal_data[nal_size - 1] == 0x80)
        nal_size--;

    while (nal_size > 0 && *nal_data > 0) {
        do {
            sei_payload_type += *nal_data;
            nal_data++;
            nal_size--;
        } while (nal_size > 0 && *nal_data == 0xFF);

        if (!nal_size) {
            av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing type.\n");
            return AVERROR_INVALIDDATA;
        }

        do {
            sei_payload_size += *nal_data;
            nal_data++;
            nal_size--;
        } while (nal_size > 0 && *nal_data == 0xFF);

        if (nal_size < sei_payload_size) {
            av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing size.\n");
            return AVERROR_INVALIDDATA;
        }

        nal_data += sei_payload_size;
        nal_size -= sei_payload_size;
    }

    *sei_end = nal_data;

    return nal_data - nal_start + 1;
}
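
/*
 * Annex B requires "emulation prevention": inside a NAL unit, any of the byte
 * sequences 00 00 00, 00 00 01, 00 00 02 or 00 00 03 must be written with an
 * extra 0x03 after the two zeros (e.g. 00 00 01 becomes 00 00 03 01), so the
 * payload can never mimic a start code. copy_emulation_prev() below performs
 * that escaping while copying the raw SEI bytes.
 */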
/**
 * Copies the data inserting emulation prevention bytes as needed.
 * Existing data in the destination can be taken into account by providing
 * dst with a dst_offset > 0.
 *
 * @return The number of bytes copied on success. On failure, the negative of
 *         the number of bytes needed to copy src is returned.
 */
static int copy_emulation_prev(const uint8_t *src,
                               size_t        src_size,
                               uint8_t       *dst,
                               ssize_t       dst_offset,
                               size_t        dst_size)
{
    int zeros = 0;
    int wrote_bytes;
    uint8_t* dst_start;
    uint8_t* dst_end = dst + dst_size;
    const uint8_t* src_end = src + src_size;
    int start_at = dst_offset > 2 ? dst_offset - 2 : 0;
    int i;
    for (i = start_at; i < dst_offset && i < dst_size; i++) {
        if (!dst[i])
            zeros++;
        else
            zeros = 0;
    }

    dst += dst_offset;
    dst_start = dst;
    for (; src < src_end; src++, dst++) {
        if (zeros == 2) {
            int insert_ep3_byte = *src <= 3;
            if (insert_ep3_byte) {
                if (dst < dst_end)
                    *dst = 3;
                dst++;
            }

            zeros = 0;
        }

        if (dst < dst_end)
            *dst = *src;

        if (!*src)
            zeros++;
        else
            zeros = 0;
    }

    wrote_bytes = dst - dst_start;

    if (dst > dst_end)
        return -wrote_bytes;

    return wrote_bytes;
}
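
/*
 * SEI messages encode their payload type and size as byte sequences: each
 * value is written as N 0xFF bytes followed by one byte holding the
 * remainder, so e.g. a size of 300 is coded as FF 2D (255 + 45). write_sei()
 * emits that header, then the emulation-protected payload.
 */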
static int write_sei(const ExtraSEI *sei,
                     int sei_type,
                     uint8_t *dst,
                     size_t dst_size)
{
    uint8_t *sei_start = dst;
    size_t remaining_sei_size = sei->size;
    size_t remaining_dst_size = dst_size;
    int header_bytes;
    int bytes_written;
    ssize_t offset;

    if (!remaining_dst_size)
        return AVERROR_BUFFER_TOO_SMALL;

    while (sei_type && remaining_dst_size != 0) {
        int sei_byte = sei_type > 255 ? 255 : sei_type;
        *dst = sei_byte;

        sei_type -= sei_byte;
        dst++;
        remaining_dst_size--;
    }

    if (!dst_size)
        return AVERROR_BUFFER_TOO_SMALL;

    while (remaining_sei_size && remaining_dst_size != 0) {
        int size_byte = remaining_sei_size > 255 ? 255 : remaining_sei_size;
        *dst = size_byte;

        remaining_sei_size -= size_byte;
        dst++;
        remaining_dst_size--;
    }

    if (remaining_dst_size < sei->size)
        return AVERROR_BUFFER_TOO_SMALL;

    header_bytes = dst - sei_start;

    offset = header_bytes;
    bytes_written = copy_emulation_prev(sei->data,
                                        sei->size,
                                        sei_start,
                                        offset,
                                        dst_size);
    if (bytes_written < 0)
        return AVERROR_BUFFER_TOO_SMALL;

    bytes_written += header_bytes;
    return bytes_written;
}

/**
 * Copies NAL units and replaces length codes with
 * H.264 Annex B start codes. On failure, the contents of
 * dst_data may have been modified.
 *
 * @param length_code_size Byte length of each length code
 * @param sample_buffer NAL units prefixed with length codes.
 * @param sei Optional A53 closed captions SEI data.
 * @param dst_data Must be zeroed before calling this function.
 *                 Contains the copied NAL units prefixed with
 *                 start codes when the function returns
 *                 successfully.
 * @param dst_size Length of dst_data
 * @return 0 on success
 *         AVERROR_INVALIDDATA if length_code_size is invalid
 *         AVERROR_BUFFER_TOO_SMALL if dst_data is too small
 *         or if a length_code in src_data specifies data beyond
 *         the end of its buffer.
 */
static int copy_replace_length_codes(
    AVCodecContext    *avctx,
    size_t            length_code_size,
    CMSampleBufferRef sample_buffer,
    ExtraSEI          *sei,
    uint8_t           *dst_data,
    size_t            dst_size)
{
    size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
    size_t remaining_src_size = src_size;
    size_t remaining_dst_size = dst_size;
    size_t src_offset = 0;
    int wrote_sei = 0;
    int status;
    uint8_t size_buf[4];
    uint8_t nal_type;
    CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);

    if (length_code_size > 4) {
        return AVERROR_INVALIDDATA;
    }

    while (remaining_src_size > 0) {
        size_t curr_src_len;
        size_t curr_dst_len;
        size_t box_len = 0;
        size_t i;

        uint8_t *dst_box;

        status = CMBlockBufferCopyDataBytes(block,
                                            src_offset,
                                            length_code_size,
                                            size_buf);
        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Cannot copy length: %d\n", status);
            return AVERROR_EXTERNAL;
        }

        status = CMBlockBufferCopyDataBytes(block,
                                            src_offset + length_code_size,
                                            1,
                                            &nal_type);
        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Cannot copy type: %d\n", status);
            return AVERROR_EXTERNAL;
        }

        nal_type &= 0x1F;

        for (i = 0; i < length_code_size; i++) {
            box_len <<= 8;
            box_len |= size_buf[i];
        }

        if (sei && !wrote_sei && is_post_sei_nal_type(nal_type)) {
            //No SEI NAL unit - insert.
            int wrote_bytes;

            memcpy(dst_data, start_code, sizeof(start_code));
            dst_data += sizeof(start_code);
            remaining_dst_size -= sizeof(start_code);

            *dst_data = H264_NAL_SEI;
            dst_data++;
            remaining_dst_size--;

            wrote_bytes = write_sei(sei,
                                    SEI_TYPE_USER_DATA_REGISTERED,
                                    dst_data,
                                    remaining_dst_size);

            if (wrote_bytes < 0)
                return wrote_bytes;

            remaining_dst_size -= wrote_bytes;
            dst_data += wrote_bytes;

            if (remaining_dst_size <= 0)
                return AVERROR_BUFFER_TOO_SMALL;

            *dst_data = 0x80;
            dst_data++;
            remaining_dst_size--;

            wrote_sei = 1;
        }

        curr_src_len = box_len + length_code_size;
        curr_dst_len = box_len + sizeof(start_code);

        if (remaining_src_size < curr_src_len) {
            return AVERROR_BUFFER_TOO_SMALL;
        }

        if (remaining_dst_size < curr_dst_len) {
            return AVERROR_BUFFER_TOO_SMALL;
        }

        dst_box = dst_data + sizeof(start_code);

        memcpy(dst_data, start_code, sizeof(start_code));
        status = CMBlockBufferCopyDataBytes(block,
                                            src_offset + length_code_size,
                                            box_len,
                                            dst_box);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Cannot copy data: %d\n", status);
            return AVERROR_EXTERNAL;
        }

        if (sei && !wrote_sei && nal_type == H264_NAL_SEI) {
            //Found SEI NAL unit - append.
            int wrote_bytes;
            int old_sei_length;
            int extra_bytes;
            uint8_t *new_sei;
            old_sei_length = find_sei_end(avctx, dst_box, box_len, &new_sei);
            if (old_sei_length < 0)
                return old_sei_length;

            wrote_bytes = write_sei(sei,
                                    SEI_TYPE_USER_DATA_REGISTERED,
                                    new_sei,
                                    remaining_dst_size - old_sei_length);
            if (wrote_bytes < 0)
                return wrote_bytes;

            if (new_sei + wrote_bytes >= dst_data + remaining_dst_size)
                return AVERROR_BUFFER_TOO_SMALL;

            new_sei[wrote_bytes++] = 0x80;
            extra_bytes = wrote_bytes - (dst_box + box_len - new_sei);

            dst_data += extra_bytes;
            remaining_dst_size -= extra_bytes;

            wrote_sei = 1;
        }

        src_offset += curr_src_len;
        dst_data += curr_dst_len;

        remaining_src_size -= curr_src_len;
        remaining_dst_size -= curr_dst_len;
    }

    return 0;
}

/**
 * Returns a sufficient number of bytes to contain the SEI data.
 * It may be greater than the minimum required.
 */
static int get_sei_msg_bytes(const ExtraSEI* sei, int type)
{
    int copied_size;
    if (sei->size == 0)
        return 0;

    copied_size = -copy_emulation_prev(sei->data,
                                       sei->size,
                                       NULL,
                                       0,
                                       0);

    if ((sei->size % 255) == 0) //may result in an extra byte
        copied_size++;

    return copied_size + sei->size / 255 + 1 + type / 255 + 1;
}
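
/*
 * Converts one encoded CMSampleBuffer into an AVPacket: computes the output
 * size (parameter sets for keyframes when not using global headers, the
 * optional SEI NALU, and the start-code/length-code size difference per
 * NALU), copies the data, and derives pts/dts from the sample timestamps.
 */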
static int vtenc_cm_to_avpacket(
    AVCodecContext    *avctx,
    CMSampleBufferRef sample_buffer,
    AVPacket          *pkt,
    ExtraSEI          *sei)
{
    VTEncContext *vtctx = avctx->priv_data;

    int     status;
    bool    is_key_frame;
    bool    add_header;
    size_t  length_code_size;
    size_t  header_size = 0;
    size_t  in_buf_size;
    size_t  out_buf_size;
    size_t  sei_nalu_size = 0;
    int64_t dts_delta;
    int64_t time_base_num;
    int nalu_count;
    CMTime  pts;
    CMTime  dts;
    CMVideoFormatDescriptionRef vid_fmt;

    vtenc_get_frame_info(sample_buffer, &is_key_frame);
    status = get_length_code_size(avctx, sample_buffer, &length_code_size);
    if (status) return status;

    add_header = is_key_frame && !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);

    if (add_header) {
        vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
        if (!vid_fmt) {
            av_log(avctx, AV_LOG_ERROR, "Cannot get format description.\n");
            return AVERROR_EXTERNAL;
        }

        int status = get_params_size(avctx, vid_fmt, &header_size);
        if (status) return status;
    }

    status = count_nalus(length_code_size, sample_buffer, &nalu_count);
    if (status)
        return status;

    if (sei) {
        size_t msg_size = get_sei_msg_bytes(sei,
                                            SEI_TYPE_USER_DATA_REGISTERED);

        sei_nalu_size = sizeof(start_code) + 1 + msg_size + 1;
    }

    in_buf_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
    out_buf_size = header_size +
                   in_buf_size +
                   sei_nalu_size +
                   nalu_count * ((int)sizeof(start_code) - (int)length_code_size);

    status = ff_alloc_packet2(avctx, pkt, out_buf_size, out_buf_size);
    if (status < 0)
        return status;

    if (add_header) {
        status = copy_param_sets(avctx, vid_fmt, pkt->data, out_buf_size);
        if (status) return status;
    }

    status = copy_replace_length_codes(
        avctx,
        length_code_size,
        sample_buffer,
        sei,
        pkt->data + header_size,
        pkt->size - header_size
    );

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error copying packet data: %d\n", status);
        return status;
    }

    if (is_key_frame) {
        pkt->flags |= AV_PKT_FLAG_KEY;
    }

    pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
    dts = CMSampleBufferGetDecodeTimeStamp      (sample_buffer);

    if (CMTIME_IS_INVALID(dts)) {
        if (!vtctx->has_b_frames) {
            dts = pts;
        } else {
            av_log(avctx, AV_LOG_ERROR, "DTS is invalid.\n");
            return AVERROR_EXTERNAL;
        }
    }

    dts_delta = vtctx->dts_delta >= 0 ? vtctx->dts_delta : 0;
    time_base_num = avctx->time_base.num;
    pkt->pts = pts.value / time_base_num;
    pkt->dts = dts.value / time_base_num - dts_delta;
    pkt->size = out_buf_size;

    return 0;
}
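
/*
 * Input-plane bookkeeping for wrapping/copying AVFrames into CVPixelBuffers.
 * contiguous_buf_size matters on macOS, where the frame is wrapped without a
 * copy via CVPixelBufferCreateWithPlanarBytes() and the planes must be
 * described exactly as they sit in memory.
 */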
/*
 * contiguous_buf_size is 0 if not contiguous, and the size of the buffer
 * containing all planes if so.
 */
static int get_cv_pixel_info(
    AVCodecContext *avctx,
    const AVFrame  *frame,
    int            *color,
    int            *plane_count,
    size_t         *widths,
    size_t         *heights,
    size_t         *strides,
    size_t         *contiguous_buf_size)
{
    VTEncContext *vtctx = avctx->priv_data;
    int av_format       = frame->format;
    int av_color_range  = av_frame_get_color_range(frame);
    int i;
    int range_guessed;
    int status;

    status = get_cv_pixel_format(avctx, av_format, av_color_range, color, &range_guessed);
    if (status) {
        av_log(avctx,
               AV_LOG_ERROR,
               "Could not get pixel format for color format '%s' range '%s'.\n",
               av_get_pix_fmt_name(av_format),
               av_color_range > AVCOL_RANGE_UNSPECIFIED &&
               av_color_range < AVCOL_RANGE_NB ?
                   av_color_range_name(av_color_range) :
                   "Unknown");

        return AVERROR(EINVAL);
    }

    if (range_guessed) {
        if (!vtctx->warned_color_range) {
            vtctx->warned_color_range = true;
            av_log(avctx,
                   AV_LOG_WARNING,
                   "Color range not set for %s. Using MPEG range.\n",
                   av_get_pix_fmt_name(av_format));
        }
    }

    switch (av_format) {
    case AV_PIX_FMT_NV12:
        *plane_count = 2;

        widths [0] = avctx->width;
        heights[0] = avctx->height;
        strides[0] = frame ? frame->linesize[0] : avctx->width;

        widths [1] = (avctx->width  + 1) / 2;
        heights[1] = (avctx->height + 1) / 2;
        strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) & -2;
        break;

    case AV_PIX_FMT_YUV420P:
        *plane_count = 3;

        widths [0] = avctx->width;
        heights[0] = avctx->height;
        strides[0] = frame ? frame->linesize[0] : avctx->width;

        widths [1] = (avctx->width  + 1) / 2;
        heights[1] = (avctx->height + 1) / 2;
        strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) / 2;

        widths [2] = (avctx->width  + 1) / 2;
        heights[2] = (avctx->height + 1) / 2;
        strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2;
        break;

    default:
        av_log(
               avctx,
               AV_LOG_ERROR,
               "Could not get frame format info for color %d range %d.\n",
               av_format,
               av_color_range);

        return AVERROR(EINVAL);
    }

    *contiguous_buf_size = 0;
    for (i = 0; i < *plane_count; i++) {
        if (i < *plane_count - 1 &&
            frame->data[i] + strides[i] * heights[i] != frame->data[i + 1]) {
            *contiguous_buf_size = 0;
            break;
        }

        *contiguous_buf_size += strides[i] * heights[i];
    }

    return 0;
}

#if !TARGET_OS_IPHONE
//Not used on iOS - frame is always copied.
static void free_avframe(
    void       *release_ctx,
    const void *data,
    size_t      size,
    size_t      plane_count,
    const void *plane_addresses[])
{
    AVFrame *frame = release_ctx;
    av_frame_free(&frame);
}
#else
//Not used on OSX - frame is never copied.
static int copy_avframe_to_pixel_buffer(AVCodecContext   *avctx,
                                        const AVFrame    *frame,
                                        CVPixelBufferRef cv_img,
                                        const size_t     *plane_strides,
                                        const size_t     *plane_rows)
{
    int i, j;
    size_t plane_count;
    int status;
    int rows;
    int src_stride;
    int dst_stride;
    uint8_t *src_addr;
    uint8_t *dst_addr;
    size_t copy_bytes;

    status = CVPixelBufferLockBaseAddress(cv_img, 0);
    if (status) {
        av_log(
            avctx,
            AV_LOG_ERROR,
            "Error: Could not lock base address of CVPixelBuffer: %d.\n",
            status
        );
    }

    if (CVPixelBufferIsPlanar(cv_img)) {
        plane_count = CVPixelBufferGetPlaneCount(cv_img);
        for (i = 0; frame->data[i]; i++) {
            if (i == plane_count) {
                CVPixelBufferUnlockBaseAddress(cv_img, 0);
                av_log(avctx,
                       AV_LOG_ERROR,
                       "Error: different number of planes in AVFrame and CVPixelBuffer.\n"
                );

                return AVERROR_EXTERNAL;
            }

            dst_addr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cv_img, i);
            src_addr = (uint8_t*)frame->data[i];
            dst_stride = CVPixelBufferGetBytesPerRowOfPlane(cv_img, i);
            src_stride = plane_strides[i];
            rows = plane_rows[i];

            if (dst_stride == src_stride) {
                memcpy(dst_addr, src_addr, src_stride * rows);
            } else {
                copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;

                for (j = 0; j < rows; j++) {
                    memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
                }
            }
        }
    } else {
        if (frame->data[1]) {
            CVPixelBufferUnlockBaseAddress(cv_img, 0);
            av_log(avctx,
                   AV_LOG_ERROR,
                   "Error: different number of planes in AVFrame and non-planar CVPixelBuffer.\n"
            );

            return AVERROR_EXTERNAL;
        }

        dst_addr = (uint8_t*)CVPixelBufferGetBaseAddress(cv_img);
        src_addr = (uint8_t*)frame->data[0];
        dst_stride = CVPixelBufferGetBytesPerRow(cv_img);
        src_stride = plane_strides[0];
        rows = plane_rows[0];

        if (dst_stride == src_stride) {
            memcpy(dst_addr, src_addr, src_stride * rows);
        } else {
            copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;

            for (j = 0; j < rows; j++) {
                memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
            }
        }
    }

    status = CVPixelBufferUnlockBaseAddress(cv_img, 0);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error: Could not unlock CVPixelBuffer base address: %d.\n", status);
        return AVERROR_EXTERNAL;
    }

    return 0;
}
#endif //!TARGET_OS_IPHONE

static int create_cv_pixel_buffer(AVCodecContext   *avctx,
                                  const AVFrame    *frame,
                                  CVPixelBufferRef *cv_img)
{
    int plane_count;
    int color;
    size_t widths [AV_NUM_DATA_POINTERS];
    size_t heights[AV_NUM_DATA_POINTERS];
    size_t strides[AV_NUM_DATA_POINTERS];
    int status;
    size_t contiguous_buf_size;
#if TARGET_OS_IPHONE
    CVPixelBufferPoolRef pix_buf_pool;
    VTEncContext* vtctx = avctx->priv_data;
#else
    CFMutableDictionaryRef pix_buf_attachments = CFDictionaryCreateMutable(
        kCFAllocatorDefault,
        10,
        &kCFCopyStringDictionaryKeyCallBacks,
        &kCFTypeDictionaryValueCallBacks);

    if (!pix_buf_attachments) return AVERROR(ENOMEM);
#endif

    if (avctx->pix_fmt == AV_PIX_FMT_VIDEOTOOLBOX) {
        av_assert0(frame->format == AV_PIX_FMT_VIDEOTOOLBOX);

        *cv_img = (CVPixelBufferRef)frame->data[3];
        av_assert0(*cv_img);

        CFRetain(*cv_img);
        return 0;
    }

    memset(widths,  0, sizeof(widths));
    memset(heights, 0, sizeof(heights));
    memset(strides, 0, sizeof(strides));

    status = get_cv_pixel_info(
        avctx,
        frame,
        &color,
        &plane_count,
        widths,
        heights,
        strides,
        &contiguous_buf_size
    );

    if (status) {
        av_log(
            avctx,
            AV_LOG_ERROR,
            "Error: Cannot convert format %d color_range %d: %d\n",
            frame->format,
            av_frame_get_color_range(frame),
            status
        );

        return AVERROR_EXTERNAL;
    }

#if TARGET_OS_IPHONE
    pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
    if (!pix_buf_pool) {
        av_log(avctx, AV_LOG_ERROR, "Could not get pixel buffer pool.\n");
        return AVERROR_EXTERNAL;
    }

    status = CVPixelBufferPoolCreatePixelBuffer(NULL,
                                                pix_buf_pool,
                                                cv_img);

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Could not create pixel buffer from pool: %d.\n", status);
        return AVERROR_EXTERNAL;
    }

    status = copy_avframe_to_pixel_buffer(avctx, frame, *cv_img, strides, heights);
    if (status) {
        CFRelease(*cv_img);
        *cv_img = NULL;
        return status;
    }
#else
    AVFrame *enc_frame = av_frame_alloc();
    if (!enc_frame) return AVERROR(ENOMEM);

    status = av_frame_ref(enc_frame, frame);
    if (status) {
        av_frame_free(&enc_frame);
        return status;
    }

    status = CVPixelBufferCreateWithPlanarBytes(
        kCFAllocatorDefault,
        enc_frame->width,
        enc_frame->height,
        color,
        NULL,
        contiguous_buf_size,
        plane_count,
        (void **)enc_frame->data,
        widths,
        heights,
        strides,
        free_avframe,
        enc_frame,
        NULL,
        cv_img
    );

    add_color_attr(avctx, pix_buf_attachments);
    CVBufferSetAttachments(*cv_img, pix_buf_attachments, kCVAttachmentMode_ShouldPropagate);
    CFRelease(pix_buf_attachments);

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error: Could not create CVPixelBuffer: %d\n", status);
        return AVERROR_EXTERNAL;
    }
#endif

    return 0;
}
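
/*
 * Per-frame encode options. Currently only one is needed: forcing a keyframe
 * when the caller marked the AVFrame as an I-picture.
 */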

static int create_encoder_dict_h264(const AVFrame *frame,
                                    CFDictionaryRef* dict_out)
{
    CFDictionaryRef dict = NULL;
    if (frame->pict_type == AV_PICTURE_TYPE_I) {
        const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
        const void *vals[] = { kCFBooleanTrue };

        dict = CFDictionaryCreate(NULL, keys, vals, 1, NULL, NULL);
        if (!dict) return AVERROR(ENOMEM);
    }

    *dict_out = dict;
    return 0;
}
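
/*
 * Example of the resulting per-frame options (illustrative): a frame marked
 * AV_PICTURE_TYPE_I yields a one-entry dictionary equivalent to
 *
 *     { kVTEncodeFrameOptionKey_ForceKeyFrame : kCFBooleanTrue }
 *
 * which makes VTCompressionSessionEncodeFrame() emit a keyframe at that
 * position. Any other picture type leaves *dict_out NULL, so no per-frame
 * options are passed and the session keeps its own GOP decisions.
 */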

static int vtenc_send_frame(AVCodecContext *avctx,
                            VTEncContext   *vtctx,
                            const AVFrame  *frame)
{
    CMTime time;
    CFDictionaryRef frame_dict;
    CVPixelBufferRef cv_img = NULL;
    AVFrameSideData *side_data = NULL;
    ExtraSEI *sei = NULL;
    int status = create_cv_pixel_buffer(avctx, frame, &cv_img);

    if (status) return status;

    status = create_encoder_dict_h264(frame, &frame_dict);
    if (status) {
        CFRelease(cv_img);
        return status;
    }

    side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
    if (vtctx->a53_cc && side_data && side_data->size) {
        sei = av_mallocz(sizeof(*sei));
        if (!sei) {
            av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
        } else {
            int ret = ff_alloc_a53_sei(frame, 0, &sei->data, &sei->size);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
                av_free(sei);
                sei = NULL;
            }
        }
    }

    time = CMTimeMake(frame->pts * avctx->time_base.num, avctx->time_base.den);
    status = VTCompressionSessionEncodeFrame(
        vtctx->session,
        cv_img,
        time,
        kCMTimeInvalid,
        frame_dict,
        sei,
        NULL
    );

    if (frame_dict) CFRelease(frame_dict);
    CFRelease(cv_img);

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error: cannot encode frame: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    return 0;
}
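
/*
 * Worked example of the pts -> CMTime mapping above (values illustrative):
 * with avctx->time_base = 1001/30000 (NTSC 29.97 fps) and frame->pts = 3,
 *
 *     CMTimeMake(3 * 1001, 30000)  ->  3003/30000 s  ~=  100.1 ms
 *
 * i.e. the presentation time of the fourth frame. The duration argument is
 * kCMTimeInvalid (unknown); VideoToolbox accepts this and works from the
 * successive presentation timestamps instead.
 */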

/* Not av_cold: this is the per-frame encode entry point, not init/close. */
static int vtenc_frame(
    AVCodecContext *avctx,
    AVPacket       *pkt,
    const AVFrame  *frame,
    int            *got_packet)
{
    VTEncContext *vtctx = avctx->priv_data;
    bool get_frame;
    int status;
    CMSampleBufferRef buf = NULL;
    ExtraSEI *sei = NULL;

    if (frame) {
        status = vtenc_send_frame(avctx, vtctx, frame);

        if (status) {
            status = AVERROR_EXTERNAL;
            goto end_nopkt;
        }

        if (vtctx->frame_ct_in == 0) {
            vtctx->first_pts = frame->pts;
        } else if (vtctx->frame_ct_in == 1 && vtctx->has_b_frames) {
            vtctx->dts_delta = frame->pts - vtctx->first_pts;
        }

        vtctx->frame_ct_in++;
    } else if (!vtctx->flushing) {
        vtctx->flushing = true;

        status = VTCompressionSessionCompleteFrames(vtctx->session,
                                                    kCMTimeIndefinite);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error flushing frames: %d\n", status);
            status = AVERROR_EXTERNAL;
            goto end_nopkt;
        }
    }

    *got_packet = 0;
    get_frame = vtctx->dts_delta >= 0 || !frame;
    if (!get_frame) {
        status = 0;
        goto end_nopkt;
    }

    status = vtenc_q_pop(vtctx, !frame, &buf, &sei);
    if (status) goto end_nopkt;
    if (!buf)   goto end_nopkt;

    status = vtenc_cm_to_avpacket(avctx, buf, pkt, sei);
    if (sei) {
        if (sei->data) av_free(sei->data);
        av_free(sei);
    }
    CFRelease(buf);
    if (status) goto end_nopkt;

    *got_packet = 1;
    return 0;

end_nopkt:
    av_packet_unref(pkt);
    return status;
}
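
/*
 * Reordering sketch with illustrative pts values: suppose B-frames are
 * enabled and the first two frames arrive with pts 0 and 3. dts_delta then
 * becomes 3 - 0 = 3, the gap used when packets are built so that
 * dts <= pts can hold once decode timestamps are shifted back by dts_delta.
 * Until that second frame arrives, dts_delta is still negative (assuming
 * init primes it that way for B-frame sessions), get_frame stays false,
 * and no packet is returned -- which is why output lags input here and the
 * codec advertises AV_CODEC_CAP_DELAY below.
 */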

static int vtenc_populate_extradata(AVCodecContext   *avctx,
                                    CMVideoCodecType codec_type,
                                    CFStringRef      profile_level,
                                    CFNumberRef      gamma_level,
                                    CFDictionaryRef  enc_info,
                                    CFDictionaryRef  pixel_buffer_info)
{
    VTEncContext *vtctx = avctx->priv_data;
    AVFrame *frame = av_frame_alloc();
    int y_size = avctx->width * avctx->height;
    int chroma_size = (avctx->width / 2) * (avctx->height / 2);
    CMSampleBufferRef buf = NULL;
    int status;

    if (!frame)
        return AVERROR(ENOMEM);

    frame->buf[0] = av_buffer_alloc(y_size + 2 * chroma_size);

    if (!frame->buf[0]) {
        status = AVERROR(ENOMEM);
        goto pe_cleanup;
    }

    status = vtenc_create_encoder(avctx,
                                  codec_type,
                                  profile_level,
                                  gamma_level,
                                  enc_info,
                                  pixel_buffer_info,
                                  &vtctx->session);
    if (status)
        goto pe_cleanup;

    /* Build a throwaway solid-black frame; its only purpose is to make the
     * session emit one encoded sample that carries the parameter sets. */
    frame->data[0] = frame->buf[0]->data;
    memset(frame->data[0],   0, y_size);

    frame->data[1] = frame->buf[0]->data + y_size;
    memset(frame->data[1], 128, chroma_size);

    if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
        frame->data[2] = frame->buf[0]->data + y_size + chroma_size;
        memset(frame->data[2], 128, chroma_size);
    }

    frame->linesize[0] = avctx->width;

    if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
        frame->linesize[1] =
        frame->linesize[2] = (avctx->width + 1) / 2;
    } else {
        frame->linesize[1] = (avctx->width + 1) / 2;
    }

    frame->format          = avctx->pix_fmt;
    frame->width           = avctx->width;
    frame->height          = avctx->height;
    av_frame_set_colorspace(frame, avctx->colorspace);
    av_frame_set_color_range(frame, avctx->color_range);
    frame->color_trc       = avctx->color_trc;
    frame->color_primaries = avctx->color_primaries;

    frame->pts = 0;
    status = vtenc_send_frame(avctx, vtctx, frame);

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error sending frame: %d\n", status);
        goto pe_cleanup;
    }

    //Populates extradata - output frames are flushed and param sets are available.
    status = VTCompressionSessionCompleteFrames(vtctx->session,
                                                kCMTimeIndefinite);

    if (status)
        goto pe_cleanup;

    status = vtenc_q_pop(vtctx, 0, &buf, NULL);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error popping encoded sample: %d\n", status);
        goto pe_cleanup;
    }

    CFRelease(buf);

pe_cleanup:
    if (vtctx->session)
        CFRelease(vtctx->session);

    vtctx->session = NULL;
    vtctx->frame_ct_out = 0;

    av_frame_unref(frame);
    av_frame_free(&frame);

    av_assert0(status != 0 || (avctx->extradata && avctx->extradata_size > 0));

    return status;
}
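
/*
 * Size arithmetic used above, with a hypothetical 1280x720 session:
 * y_size = 1280 * 720 = 921600 bytes and chroma_size = 640 * 360 = 230400
 * bytes, so the single allocation holds 921600 + 2 * 230400 = 1382400
 * bytes -- exactly one 4:2:0 frame. By the time the queued sample has been
 * popped, the parameter sets have been extracted into avctx->extradata,
 * which the av_assert0() above verifies.
 */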

static av_cold int vtenc_close(AVCodecContext *avctx)
{
    VTEncContext *vtctx = avctx->priv_data;

    if (vtctx->session) {
        VTCompressionSessionCompleteFrames(vtctx->session,
                                           kCMTimeIndefinite);

        clear_frame_queue(vtctx);
        pthread_cond_destroy(&vtctx->cv_sample_sent);
        pthread_mutex_destroy(&vtctx->lock);
        CFRelease(vtctx->session);
        vtctx->session = NULL;
    }

    /* The color strings may exist even when session setup failed, so
     * release them unconditionally rather than returning early. */
    if (vtctx->color_primaries) {
        CFRelease(vtctx->color_primaries);
        vtctx->color_primaries = NULL;
    }

    if (vtctx->transfer_function) {
        CFRelease(vtctx->transfer_function);
        vtctx->transfer_function = NULL;
    }

    if (vtctx->ycbcr_matrix) {
        CFRelease(vtctx->ycbcr_matrix);
        vtctx->ycbcr_matrix = NULL;
    }

    return 0;
}

static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_VIDEOTOOLBOX,
    AV_PIX_FMT_NV12,
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_NONE
};

#define OFFSET(x) offsetof(VTEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = H264_PROF_AUTO }, H264_PROF_AUTO, H264_PROF_COUNT, VE, "profile" },
    { "baseline", "Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_BASELINE }, INT_MIN, INT_MAX, VE, "profile" },
    { "main",     "Main Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_MAIN     }, INT_MIN, INT_MAX, VE, "profile" },
    { "high",     "High Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_HIGH     }, INT_MIN, INT_MAX, VE, "profile" },

    { "level", "Level", OFFSET(level), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, VE, "level" },
    { "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" },
    { "3.0", "Level 3.0", 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, INT_MIN, INT_MAX, VE, "level" },
    { "3.1", "Level 3.1", 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, INT_MIN, INT_MAX, VE, "level" },
    { "3.2", "Level 3.2", 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, INT_MIN, INT_MAX, VE, "level" },
    { "4.0", "Level 4.0", 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, INT_MIN, INT_MAX, VE, "level" },
    { "4.1", "Level 4.1", 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, INT_MIN, INT_MAX, VE, "level" },
    { "4.2", "Level 4.2", 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, INT_MIN, INT_MAX, VE, "level" },
    { "5.0", "Level 5.0", 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, INT_MIN, INT_MAX, VE, "level" },
    { "5.1", "Level 5.1", 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, INT_MIN, INT_MAX, VE, "level" },
    { "5.2", "Level 5.2", 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, INT_MIN, INT_MAX, VE, "level" },

    { "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL,
        { .i64 = 0 }, 0, 1, VE },

    { "coder", "Entropy coding", OFFSET(entropy), AV_OPT_TYPE_INT, { .i64 = VT_ENTROPY_NOT_SET }, VT_ENTROPY_NOT_SET, VT_CABAC, VE, "coder" },
    { "cavlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
    { "vlc",   "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
    { "cabac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
    { "ac",    "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },

    { "realtime", "Hint that encoding should happen in real-time if not faster (e.g. capturing from camera).",
        OFFSET(realtime), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },

    { "frames_before", "Other frames will come before the frames in this session. This helps smooth concatenation issues.",
        OFFSET(frames_before), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "frames_after", "Other frames will come after the frames in this session. This helps smooth concatenation issues.",
        OFFSET(frames_after), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },

    { "a53cc", "Use A53 Closed Captions (if available)", OFFSET(a53_cc), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE },

    { NULL },
};
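
/*
 * Usage sketch (assuming an ffmpeg build with VideoToolbox enabled; flag
 * values are illustrative):
 *
 *     ffmpeg -i input.mov -c:v h264_videotoolbox \
 *            -profile:v high -level:v 4.1 -coder:v cabac \
 *            -allow_sw 1 -b:v 4M output.mp4
 *
 * Hardware sessions may reject invalid combinations (e.g. CABAC requires
 * Main or High profile per the H.264 spec) rather than silently
 * downgrading them.
 */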

static const AVClass h264_videotoolbox_class = {
    .class_name = "h264_videotoolbox",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_h264_videotoolbox_encoder = {
    .name             = "h264_videotoolbox",
    .long_name        = NULL_IF_CONFIG_SMALL("VideoToolbox H.264 Encoder"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_H264,
    .priv_data_size   = sizeof(VTEncContext),
    .pix_fmts         = pix_fmts,
    .init             = vtenc_init,
    .encode2          = vtenc_frame,
    .close            = vtenc_close,
    .capabilities     = AV_CODEC_CAP_DELAY,
    .priv_class       = &h264_videotoolbox_class,
    .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
                        FF_CODEC_CAP_INIT_CLEANUP,
};
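
/*
 * Drain sketch (caller side, using the encode2 API this codec implements;
 * illustrative only): AV_CODEC_CAP_DELAY means output lags input, so after
 * the last real frame the caller keeps passing frame == NULL until no more
 * packets come out:
 *
 *     AVPacket pkt;
 *     int got = 1;
 *     while (got) {
 *         av_init_packet(&pkt);
 *         pkt.data = NULL;
 *         pkt.size = 0;
 *         if (avcodec_encode_video2(avctx, &pkt, NULL, &got) < 0)
 *             break;
 *         if (got) {
 *             // ... write pkt ...
 *             av_packet_unref(&pkt);
 *         }
 *     }
 *
 * The first NULL frame makes vtenc_frame() call
 * VTCompressionSessionCompleteFrames(); subsequent calls drain the queued
 * CMSampleBuffers one packet at a time.
 */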