You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1229 lines
41KB

  1. /*
  2. * Videotoolbox hardware acceleration
  3. *
  4. * copyright (c) 2012 Sebastien Zwickert
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. #include "config.h"
  23. #include "videotoolbox.h"
  24. #include "libavutil/hwcontext_videotoolbox.h"
  25. #include "vt_internal.h"
  26. #include "libavutil/avutil.h"
  27. #include "libavutil/hwcontext.h"
  28. #include "bytestream.h"
  29. #include "decode.h"
  30. #include "h264dec.h"
  31. #include "hevcdec.h"
  32. #include "mpegvideo.h"
  33. #include <TargetConditionals.h>
  34. #ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
  35. # define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
  36. #endif
  37. #ifndef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
  38. # define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder CFSTR("EnableHardwareAcceleratedVideoDecoder")
  39. #endif
  40. #if !HAVE_KCMVIDEOCODECTYPE_HEVC
  41. enum { kCMVideoCodecType_HEVC = 'hvc1' };
  42. #endif
  43. #define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12
  44. typedef struct VTHWFrame {
  45. CVPixelBufferRef pixbuf;
  46. AVBufferRef *hw_frames_ctx;
  47. } VTHWFrame;
  48. static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
  49. {
  50. VTHWFrame *ref = (VTHWFrame *)data;
  51. av_buffer_unref(&ref->hw_frames_ctx);
  52. CVPixelBufferRelease(ref->pixbuf);
  53. av_free(data);
  54. }
  55. static int videotoolbox_buffer_copy(VTContext *vtctx,
  56. const uint8_t *buffer,
  57. uint32_t size)
  58. {
  59. void *tmp;
  60. tmp = av_fast_realloc(vtctx->bitstream,
  61. &vtctx->allocated_size,
  62. size);
  63. if (!tmp)
  64. return AVERROR(ENOMEM);
  65. vtctx->bitstream = tmp;
  66. memcpy(vtctx->bitstream, buffer, size);
  67. vtctx->bitstream_size = size;
  68. return 0;
  69. }
  70. static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
  71. {
  72. VTHWFrame *ref = (VTHWFrame *)frame->buf[0]->data;
  73. if (!ref->pixbuf) {
  74. av_log(avctx, AV_LOG_ERROR, "No frame decoded?\n");
  75. av_frame_unref(frame);
  76. return AVERROR_EXTERNAL;
  77. }
  78. frame->data[3] = (uint8_t*)ref->pixbuf;
  79. if (ref->hw_frames_ctx) {
  80. av_buffer_unref(&frame->hw_frames_ctx);
  81. frame->hw_frames_ctx = av_buffer_ref(ref->hw_frames_ctx);
  82. if (!frame->hw_frames_ctx)
  83. return AVERROR(ENOMEM);
  84. }
  85. return 0;
  86. }
  87. int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
  88. {
  89. size_t size = sizeof(VTHWFrame);
  90. uint8_t *data = NULL;
  91. AVBufferRef *buf = NULL;
  92. int ret = ff_attach_decode_data(frame);
  93. FrameDecodeData *fdd;
  94. if (ret < 0)
  95. return ret;
  96. data = av_mallocz(size);
  97. if (!data)
  98. return AVERROR(ENOMEM);
  99. buf = av_buffer_create(data, size, videotoolbox_buffer_release, NULL, 0);
  100. if (!buf) {
  101. av_freep(&data);
  102. return AVERROR(ENOMEM);
  103. }
  104. frame->buf[0] = buf;
  105. fdd = (FrameDecodeData*)frame->private_ref->data;
  106. fdd->post_process = videotoolbox_postproc_frame;
  107. frame->width = avctx->width;
  108. frame->height = avctx->height;
  109. frame->format = avctx->pix_fmt;
  110. return 0;
  111. }
  112. #define AV_W8(p, v) *(p) = (v)
/**
 * Build an ISO/IEC 14496-15 "avcC" (AVCDecoderConfigurationRecord) box from
 * the currently active SPS/PPS, for use as a CMFormatDescription extension.
 *
 * Layout written: 5-byte header, 1-byte SPS count (fixed at 1), 2-byte SPS
 * length + SPS, 1-byte PPS count (fixed at 1), 2-byte PPS length + PPS.
 *
 * @return a newly created CFData (caller owns it), or NULL on OOM.
 */
CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    H264Context *h = avctx->priv_data;
    CFDataRef data = NULL;
    uint8_t *p;
    /* 6-byte fixed header + 2-byte SPS length + SPS
     * + (1-byte PPS count + 2-byte PPS length) + PPS */
    int vt_extradata_size = 6 + 2 + h->ps.sps->data_size + 3 + h->ps.pps->data_size;
    uint8_t *vt_extradata = av_malloc(vt_extradata_size);
    if (!vt_extradata)
        return NULL;

    p = vt_extradata;

    AV_W8(p + 0, 1); /* version */
    AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
    AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
    AV_W8(p + 3, h->ps.sps->data[3]); /* level */
    AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
    AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
    AV_WB16(p + 6, h->ps.sps->data_size);
    memcpy(p + 8, h->ps.sps->data, h->ps.sps->data_size);
    p += 8 + h->ps.sps->data_size;
    AV_W8(p + 0, 1); /* number of pps */
    AV_WB16(p + 1, h->ps.pps->data_size);
    memcpy(p + 3, h->ps.pps->data, h->ps.pps->data_size);

    p += 3 + h->ps.pps->data_size;
    /* verify the writes above exactly filled the computed size */
    av_assert0(p - vt_extradata == vt_extradata_size);

    // save sps header (profile/level) used to create decoder session,
    // so we can detect changes and recreate it.
    if (vtctx)
        memcpy(vtctx->sps, h->ps.sps->data + 1, 3);

    data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
    av_free(vt_extradata);
    return data;
}
/**
 * Build an ISO/IEC 14496-15 "hvcC" (HEVCDecoderConfigurationRecord) box from
 * all VPS/SPS/PPS currently present in the parameter-set lists, for use as a
 * CMFormatDescription extension.
 *
 * Layout: 23-byte fixed header derived from the general profile/tier/level
 * and VUI, followed by three NAL-unit arrays (VPS, SPS, PPS), each with a
 * 3-byte array header and 2-byte length-prefixed NAL units.
 *
 * @return a newly created CFData (caller owns it), or NULL on OOM.
 */
CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
{
    HEVCContext *h = avctx->priv_data;
    int i, num_vps = 0, num_sps = 0, num_pps = 0;
    const HEVCVPS *vps = h->ps.vps;
    const HEVCSPS *sps = h->ps.sps;
    const HEVCPPS *pps = h->ps.pps;
    PTLCommon ptlc = vps->ptl.general_ptl;
    VUI vui = sps->vui;
    uint8_t parallelismType;
    CFDataRef data = NULL;
    uint8_t *p;
    /* 23-byte fixed record + one 3-byte array header per PS type */
    int vt_extradata_size = 23 + 3 + 3 + 3;
    uint8_t *vt_extradata;

/* Count the parameter sets of one type and add their length-prefixed
 * sizes to the total; T is the uppercase letter, t the lowercase one. */
#define COUNT_SIZE_PS(T, t) \
    for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
        if (h->ps.t##ps_list[i]) { \
            const HEVC##T##PS *lps = (const HEVC##T##PS *)h->ps.t##ps_list[i]->data; \
            vt_extradata_size += 2 + lps->data_size; \
            num_##t##ps++; \
        } \
    }

    COUNT_SIZE_PS(V, v)
    COUNT_SIZE_PS(S, s)
    COUNT_SIZE_PS(P, p)

    vt_extradata = av_malloc(vt_extradata_size);
    if (!vt_extradata)
        return NULL;
    p = vt_extradata;

    /* unsigned int(8) configurationVersion = 1; */
    AV_W8(p + 0, 1);

    /*
     * unsigned int(2) general_profile_space;
     * unsigned int(1) general_tier_flag;
     * unsigned int(5) general_profile_idc;
     */
    AV_W8(p + 1, ptlc.profile_space << 6 |
                 ptlc.tier_flag     << 5 |
                 ptlc.profile_idc);

    /* unsigned int(32) general_profile_compatibility_flags; */
    memcpy(p + 2, ptlc.profile_compatibility_flag, 4);

    /* unsigned int(48) general_constraint_indicator_flags; */
    AV_W8(p + 6, ptlc.progressive_source_flag    << 7 |
                 ptlc.interlaced_source_flag     << 6 |
                 ptlc.non_packed_constraint_flag << 5 |
                 ptlc.frame_only_constraint_flag << 4);
    AV_W8(p + 7, 0);
    AV_WN32(p + 8, 0);

    /* unsigned int(8) general_level_idc; */
    AV_W8(p + 12, ptlc.level_idc);

    /*
     * bit(4) reserved = '1111'b;
     * unsigned int(12) min_spatial_segmentation_idc;
     */
    AV_W8(p + 13, 0xf0 | (vui.min_spatial_segmentation_idc >> 4));
    AV_W8(p + 14, vui.min_spatial_segmentation_idc & 0xff);

    /*
     * bit(6) reserved = '111111'b;
     * unsigned int(2) parallelismType;
     */
    if (!vui.min_spatial_segmentation_idc)
        parallelismType = 0;
    else if (pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
        parallelismType = 0;
    else if (pps->entropy_coding_sync_enabled_flag)
        parallelismType = 3;
    else if (pps->tiles_enabled_flag)
        parallelismType = 2;
    else
        parallelismType = 1;
    AV_W8(p + 15, 0xfc | parallelismType);

    /*
     * bit(6) reserved = '111111'b;
     * unsigned int(2) chromaFormat;
     */
    AV_W8(p + 16, sps->chroma_format_idc | 0xfc);

    /*
     * bit(5) reserved = '11111'b;
     * unsigned int(3) bitDepthLumaMinus8;
     */
    AV_W8(p + 17, (sps->bit_depth - 8) | 0xfc);

    /*
     * bit(5) reserved = '11111'b;
     * unsigned int(3) bitDepthChromaMinus8;
     */
    AV_W8(p + 18, (sps->bit_depth_chroma - 8) | 0xfc);

    /* bit(16) avgFrameRate; */
    AV_WB16(p + 19, 0);

    /*
     * bit(2) constantFrameRate;
     * bit(3) numTemporalLayers;
     * bit(1) temporalIdNested;
     * unsigned int(2) lengthSizeMinusOne;
     */
    AV_W8(p + 21, 0                             << 6 |
                  sps->max_sub_layers           << 3 |
                  sps->temporal_id_nesting_flag << 2 |
                  3);

    /* unsigned int(8) numOfArrays; */
    AV_W8(p + 22, 3);

    p += 23;

/* Emit one NAL-unit array (header + all stored parameter sets of type T). */
#define APPEND_PS(T, t) \
    /* \
     * bit(1) array_completeness; \
     * unsigned int(1) reserved = 0; \
     * unsigned int(6) NAL_unit_type; \
     */ \
    AV_W8(p, 1 << 7 | \
             HEVC_NAL_##T##PS & 0x3f); \
    /* unsigned int(16) numNalus; */ \
    AV_WB16(p + 1, num_##t##ps); \
    p += 3; \
    for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
        if (h->ps.t##ps_list[i]) { \
            const HEVC##T##PS *lps = (const HEVC##T##PS *)h->ps.t##ps_list[i]->data; \
            /* unsigned int(16) nalUnitLength; */ \
            AV_WB16(p, lps->data_size); \
            /* bit(8*nalUnitLength) nalUnit; */ \
            memcpy(p + 2, lps->data, lps->data_size); \
            p += 2 + lps->data_size; \
        } \
    }

    APPEND_PS(V, v)
    APPEND_PS(S, s)
    APPEND_PS(P, p)

    /* verify the writes above exactly filled the computed size */
    av_assert0(p - vt_extradata == vt_extradata_size);

    data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
    av_free(vt_extradata);
    return data;
}
  276. int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
  277. const uint8_t *buffer,
  278. uint32_t size)
  279. {
  280. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  281. H264Context *h = avctx->priv_data;
  282. if (h->is_avc == 1) {
  283. return videotoolbox_buffer_copy(vtctx, buffer, size);
  284. }
  285. return 0;
  286. }
  287. static int videotoolbox_h264_decode_params(AVCodecContext *avctx,
  288. int type,
  289. const uint8_t *buffer,
  290. uint32_t size)
  291. {
  292. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  293. H264Context *h = avctx->priv_data;
  294. // save sps header (profile/level) used to create decoder session
  295. if (!vtctx->sps[0])
  296. memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
  297. if (type == H264_NAL_SPS) {
  298. if (size > 4 && memcmp(vtctx->sps, buffer + 1, 3) != 0) {
  299. vtctx->reconfig_needed = true;
  300. memcpy(vtctx->sps, buffer + 1, 3);
  301. }
  302. }
  303. // pass-through SPS/PPS changes to the decoder
  304. return ff_videotoolbox_h264_decode_slice(avctx, buffer, size);
  305. }
  306. static int videotoolbox_common_decode_slice(AVCodecContext *avctx,
  307. const uint8_t *buffer,
  308. uint32_t size)
  309. {
  310. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  311. void *tmp;
  312. tmp = av_fast_realloc(vtctx->bitstream,
  313. &vtctx->allocated_size,
  314. vtctx->bitstream_size+size+4);
  315. if (!tmp)
  316. return AVERROR(ENOMEM);
  317. vtctx->bitstream = tmp;
  318. AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
  319. memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);
  320. vtctx->bitstream_size += size + 4;
  321. return 0;
  322. }
  323. int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
  324. const uint8_t *buffer,
  325. uint32_t size)
  326. {
  327. H264Context *h = avctx->priv_data;
  328. if (h->is_avc == 1)
  329. return 0;
  330. return videotoolbox_common_decode_slice(avctx, buffer, size);
  331. }
  332. int ff_videotoolbox_uninit(AVCodecContext *avctx)
  333. {
  334. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  335. if (vtctx) {
  336. av_freep(&vtctx->bitstream);
  337. if (vtctx->frame)
  338. CVPixelBufferRelease(vtctx->frame);
  339. }
  340. return 0;
  341. }
  342. #if CONFIG_VIDEOTOOLBOX
  343. // Return the AVVideotoolboxContext that matters currently. Where it comes from
  344. // depends on the API used.
  345. static AVVideotoolboxContext *videotoolbox_get_context(AVCodecContext *avctx)
  346. {
  347. // Somewhat tricky because the user can call av_videotoolbox_default_free()
  348. // at any time, even when the codec is closed.
  349. if (avctx->internal && avctx->internal->hwaccel_priv_data) {
  350. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  351. if (vtctx->vt_ctx)
  352. return vtctx->vt_ctx;
  353. }
  354. return avctx->hwaccel_context;
  355. }
/**
 * Transfer ownership of the just-decoded CVPixelBuffer from vtctx->frame
 * into the frame's VTHWFrame payload, and attach a matching hw_frames_ctx.
 *
 * If the cached frames context no longer matches the decoded buffer's
 * format/dimensions (e.g. after a resolution change), a fresh one is
 * created on the same device and cached for subsequent frames.
 */
static int videotoolbox_buffer_create(AVCodecContext *avctx, AVFrame *frame)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)vtctx->frame;
    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
    enum AVPixelFormat sw_format = av_map_videotoolbox_format_to_pixfmt(pixel_format);
    int width = CVPixelBufferGetWidth(pixbuf);
    int height = CVPixelBufferGetHeight(pixbuf);
    AVHWFramesContext *cached_frames;
    VTHWFrame *ref;
    int ret;

    /* frame must have been set up by ff_videotoolbox_alloc_frame and not
     * already carry a pixel buffer */
    if (!frame->buf[0] || frame->data[3]) {
        av_log(avctx, AV_LOG_ERROR, "videotoolbox: invalid state\n");
        av_frame_unref(frame);
        return AVERROR_EXTERNAL;
    }

    ref = (VTHWFrame *)frame->buf[0]->data;

    if (ref->pixbuf)
        CVPixelBufferRelease(ref->pixbuf);
    /* hand the retained pixel buffer over to the frame payload */
    ref->pixbuf = vtctx->frame;
    vtctx->frame = NULL;

    // Old API code path.
    if (!vtctx->cached_hw_frames_ctx)
        return 0;

    cached_frames = (AVHWFramesContext*)vtctx->cached_hw_frames_ctx->data;

    /* rebuild the cached frames context if the stream geometry changed */
    if (cached_frames->sw_format != sw_format ||
        cached_frames->width != width ||
        cached_frames->height != height) {
        AVBufferRef *hw_frames_ctx = av_hwframe_ctx_alloc(cached_frames->device_ref);
        AVHWFramesContext *hw_frames;
        if (!hw_frames_ctx)
            return AVERROR(ENOMEM);

        hw_frames = (AVHWFramesContext*)hw_frames_ctx->data;
        hw_frames->format = cached_frames->format;
        hw_frames->sw_format = sw_format;
        hw_frames->width = width;
        hw_frames->height = height;

        ret = av_hwframe_ctx_init(hw_frames_ctx);
        if (ret < 0) {
            av_buffer_unref(&hw_frames_ctx);
            return ret;
        }

        av_buffer_unref(&vtctx->cached_hw_frames_ctx);
        vtctx->cached_hw_frames_ctx = hw_frames_ctx;
    }

    av_buffer_unref(&ref->hw_frames_ctx);
    ref->hw_frames_ctx = av_buffer_ref(vtctx->cached_hw_frames_ctx);
    if (!ref->hw_frames_ctx)
        return AVERROR(ENOMEM);

    return 0;
}
  407. static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
  408. {
  409. int i;
  410. uint8_t b;
  411. for (i = 3; i >= 0; i--) {
  412. b = (length >> (i * 7)) & 0x7F;
  413. if (i != 0)
  414. b |= 0x80;
  415. bytestream2_put_byteu(pb, b);
  416. }
  417. }
/**
 * Build an MPEG-4 "esds" (ES_Descriptor) box wrapping avctx->extradata,
 * for use as a CMFormatDescription extension with MPEG-4 Part 2 video.
 *
 * @return a newly created CFData (caller owns it), or NULL on OOM.
 */
static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
{
    CFDataRef data;
    uint8_t *rw_extradata;
    PutByteContext pb;
    /* version+flags + ES_Descr header + DecoderConfigDescr + DecSpecificInfo
     * header + extradata + SLConfigDescriptor */
    int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
    // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
    int config_size = 13 + 5 + avctx->extradata_size;
    int s;

    if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
        return NULL;

    bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
    bytestream2_put_byteu(&pb, 0);        // version
    bytestream2_put_ne24(&pb, 0);         // flags

    // elementary stream descriptor
    bytestream2_put_byteu(&pb, 0x03);     // ES_DescrTag
    videotoolbox_write_mp4_descr_length(&pb, full_size);
    bytestream2_put_ne16(&pb, 0);         // esid
    bytestream2_put_byteu(&pb, 0);        // stream priority (0-32)

    // decoder configuration descriptor
    bytestream2_put_byteu(&pb, 0x04);     // DecoderConfigDescrTag
    videotoolbox_write_mp4_descr_length(&pb, config_size);
    bytestream2_put_byteu(&pb, 32);       // object type indication. 32 = AV_CODEC_ID_MPEG4
    bytestream2_put_byteu(&pb, 0x11);     // stream type
    bytestream2_put_ne24(&pb, 0);         // buffer size
    bytestream2_put_ne32(&pb, 0);         // max bitrate
    bytestream2_put_ne32(&pb, 0);         // avg bitrate

    // decoder specific descriptor
    bytestream2_put_byteu(&pb, 0x05);     // DecSpecificInfoTag
    videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);
    bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);

    // SLConfigDescriptor
    bytestream2_put_byteu(&pb, 0x06);     // SLConfigDescrTag
    bytestream2_put_byteu(&pb, 0x01);     // length
    bytestream2_put_byteu(&pb, 0x02);     // predefined SL config

    /* NOTE: the created CFData covers the whole padded buffer */
    s = bytestream2_size_p(&pb);

    data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);

    av_freep(&rw_extradata);
    return data;
}
/**
 * Wrap the accumulated bitstream in a CMSampleBuffer suitable for
 * VTDecompressionSessionDecodeFrame.
 *
 * The block buffer references `buffer` without copying (kCFAllocatorNull
 * block allocator), so `buffer` must outlive the returned sample buffer.
 *
 * @return a newly created CMSampleBuffer (caller must CFRelease it),
 *         or NULL on failure.
 */
static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
                                                           void *buffer,
                                                           int size)
{
    OSStatus status;
    CMBlockBufferRef  block_buf;
    CMSampleBufferRef sample_buf;

    block_buf  = NULL;
    sample_buf = NULL;

    status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
                                                buffer,             // memoryBlock
                                                size,               // blockLength
                                                kCFAllocatorNull,   // blockAllocator
                                                NULL,               // customBlockSource
                                                0,                  // offsetToData
                                                size,               // dataLength
                                                0,                  // flags
                                                &block_buf);

    if (!status) {
        status = CMSampleBufferCreate(kCFAllocatorDefault,  // allocator
                                      block_buf,            // dataBuffer
                                      TRUE,                 // dataReady
                                      0,                    // makeDataReadyCallback
                                      0,                    // makeDataReadyRefcon
                                      fmt_desc,             // formatDescription
                                      1,                    // numSamples
                                      0,                    // numSampleTimingEntries
                                      NULL,                 // sampleTimingArray
                                      0,                    // numSampleSizeEntries
                                      NULL,                 // sampleSizeArray
                                      &sample_buf);
    }

    /* the sample buffer retains the block buffer; drop our reference */
    if (block_buf)
        CFRelease(block_buf);

    return sample_buf;
}
  494. static void videotoolbox_decoder_callback(void *opaque,
  495. void *sourceFrameRefCon,
  496. OSStatus status,
  497. VTDecodeInfoFlags flags,
  498. CVImageBufferRef image_buffer,
  499. CMTime pts,
  500. CMTime duration)
  501. {
  502. AVCodecContext *avctx = opaque;
  503. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  504. if (vtctx->frame) {
  505. CVPixelBufferRelease(vtctx->frame);
  506. vtctx->frame = NULL;
  507. }
  508. if (!image_buffer) {
  509. av_log(NULL, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n");
  510. return;
  511. }
  512. vtctx->frame = CVPixelBufferRetain(image_buffer);
  513. }
  514. static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
  515. {
  516. OSStatus status;
  517. CMSampleBufferRef sample_buf;
  518. AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
  519. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  520. sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
  521. vtctx->bitstream,
  522. vtctx->bitstream_size);
  523. if (!sample_buf)
  524. return -1;
  525. status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
  526. sample_buf,
  527. 0, // decodeFlags
  528. NULL, // sourceFrameRefCon
  529. 0); // infoFlagsOut
  530. if (status == noErr)
  531. status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);
  532. CFRelease(sample_buf);
  533. return status;
  534. }
  535. static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
  536. CFDictionaryRef decoder_spec,
  537. int width,
  538. int height)
  539. {
  540. CMFormatDescriptionRef cm_fmt_desc;
  541. OSStatus status;
  542. status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
  543. codec_type,
  544. width,
  545. height,
  546. decoder_spec, // Dictionary of extension
  547. &cm_fmt_desc);
  548. if (status)
  549. return NULL;
  550. return cm_fmt_desc;
  551. }
/**
 * Build the destination-image-buffer attributes dictionary for the
 * decompression session: pixel format (only if non-zero), dimensions, and
 * an empty IOSurface properties dict to request IOSurface-backed buffers.
 *
 * @return a newly created CFDictionary (caller owns it).
 */
static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
                                                             int height,
                                                             OSType pix_fmt)
{
    CFMutableDictionaryRef buffer_attributes;
    CFMutableDictionaryRef io_surface_properties;
    CFNumberRef cv_pix_fmt;
    CFNumberRef w;
    CFNumberRef h;

    w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
    h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
    cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);

    buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                  4,
                                                  &kCFTypeDictionaryKeyCallBacks,
                                                  &kCFTypeDictionaryValueCallBacks);
    io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                      0,
                                                      &kCFTypeDictionaryKeyCallBacks,
                                                      &kCFTypeDictionaryValueCallBacks);

    /* pix_fmt == 0 means "let VideoToolbox pick": omit the key entirely */
    if (pix_fmt)
        CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
#if TARGET_OS_IPHONE
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferOpenGLESCompatibilityKey, kCFBooleanTrue);
#else
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey, kCFBooleanTrue);
#endif

    /* the dictionary retains the inserted values; drop our references */
    CFRelease(io_surface_properties);
    CFRelease(cv_pix_fmt);
    CFRelease(w);
    CFRelease(h);

    return buffer_attributes;
}
/**
 * Build the videoDecoderSpecification dictionary: request (HEVC) or require
 * (other codecs) the hardware decoder, and attach codec extradata as the
 * appropriate sample-description extension atom (esds/avcC/hvcC).
 *
 * @return a newly created CFDictionary (caller owns it).
 */
static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
                                                          AVCodecContext *avctx)
{
    CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                                   0,
                                                                   &kCFTypeDictionaryKeyCallBacks,
                                                                   &kCFTypeDictionaryValueCallBacks);

    /* HEVC hardware support is not universal, so only *enable* it there;
     * for the other codecs hardware decode is required outright */
    CFDictionarySetValue(config_info,
                         codec_type == kCMVideoCodecType_HEVC ?
                            kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder :
                            kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
                         kCFBooleanTrue);

    CFMutableDictionaryRef avc_info;
    CFDataRef data = NULL;

    avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                         1,
                                         &kCFTypeDictionaryKeyCallBacks,
                                         &kCFTypeDictionaryValueCallBacks);

    switch (codec_type) {
    case kCMVideoCodecType_MPEG4Video :
        if (avctx->extradata_size)
            data = videotoolbox_esds_extradata_create(avctx);
        if (data)
            CFDictionarySetValue(avc_info, CFSTR("esds"), data);
        break;
    case kCMVideoCodecType_H264 :
        data = ff_videotoolbox_avcc_extradata_create(avctx);
        if (data)
            CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
        break;
    case kCMVideoCodecType_HEVC :
        data = ff_videotoolbox_hvcc_extradata_create(avctx);
        if (data)
            CFDictionarySetValue(avc_info, CFSTR("hvcC"), data);
        break;
    default:
        break;
    }

    CFDictionarySetValue(config_info,
            kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
            avc_info);

    /* the dictionaries retain the inserted values; drop our references */
    if (data)
        CFRelease(data);

    CFRelease(avc_info);

    return config_info;
}
/**
 * Create the VTDecompressionSession for the current codec and dimensions:
 * map the codec ID to a CMVideoCodecType, build the decoder specification
 * and format description, then open the session and translate the
 * VideoToolbox status into an FFmpeg error code.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int videotoolbox_start(AVCodecContext *avctx)
{
    AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
    OSStatus status;
    VTDecompressionOutputCallbackRecord decoder_cb;
    CFDictionaryRef decoder_spec;
    CFDictionaryRef buf_attr;

    if (!videotoolbox) {
        av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
        return -1;
    }

    switch( avctx->codec_id ) {
    case AV_CODEC_ID_H263 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
        break;
    case AV_CODEC_ID_H264 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
        break;
    case AV_CODEC_ID_HEVC :
        videotoolbox->cm_codec_type = kCMVideoCodecType_HEVC;
        break;
    case AV_CODEC_ID_MPEG1VIDEO :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
        break;
    case AV_CODEC_ID_MPEG2VIDEO :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
        break;
    case AV_CODEC_ID_MPEG4 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
        break;
    default :
        break;
    }

    decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);

    if (!decoder_spec) {
        av_log(avctx, AV_LOG_ERROR, "decoder specification creation failed\n");
        return -1;
    }

    videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
                                                                decoder_spec,
                                                                avctx->width,
                                                                avctx->height);
    if (!videotoolbox->cm_fmt_desc) {
        if (decoder_spec)
            CFRelease(decoder_spec);

        av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
        return -1;
    }

    /* cv_pix_fmt_type == 0 lets VideoToolbox choose the output format */
    buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
                                                     avctx->height,
                                                     videotoolbox->cv_pix_fmt_type);

    decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
    decoder_cb.decompressionOutputRefCon   = avctx;

    status = VTDecompressionSessionCreate(NULL,                      // allocator
                                          videotoolbox->cm_fmt_desc, // videoFormatDescription
                                          decoder_spec,              // videoDecoderSpecification
                                          buf_attr,                  // destinationImageBufferAttributes
                                          &decoder_cb,               // outputCallback
                                          &videotoolbox->session);   // decompressionSessionOut

    if (decoder_spec)
        CFRelease(decoder_spec);
    if (buf_attr)
        CFRelease(buf_attr);

    /* map VideoToolbox session-creation errors to AVERROR codes */
    switch (status) {
    case kVTVideoDecoderNotAvailableNowErr:
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox session not available.\n");
        return AVERROR(ENOSYS);
    case kVTVideoDecoderUnsupportedDataFormatErr:
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox does not support this format.\n");
        return AVERROR(ENOSYS);
    case kVTCouldNotFindVideoDecoderErr:
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder for this format not found.\n");
        return AVERROR(ENOSYS);
    case kVTVideoDecoderMalfunctionErr:
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox malfunction.\n");
        return AVERROR(EINVAL);
    case kVTVideoDecoderBadDataErr:
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox reported invalid data.\n");
        return AVERROR_INVALIDDATA;
    case 0:
        return 0;
    default:
        av_log(avctx, AV_LOG_VERBOSE, "Unknown VideoToolbox session creation error %d\n", (int)status);
        return AVERROR_UNKNOWN;
    }
}
  720. static void videotoolbox_stop(AVCodecContext *avctx)
  721. {
  722. AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
  723. if (!videotoolbox)
  724. return;
  725. if (videotoolbox->cm_fmt_desc) {
  726. CFRelease(videotoolbox->cm_fmt_desc);
  727. videotoolbox->cm_fmt_desc = NULL;
  728. }
  729. if (videotoolbox->session) {
  730. VTDecompressionSessionInvalidate(videotoolbox->session);
  731. CFRelease(videotoolbox->session);
  732. videotoolbox->session = NULL;
  733. }
  734. }
  735. static const char *videotoolbox_error_string(OSStatus status)
  736. {
  737. switch (status) {
  738. case kVTVideoDecoderBadDataErr:
  739. return "bad data";
  740. case kVTVideoDecoderMalfunctionErr:
  741. return "decoder malfunction";
  742. case kVTInvalidSessionErr:
  743. return "invalid session";
  744. }
  745. return "unknown";
  746. }
/**
 * Common end_frame: restart the session if a reconfiguration was flagged,
 * decode the accumulated bitstream, and hand the resulting pixel buffer
 * over to the output frame.
 *
 * Decode failures that indicate a broken session flag a reconfig so the
 * next frame rebuilds the decoder instead of failing permanently.
 */
static int videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
{
    OSStatus status;
    AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    /* VideoToolbox outputs display-sized buffers: no cropping needed */
    frame->crop_right = 0;
    frame->crop_left = 0;
    frame->crop_top = 0;
    frame->crop_bottom = 0;

    if (vtctx->reconfig_needed == true) {
        vtctx->reconfig_needed = false;
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder needs reconfig, restarting..\n");
        videotoolbox_stop(avctx);
        if (videotoolbox_start(avctx) != 0) {
            return AVERROR_EXTERNAL;
        }
    }

    if (!videotoolbox->session || !vtctx->bitstream || !vtctx->bitstream_size)
        return AVERROR_INVALIDDATA;

    status = videotoolbox_session_decode_frame(avctx);
    if (status != noErr) {
        /* these statuses mean the session itself is unusable: rebuild it */
        if (status == kVTVideoDecoderMalfunctionErr || status == kVTInvalidSessionErr)
            vtctx->reconfig_needed = true;
        av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%s, %d)\n", videotoolbox_error_string(status), (int)status);
        return AVERROR_UNKNOWN;
    }

    if (!vtctx->frame) {
        /* decode "succeeded" but produced nothing: treat as session failure */
        vtctx->reconfig_needed = true;
        return AVERROR_UNKNOWN;
    }

    return videotoolbox_buffer_create(avctx, frame);
}
  779. static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
  780. {
  781. H264Context *h = avctx->priv_data;
  782. AVFrame *frame = h->cur_pic_ptr->f;
  783. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  784. int ret = videotoolbox_common_end_frame(avctx, frame);
  785. vtctx->bitstream_size = 0;
  786. return ret;
  787. }
  788. static int videotoolbox_hevc_start_frame(AVCodecContext *avctx,
  789. const uint8_t *buffer,
  790. uint32_t size)
  791. {
  792. return 0;
  793. }
  794. static int videotoolbox_hevc_decode_slice(AVCodecContext *avctx,
  795. const uint8_t *buffer,
  796. uint32_t size)
  797. {
  798. return videotoolbox_common_decode_slice(avctx, buffer, size);
  799. }
  800. static int videotoolbox_hevc_decode_params(AVCodecContext *avctx,
  801. int type,
  802. const uint8_t *buffer,
  803. uint32_t size)
  804. {
  805. return videotoolbox_common_decode_slice(avctx, buffer, size);
  806. }
  807. static int videotoolbox_hevc_end_frame(AVCodecContext *avctx)
  808. {
  809. HEVCContext *h = avctx->priv_data;
  810. AVFrame *frame = h->ref->frame;
  811. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  812. h->output_frame->crop_right = 0;
  813. h->output_frame->crop_left = 0;
  814. h->output_frame->crop_top = 0;
  815. h->output_frame->crop_bottom = 0;
  816. int ret = videotoolbox_common_end_frame(avctx, frame);
  817. vtctx->bitstream_size = 0;
  818. return ret;
  819. }
  820. static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
  821. const uint8_t *buffer,
  822. uint32_t size)
  823. {
  824. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  825. return videotoolbox_buffer_copy(vtctx, buffer, size);
  826. }
  827. static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
  828. const uint8_t *buffer,
  829. uint32_t size)
  830. {
  831. return 0;
  832. }
  833. static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
  834. {
  835. MpegEncContext *s = avctx->priv_data;
  836. AVFrame *frame = s->current_picture_ptr->f;
  837. return videotoolbox_common_end_frame(avctx, frame);
  838. }
/* Full hwaccel teardown: releases the common buffers, stops the decoder
 * session if one was created, and frees the state allocated by
 * videotoolbox_common_init(). Safe to call on a partially-initialized
 * context (used as the `fail:` path of common_init). */
static int videotoolbox_uninit(AVCodecContext *avctx)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    if (!vtctx)
        return 0;

    /* Release bitstream buffer / frame references first. */
    ff_videotoolbox_uninit(avctx);

    /* Only stop the session if we allocated a VT context ourselves. */
    if (vtctx->vt_ctx)
        videotoolbox_stop(avctx);

    av_buffer_unref(&vtctx->cached_hw_frames_ctx);
    av_freep(&vtctx->vt_ctx);

    return 0;
}
/* Common hwaccel init for the new (hw_frames_ctx/hw_device_ctx) API:
 * allocates an internal AVVideotoolboxContext, adopts or derives an
 * AVHWFramesContext, and starts the VTDecompressionSession.
 * Returns 0 on success or a negative AVERROR; on failure all partially
 * allocated state is released via videotoolbox_uninit(). */
static int videotoolbox_common_init(AVCodecContext *avctx)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    AVHWFramesContext *hw_frames;
    int err;

    // Old API - do nothing.
    if (avctx->hwaccel_context)
        return 0;

    if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) {
        av_log(avctx, AV_LOG_ERROR,
               "Either hw_frames_ctx or hw_device_ctx must be set.\n");
        return AVERROR(EINVAL);
    }

    /* Internal VT context; owned by vtctx and freed in videotoolbox_uninit(). */
    vtctx->vt_ctx = av_videotoolbox_alloc_context();
    if (!vtctx->vt_ctx) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    if (avctx->hw_frames_ctx) {
        /* Caller supplied a frames context — use it as-is. */
        hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
    } else {
        /* Only a device context was given: derive a frames context from it. */
        avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
        if (!avctx->hw_frames_ctx) {
            err = AVERROR(ENOMEM);
            goto fail;
        }

        hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
        hw_frames->format = AV_PIX_FMT_VIDEOTOOLBOX;
        hw_frames->sw_format = AV_PIX_FMT_NV12; // same as av_videotoolbox_alloc_context()
        hw_frames->width = avctx->width;
        hw_frames->height = avctx->height;

        err = av_hwframe_ctx_init(avctx->hw_frames_ctx);
        if (err < 0) {
            av_buffer_unref(&avctx->hw_frames_ctx);
            goto fail;
        }
    }

    /* Keep our own reference: avctx->hw_frames_ctx may be replaced by
     * the caller, but the running session depends on these parameters. */
    vtctx->cached_hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
    if (!vtctx->cached_hw_frames_ctx) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    /* Translate the frames context's sw pixel format into the CoreVideo
     * pixel format the decoder session must output. */
    vtctx->vt_ctx->cv_pix_fmt_type =
        av_map_videotoolbox_format_from_pixfmt(hw_frames->sw_format);
    if (!vtctx->vt_ctx->cv_pix_fmt_type) {
        av_log(avctx, AV_LOG_ERROR, "Unknown sw_format.\n");
        err = AVERROR(EINVAL);
        goto fail;
    }

    err = videotoolbox_start(avctx);
    if (err < 0)
        goto fail;

    return 0;

fail:
    videotoolbox_uninit(avctx);
    return err;
}
  908. static int videotoolbox_frame_params(AVCodecContext *avctx,
  909. AVBufferRef *hw_frames_ctx)
  910. {
  911. AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
  912. frames_ctx->format = AV_PIX_FMT_VIDEOTOOLBOX;
  913. frames_ctx->width = avctx->coded_width;
  914. frames_ctx->height = avctx->coded_height;
  915. frames_ctx->sw_format = AV_PIX_FMT_NV12;
  916. return 0;
  917. }
/* H.263 hwaccel: the whole picture is buffered in start_frame and
 * submitted to VideoToolbox in end_frame (slice callback is a no-op). */
const AVHWAccel ff_h263_videotoolbox_hwaccel = {
    .name           = "h263_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H263,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .frame_params   = videotoolbox_frame_params,
    .init           = videotoolbox_common_init,
    .uninit         = videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
  932. const AVHWAccel ff_hevc_videotoolbox_hwaccel = {
  933. .name = "hevc_videotoolbox",
  934. .type = AVMEDIA_TYPE_VIDEO,
  935. .id = AV_CODEC_ID_HEVC,
  936. .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
  937. .alloc_frame = ff_videotoolbox_alloc_frame,
  938. .start_frame = videotoolbox_hevc_start_frame,
  939. .decode_slice = videotoolbox_hevc_decode_slice,
  940. .decode_params = videotoolbox_hevc_decode_params,
  941. .end_frame = videotoolbox_hevc_end_frame,
  942. .frame_params = videotoolbox_frame_params,
  943. .init = videotoolbox_common_init,
  944. .uninit = ff_videotoolbox_uninit,
  945. .priv_data_size = sizeof(VTContext),
  946. };
/* H.264 hwaccel: uses the shared ff_videotoolbox_* start/slice handlers
 * (declared in vt_internal.h) plus the local params/end callbacks. */
const AVHWAccel ff_h264_videotoolbox_hwaccel = {
    .name           = "h264_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = ff_videotoolbox_h264_start_frame,
    .decode_slice   = ff_videotoolbox_h264_decode_slice,
    .decode_params  = videotoolbox_h264_decode_params,
    .end_frame      = videotoolbox_h264_end_frame,
    .frame_params   = videotoolbox_frame_params,
    .init           = videotoolbox_common_init,
    .uninit         = videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
/* MPEG-1 hwaccel: same whole-picture buffering scheme as H.263/MPEG-2/4. */
const AVHWAccel ff_mpeg1_videotoolbox_hwaccel = {
    .name           = "mpeg1_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG1VIDEO,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .frame_params   = videotoolbox_frame_params,
    .init           = videotoolbox_common_init,
    .uninit         = videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
/* MPEG-2 hwaccel: same whole-picture buffering scheme as H.263/MPEG-1/4. */
const AVHWAccel ff_mpeg2_videotoolbox_hwaccel = {
    .name           = "mpeg2_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG2VIDEO,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .frame_params   = videotoolbox_frame_params,
    .init           = videotoolbox_common_init,
    .uninit         = videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
/* MPEG-4 Part 2 hwaccel: same whole-picture buffering scheme as above. */
const AVHWAccel ff_mpeg4_videotoolbox_hwaccel = {
    .name           = "mpeg4_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG4,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .frame_params   = videotoolbox_frame_params,
    .init           = videotoolbox_common_init,
    .uninit         = videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
  1004. AVVideotoolboxContext *av_videotoolbox_alloc_context(void)
  1005. {
  1006. AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
  1007. if (ret) {
  1008. ret->output_callback = videotoolbox_decoder_callback;
  1009. ret->cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
  1010. }
  1011. return ret;
  1012. }
  1013. int av_videotoolbox_default_init(AVCodecContext *avctx)
  1014. {
  1015. return av_videotoolbox_default_init2(avctx, NULL);
  1016. }
  1017. int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx)
  1018. {
  1019. avctx->hwaccel_context = vtctx ?: av_videotoolbox_alloc_context();
  1020. if (!avctx->hwaccel_context)
  1021. return AVERROR(ENOMEM);
  1022. return videotoolbox_start(avctx);
  1023. }
  1024. void av_videotoolbox_default_free(AVCodecContext *avctx)
  1025. {
  1026. videotoolbox_stop(avctx);
  1027. av_freep(&avctx->hwaccel_context);
  1028. }
  1029. #endif /* CONFIG_VIDEOTOOLBOX */