You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1224 lines
41KB

  1. /*
  2. * Videotoolbox hardware acceleration
  3. *
  4. * copyright (c) 2012 Sebastien Zwickert
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. #include "config.h"
  23. #include "videotoolbox.h"
  24. #include "libavutil/hwcontext_videotoolbox.h"
  25. #include "vt_internal.h"
  26. #include "libavutil/avutil.h"
  27. #include "libavutil/hwcontext.h"
  28. #include "bytestream.h"
  29. #include "decode.h"
  30. #include "h264dec.h"
  31. #include "hevcdec.h"
  32. #include "mpegvideo.h"
  33. #include <TargetConditionals.h>
  34. #ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
  35. # define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
  36. #endif
  37. #ifndef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
  38. # define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder CFSTR("EnableHardwareAcceleratedVideoDecoder")
  39. #endif
  40. #if !HAVE_KCMVIDEOCODECTYPE_HEVC
  41. enum { kCMVideoCodecType_HEVC = 'hvc1' };
  42. #endif
  43. #define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12
/* Per-frame payload stored behind AVFrame.buf[0]: the decoded CoreVideo
 * pixel buffer plus a reference to the hw frames context it belongs to.
 * Freed by videotoolbox_buffer_release(). */
typedef struct VTHWFrame {
    CVPixelBufferRef pixbuf;       /* retained; released on buffer free */
    AVBufferRef *hw_frames_ctx;    /* may be NULL on the old-API path */
} VTHWFrame;
  48. static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
  49. {
  50. VTHWFrame *ref = (VTHWFrame *)data;
  51. av_buffer_unref(&ref->hw_frames_ctx);
  52. CVPixelBufferRelease(ref->pixbuf);
  53. av_free(data);
  54. }
  55. static int videotoolbox_buffer_copy(VTContext *vtctx,
  56. const uint8_t *buffer,
  57. uint32_t size)
  58. {
  59. void *tmp;
  60. tmp = av_fast_realloc(vtctx->bitstream,
  61. &vtctx->allocated_size,
  62. size);
  63. if (!tmp)
  64. return AVERROR(ENOMEM);
  65. vtctx->bitstream = tmp;
  66. memcpy(vtctx->bitstream, buffer, size);
  67. vtctx->bitstream_size = size;
  68. return 0;
  69. }
  70. static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
  71. {
  72. VTHWFrame *ref = (VTHWFrame *)frame->buf[0]->data;
  73. if (!ref->pixbuf) {
  74. av_log(avctx, AV_LOG_ERROR, "No frame decoded?\n");
  75. av_frame_unref(frame);
  76. return AVERROR_EXTERNAL;
  77. }
  78. frame->data[3] = (uint8_t*)ref->pixbuf;
  79. if (ref->hw_frames_ctx) {
  80. av_buffer_unref(&frame->hw_frames_ctx);
  81. frame->hw_frames_ctx = av_buffer_ref(ref->hw_frames_ctx);
  82. if (!frame->hw_frames_ctx)
  83. return AVERROR(ENOMEM);
  84. }
  85. return 0;
  86. }
  87. int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
  88. {
  89. size_t size = sizeof(VTHWFrame);
  90. uint8_t *data = NULL;
  91. AVBufferRef *buf = NULL;
  92. int ret = ff_attach_decode_data(frame);
  93. FrameDecodeData *fdd;
  94. if (ret < 0)
  95. return ret;
  96. data = av_mallocz(size);
  97. if (!data)
  98. return AVERROR(ENOMEM);
  99. buf = av_buffer_create(data, size, videotoolbox_buffer_release, NULL, 0);
  100. if (!buf) {
  101. av_freep(&data);
  102. return AVERROR(ENOMEM);
  103. }
  104. frame->buf[0] = buf;
  105. fdd = (FrameDecodeData*)frame->private_ref->data;
  106. fdd->post_process = videotoolbox_postproc_frame;
  107. frame->width = avctx->width;
  108. frame->height = avctx->height;
  109. frame->format = avctx->pix_fmt;
  110. return 0;
  111. }
  112. #define AV_W8(p, v) *(p) = (v)
  113. CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
  114. {
  115. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  116. H264Context *h = avctx->priv_data;
  117. CFDataRef data = NULL;
  118. uint8_t *p;
  119. int vt_extradata_size = 6 + 2 + h->ps.sps->data_size + 3 + h->ps.pps->data_size;
  120. uint8_t *vt_extradata = av_malloc(vt_extradata_size);
  121. if (!vt_extradata)
  122. return NULL;
  123. p = vt_extradata;
  124. AV_W8(p + 0, 1); /* version */
  125. AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
  126. AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
  127. AV_W8(p + 3, h->ps.sps->data[3]); /* level */
  128. AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
  129. AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
  130. AV_WB16(p + 6, h->ps.sps->data_size);
  131. memcpy(p + 8, h->ps.sps->data, h->ps.sps->data_size);
  132. p += 8 + h->ps.sps->data_size;
  133. AV_W8(p + 0, 1); /* number of pps */
  134. AV_WB16(p + 1, h->ps.pps->data_size);
  135. memcpy(p + 3, h->ps.pps->data, h->ps.pps->data_size);
  136. p += 3 + h->ps.pps->data_size;
  137. av_assert0(p - vt_extradata == vt_extradata_size);
  138. // save sps header (profile/level) used to create decoder session,
  139. // so we can detect changes and recreate it.
  140. if (vtctx)
  141. memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
  142. data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
  143. av_free(vt_extradata);
  144. return data;
  145. }
  146. CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
  147. {
  148. HEVCContext *h = avctx->priv_data;
  149. const HEVCVPS *vps = (const HEVCVPS *)h->ps.vps_list[0]->data;
  150. const HEVCSPS *sps = (const HEVCSPS *)h->ps.sps_list[0]->data;
  151. int i, num_pps = 0;
  152. const HEVCPPS *pps = h->ps.pps;
  153. PTLCommon ptlc = vps->ptl.general_ptl;
  154. VUI vui = sps->vui;
  155. uint8_t parallelismType;
  156. CFDataRef data = NULL;
  157. uint8_t *p;
  158. int vt_extradata_size = 23 + 5 + vps->data_size + 5 + sps->data_size + 3;
  159. uint8_t *vt_extradata;
  160. for (i = 0; i < HEVC_MAX_PPS_COUNT; i++) {
  161. if (h->ps.pps_list[i]) {
  162. const HEVCPPS *pps = (const HEVCPPS *)h->ps.pps_list[i]->data;
  163. vt_extradata_size += 2 + pps->data_size;
  164. num_pps++;
  165. }
  166. }
  167. vt_extradata = av_malloc(vt_extradata_size);
  168. if (!vt_extradata)
  169. return NULL;
  170. p = vt_extradata;
  171. /* unsigned int(8) configurationVersion = 1; */
  172. AV_W8(p + 0, 1);
  173. /*
  174. * unsigned int(2) general_profile_space;
  175. * unsigned int(1) general_tier_flag;
  176. * unsigned int(5) general_profile_idc;
  177. */
  178. AV_W8(p + 1, ptlc.profile_space << 6 |
  179. ptlc.tier_flag << 5 |
  180. ptlc.profile_idc);
  181. /* unsigned int(32) general_profile_compatibility_flags; */
  182. memcpy(p + 2, ptlc.profile_compatibility_flag, 4);
  183. /* unsigned int(48) general_constraint_indicator_flags; */
  184. AV_W8(p + 6, ptlc.progressive_source_flag << 7 |
  185. ptlc.interlaced_source_flag << 6 |
  186. ptlc.non_packed_constraint_flag << 5 |
  187. ptlc.frame_only_constraint_flag << 4);
  188. AV_W8(p + 7, 0);
  189. AV_WN32(p + 8, 0);
  190. /* unsigned int(8) general_level_idc; */
  191. AV_W8(p + 12, ptlc.level_idc);
  192. /*
  193. * bit(4) reserved = ‘1111’b;
  194. * unsigned int(12) min_spatial_segmentation_idc;
  195. */
  196. AV_W8(p + 13, 0xf0 | (vui.min_spatial_segmentation_idc >> 4));
  197. AV_W8(p + 14, vui.min_spatial_segmentation_idc & 0xff);
  198. /*
  199. * bit(6) reserved = ‘111111’b;
  200. * unsigned int(2) parallelismType;
  201. */
  202. if (!vui.min_spatial_segmentation_idc)
  203. parallelismType = 0;
  204. else if (pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
  205. parallelismType = 0;
  206. else if (pps->entropy_coding_sync_enabled_flag)
  207. parallelismType = 3;
  208. else if (pps->tiles_enabled_flag)
  209. parallelismType = 2;
  210. else
  211. parallelismType = 1;
  212. AV_W8(p + 15, 0xfc | parallelismType);
  213. /*
  214. * bit(6) reserved = ‘111111’b;
  215. * unsigned int(2) chromaFormat;
  216. */
  217. AV_W8(p + 16, sps->chroma_format_idc | 0xfc);
  218. /*
  219. * bit(5) reserved = ‘11111’b;
  220. * unsigned int(3) bitDepthLumaMinus8;
  221. */
  222. AV_W8(p + 17, (sps->bit_depth - 8) | 0xfc);
  223. /*
  224. * bit(5) reserved = ‘11111’b;
  225. * unsigned int(3) bitDepthChromaMinus8;
  226. */
  227. AV_W8(p + 18, (sps->bit_depth_chroma - 8) | 0xfc);
  228. /* bit(16) avgFrameRate; */
  229. AV_WB16(p + 19, 0);
  230. /*
  231. * bit(2) constantFrameRate;
  232. * bit(3) numTemporalLayers;
  233. * bit(1) temporalIdNested;
  234. * unsigned int(2) lengthSizeMinusOne;
  235. */
  236. AV_W8(p + 21, 0 << 6 |
  237. sps->max_sub_layers << 3 |
  238. sps->temporal_id_nesting_flag << 2 |
  239. 3);
  240. /* unsigned int(8) numOfArrays; */
  241. AV_W8(p + 22, 3);
  242. p += 23;
  243. /* vps */
  244. /*
  245. * bit(1) array_completeness;
  246. * unsigned int(1) reserved = 0;
  247. * unsigned int(6) NAL_unit_type;
  248. */
  249. AV_W8(p, 1 << 7 |
  250. HEVC_NAL_VPS & 0x3f);
  251. /* unsigned int(16) numNalus; */
  252. AV_WB16(p + 1, 1);
  253. /* unsigned int(16) nalUnitLength; */
  254. AV_WB16(p + 3, vps->data_size);
  255. /* bit(8*nalUnitLength) nalUnit; */
  256. memcpy(p + 5, vps->data, vps->data_size);
  257. p += 5 + vps->data_size;
  258. /* sps */
  259. AV_W8(p, 1 << 7 |
  260. HEVC_NAL_SPS & 0x3f);
  261. AV_WB16(p + 1, 1);
  262. AV_WB16(p + 3, sps->data_size);
  263. memcpy(p + 5, sps->data, sps->data_size);
  264. p += 5 + sps->data_size;
  265. /* pps */
  266. AV_W8(p, 1 << 7 |
  267. HEVC_NAL_PPS & 0x3f);
  268. AV_WB16(p + 1, num_pps);
  269. p += 3;
  270. for (i = 0; i < HEVC_MAX_PPS_COUNT; i++) {
  271. if (h->ps.pps_list[i]) {
  272. const HEVCPPS *pps = (const HEVCPPS *)h->ps.pps_list[i]->data;
  273. AV_WB16(p, pps->data_size);
  274. memcpy(p + 2, pps->data, pps->data_size);
  275. p += 2 + pps->data_size;
  276. }
  277. }
  278. av_assert0(p - vt_extradata == vt_extradata_size);
  279. data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
  280. av_free(vt_extradata);
  281. return data;
  282. }
  283. int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
  284. const uint8_t *buffer,
  285. uint32_t size)
  286. {
  287. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  288. H264Context *h = avctx->priv_data;
  289. if (h->is_avc == 1) {
  290. return videotoolbox_buffer_copy(vtctx, buffer, size);
  291. }
  292. return 0;
  293. }
  294. static int videotoolbox_h264_decode_params(AVCodecContext *avctx,
  295. int type,
  296. const uint8_t *buffer,
  297. uint32_t size)
  298. {
  299. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  300. H264Context *h = avctx->priv_data;
  301. // save sps header (profile/level) used to create decoder session
  302. if (!vtctx->sps[0])
  303. memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
  304. if (type == H264_NAL_SPS) {
  305. if (size > 4 && memcmp(vtctx->sps, buffer + 1, 3) != 0) {
  306. vtctx->reconfig_needed = true;
  307. memcpy(vtctx->sps, buffer + 1, 3);
  308. }
  309. }
  310. // pass-through SPS/PPS changes to the decoder
  311. return ff_videotoolbox_h264_decode_slice(avctx, buffer, size);
  312. }
  313. static int videotoolbox_common_decode_slice(AVCodecContext *avctx,
  314. const uint8_t *buffer,
  315. uint32_t size)
  316. {
  317. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  318. void *tmp;
  319. tmp = av_fast_realloc(vtctx->bitstream,
  320. &vtctx->allocated_size,
  321. vtctx->bitstream_size+size+4);
  322. if (!tmp)
  323. return AVERROR(ENOMEM);
  324. vtctx->bitstream = tmp;
  325. AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
  326. memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);
  327. vtctx->bitstream_size += size + 4;
  328. return 0;
  329. }
  330. int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
  331. const uint8_t *buffer,
  332. uint32_t size)
  333. {
  334. H264Context *h = avctx->priv_data;
  335. if (h->is_avc == 1)
  336. return 0;
  337. return videotoolbox_common_decode_slice(avctx, buffer, size);
  338. }
  339. int ff_videotoolbox_uninit(AVCodecContext *avctx)
  340. {
  341. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  342. if (vtctx) {
  343. av_freep(&vtctx->bitstream);
  344. if (vtctx->frame)
  345. CVPixelBufferRelease(vtctx->frame);
  346. }
  347. return 0;
  348. }
  349. #if CONFIG_VIDEOTOOLBOX
  350. // Return the AVVideotoolboxContext that matters currently. Where it comes from
  351. // depends on the API used.
  352. static AVVideotoolboxContext *videotoolbox_get_context(AVCodecContext *avctx)
  353. {
  354. // Somewhat tricky because the user can call av_videotoolbox_default_free()
  355. // at any time, even when the codec is closed.
  356. if (avctx->internal && avctx->internal->hwaccel_priv_data) {
  357. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  358. if (vtctx->vt_ctx)
  359. return vtctx->vt_ctx;
  360. }
  361. return avctx->hwaccel_context;
  362. }
  363. static int videotoolbox_buffer_create(AVCodecContext *avctx, AVFrame *frame)
  364. {
  365. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  366. CVPixelBufferRef pixbuf = (CVPixelBufferRef)vtctx->frame;
  367. OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
  368. enum AVPixelFormat sw_format = av_map_videotoolbox_format_to_pixfmt(pixel_format);
  369. int width = CVPixelBufferGetWidth(pixbuf);
  370. int height = CVPixelBufferGetHeight(pixbuf);
  371. AVHWFramesContext *cached_frames;
  372. VTHWFrame *ref;
  373. int ret;
  374. if (!frame->buf[0] || frame->data[3]) {
  375. av_log(avctx, AV_LOG_ERROR, "videotoolbox: invalid state\n");
  376. av_frame_unref(frame);
  377. return AVERROR_EXTERNAL;
  378. }
  379. ref = (VTHWFrame *)frame->buf[0]->data;
  380. if (ref->pixbuf)
  381. CVPixelBufferRelease(ref->pixbuf);
  382. ref->pixbuf = vtctx->frame;
  383. vtctx->frame = NULL;
  384. // Old API code path.
  385. if (!vtctx->cached_hw_frames_ctx)
  386. return 0;
  387. cached_frames = (AVHWFramesContext*)vtctx->cached_hw_frames_ctx->data;
  388. if (cached_frames->sw_format != sw_format ||
  389. cached_frames->width != width ||
  390. cached_frames->height != height) {
  391. AVBufferRef *hw_frames_ctx = av_hwframe_ctx_alloc(cached_frames->device_ref);
  392. AVHWFramesContext *hw_frames;
  393. if (!hw_frames_ctx)
  394. return AVERROR(ENOMEM);
  395. hw_frames = (AVHWFramesContext*)hw_frames_ctx->data;
  396. hw_frames->format = cached_frames->format;
  397. hw_frames->sw_format = sw_format;
  398. hw_frames->width = width;
  399. hw_frames->height = height;
  400. ret = av_hwframe_ctx_init(hw_frames_ctx);
  401. if (ret < 0) {
  402. av_buffer_unref(&hw_frames_ctx);
  403. return ret;
  404. }
  405. av_buffer_unref(&vtctx->cached_hw_frames_ctx);
  406. vtctx->cached_hw_frames_ctx = hw_frames_ctx;
  407. }
  408. av_buffer_unref(&ref->hw_frames_ctx);
  409. ref->hw_frames_ctx = av_buffer_ref(vtctx->cached_hw_frames_ctx);
  410. if (!ref->hw_frames_ctx)
  411. return AVERROR(ENOMEM);
  412. return 0;
  413. }
  414. static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
  415. {
  416. int i;
  417. uint8_t b;
  418. for (i = 3; i >= 0; i--) {
  419. b = (length >> (i * 7)) & 0x7F;
  420. if (i != 0)
  421. b |= 0x80;
  422. bytestream2_put_byteu(pb, b);
  423. }
  424. }
  425. static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
  426. {
  427. CFDataRef data;
  428. uint8_t *rw_extradata;
  429. PutByteContext pb;
  430. int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
  431. // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
  432. int config_size = 13 + 5 + avctx->extradata_size;
  433. int s;
  434. if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
  435. return NULL;
  436. bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
  437. bytestream2_put_byteu(&pb, 0); // version
  438. bytestream2_put_ne24(&pb, 0); // flags
  439. // elementary stream descriptor
  440. bytestream2_put_byteu(&pb, 0x03); // ES_DescrTag
  441. videotoolbox_write_mp4_descr_length(&pb, full_size);
  442. bytestream2_put_ne16(&pb, 0); // esid
  443. bytestream2_put_byteu(&pb, 0); // stream priority (0-32)
  444. // decoder configuration descriptor
  445. bytestream2_put_byteu(&pb, 0x04); // DecoderConfigDescrTag
  446. videotoolbox_write_mp4_descr_length(&pb, config_size);
  447. bytestream2_put_byteu(&pb, 32); // object type indication. 32 = AV_CODEC_ID_MPEG4
  448. bytestream2_put_byteu(&pb, 0x11); // stream type
  449. bytestream2_put_ne24(&pb, 0); // buffer size
  450. bytestream2_put_ne32(&pb, 0); // max bitrate
  451. bytestream2_put_ne32(&pb, 0); // avg bitrate
  452. // decoder specific descriptor
  453. bytestream2_put_byteu(&pb, 0x05); ///< DecSpecificInfoTag
  454. videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);
  455. bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);
  456. // SLConfigDescriptor
  457. bytestream2_put_byteu(&pb, 0x06); // SLConfigDescrTag
  458. bytestream2_put_byteu(&pb, 0x01); // length
  459. bytestream2_put_byteu(&pb, 0x02); //
  460. s = bytestream2_size_p(&pb);
  461. data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);
  462. av_freep(&rw_extradata);
  463. return data;
  464. }
  465. static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
  466. void *buffer,
  467. int size)
  468. {
  469. OSStatus status;
  470. CMBlockBufferRef block_buf;
  471. CMSampleBufferRef sample_buf;
  472. block_buf = NULL;
  473. sample_buf = NULL;
  474. status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
  475. buffer, // memoryBlock
  476. size, // blockLength
  477. kCFAllocatorNull, // blockAllocator
  478. NULL, // customBlockSource
  479. 0, // offsetToData
  480. size, // dataLength
  481. 0, // flags
  482. &block_buf);
  483. if (!status) {
  484. status = CMSampleBufferCreate(kCFAllocatorDefault, // allocator
  485. block_buf, // dataBuffer
  486. TRUE, // dataReady
  487. 0, // makeDataReadyCallback
  488. 0, // makeDataReadyRefcon
  489. fmt_desc, // formatDescription
  490. 1, // numSamples
  491. 0, // numSampleTimingEntries
  492. NULL, // sampleTimingArray
  493. 0, // numSampleSizeEntries
  494. NULL, // sampleSizeArray
  495. &sample_buf);
  496. }
  497. if (block_buf)
  498. CFRelease(block_buf);
  499. return sample_buf;
  500. }
  501. static void videotoolbox_decoder_callback(void *opaque,
  502. void *sourceFrameRefCon,
  503. OSStatus status,
  504. VTDecodeInfoFlags flags,
  505. CVImageBufferRef image_buffer,
  506. CMTime pts,
  507. CMTime duration)
  508. {
  509. AVCodecContext *avctx = opaque;
  510. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  511. if (vtctx->frame) {
  512. CVPixelBufferRelease(vtctx->frame);
  513. vtctx->frame = NULL;
  514. }
  515. if (!image_buffer) {
  516. av_log(NULL, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n");
  517. return;
  518. }
  519. vtctx->frame = CVPixelBufferRetain(image_buffer);
  520. }
  521. static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
  522. {
  523. OSStatus status;
  524. CMSampleBufferRef sample_buf;
  525. AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
  526. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  527. sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
  528. vtctx->bitstream,
  529. vtctx->bitstream_size);
  530. if (!sample_buf)
  531. return -1;
  532. status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
  533. sample_buf,
  534. 0, // decodeFlags
  535. NULL, // sourceFrameRefCon
  536. 0); // infoFlagsOut
  537. if (status == noErr)
  538. status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);
  539. CFRelease(sample_buf);
  540. return status;
  541. }
  542. static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
  543. CFDictionaryRef decoder_spec,
  544. int width,
  545. int height)
  546. {
  547. CMFormatDescriptionRef cm_fmt_desc;
  548. OSStatus status;
  549. status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
  550. codec_type,
  551. width,
  552. height,
  553. decoder_spec, // Dictionary of extension
  554. &cm_fmt_desc);
  555. if (status)
  556. return NULL;
  557. return cm_fmt_desc;
  558. }
  559. static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
  560. int height,
  561. OSType pix_fmt)
  562. {
  563. CFMutableDictionaryRef buffer_attributes;
  564. CFMutableDictionaryRef io_surface_properties;
  565. CFNumberRef cv_pix_fmt;
  566. CFNumberRef w;
  567. CFNumberRef h;
  568. w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
  569. h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
  570. cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);
  571. buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
  572. 4,
  573. &kCFTypeDictionaryKeyCallBacks,
  574. &kCFTypeDictionaryValueCallBacks);
  575. io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
  576. 0,
  577. &kCFTypeDictionaryKeyCallBacks,
  578. &kCFTypeDictionaryValueCallBacks);
  579. if (pix_fmt)
  580. CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
  581. CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
  582. CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
  583. CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
  584. #if TARGET_OS_IPHONE
  585. CFDictionarySetValue(buffer_attributes, kCVPixelBufferOpenGLESCompatibilityKey, kCFBooleanTrue);
  586. #else
  587. CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey, kCFBooleanTrue);
  588. #endif
  589. CFRelease(io_surface_properties);
  590. CFRelease(cv_pix_fmt);
  591. CFRelease(w);
  592. CFRelease(h);
  593. return buffer_attributes;
  594. }
  595. static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
  596. AVCodecContext *avctx)
  597. {
  598. CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
  599. 0,
  600. &kCFTypeDictionaryKeyCallBacks,
  601. &kCFTypeDictionaryValueCallBacks);
  602. CFDictionarySetValue(config_info,
  603. codec_type == kCMVideoCodecType_HEVC ?
  604. kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder :
  605. kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
  606. kCFBooleanTrue);
  607. CFMutableDictionaryRef avc_info;
  608. CFDataRef data = NULL;
  609. avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
  610. 1,
  611. &kCFTypeDictionaryKeyCallBacks,
  612. &kCFTypeDictionaryValueCallBacks);
  613. switch (codec_type) {
  614. case kCMVideoCodecType_MPEG4Video :
  615. if (avctx->extradata_size)
  616. data = videotoolbox_esds_extradata_create(avctx);
  617. if (data)
  618. CFDictionarySetValue(avc_info, CFSTR("esds"), data);
  619. break;
  620. case kCMVideoCodecType_H264 :
  621. data = ff_videotoolbox_avcc_extradata_create(avctx);
  622. if (data)
  623. CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
  624. break;
  625. case kCMVideoCodecType_HEVC :
  626. data = ff_videotoolbox_hvcc_extradata_create(avctx);
  627. if (data)
  628. CFDictionarySetValue(avc_info, CFSTR("hvcC"), data);
  629. break;
  630. default:
  631. break;
  632. }
  633. CFDictionarySetValue(config_info,
  634. kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
  635. avc_info);
  636. if (data)
  637. CFRelease(data);
  638. CFRelease(avc_info);
  639. return config_info;
  640. }
  641. static int videotoolbox_start(AVCodecContext *avctx)
  642. {
  643. AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
  644. OSStatus status;
  645. VTDecompressionOutputCallbackRecord decoder_cb;
  646. CFDictionaryRef decoder_spec;
  647. CFDictionaryRef buf_attr;
  648. if (!videotoolbox) {
  649. av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
  650. return -1;
  651. }
  652. switch( avctx->codec_id ) {
  653. case AV_CODEC_ID_H263 :
  654. videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
  655. break;
  656. case AV_CODEC_ID_H264 :
  657. videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
  658. break;
  659. case AV_CODEC_ID_HEVC :
  660. videotoolbox->cm_codec_type = kCMVideoCodecType_HEVC;
  661. break;
  662. case AV_CODEC_ID_MPEG1VIDEO :
  663. videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
  664. break;
  665. case AV_CODEC_ID_MPEG2VIDEO :
  666. videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
  667. break;
  668. case AV_CODEC_ID_MPEG4 :
  669. videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
  670. break;
  671. default :
  672. break;
  673. }
  674. decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);
  675. if (!decoder_spec) {
  676. av_log(avctx, AV_LOG_ERROR, "decoder specification creation failed\n");
  677. return -1;
  678. }
  679. videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
  680. decoder_spec,
  681. avctx->width,
  682. avctx->height);
  683. if (!videotoolbox->cm_fmt_desc) {
  684. if (decoder_spec)
  685. CFRelease(decoder_spec);
  686. av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
  687. return -1;
  688. }
  689. buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
  690. avctx->height,
  691. videotoolbox->cv_pix_fmt_type);
  692. decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
  693. decoder_cb.decompressionOutputRefCon = avctx;
  694. status = VTDecompressionSessionCreate(NULL, // allocator
  695. videotoolbox->cm_fmt_desc, // videoFormatDescription
  696. decoder_spec, // videoDecoderSpecification
  697. buf_attr, // destinationImageBufferAttributes
  698. &decoder_cb, // outputCallback
  699. &videotoolbox->session); // decompressionSessionOut
  700. if (decoder_spec)
  701. CFRelease(decoder_spec);
  702. if (buf_attr)
  703. CFRelease(buf_attr);
  704. switch (status) {
  705. case kVTVideoDecoderNotAvailableNowErr:
  706. av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox session not available.\n");
  707. return AVERROR(ENOSYS);
  708. case kVTVideoDecoderUnsupportedDataFormatErr:
  709. av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox does not support this format.\n");
  710. return AVERROR(ENOSYS);
  711. case kVTCouldNotFindVideoDecoderErr:
  712. av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder for this format not found.\n");
  713. return AVERROR(ENOSYS);
  714. case kVTVideoDecoderMalfunctionErr:
  715. av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox malfunction.\n");
  716. return AVERROR(EINVAL);
  717. case kVTVideoDecoderBadDataErr:
  718. av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox reported invalid data.\n");
  719. return AVERROR_INVALIDDATA;
  720. case 0:
  721. return 0;
  722. default:
  723. av_log(avctx, AV_LOG_VERBOSE, "Unknown VideoToolbox session creation error %d\n", (int)status);
  724. return AVERROR_UNKNOWN;
  725. }
  726. }
  727. static void videotoolbox_stop(AVCodecContext *avctx)
  728. {
  729. AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
  730. if (!videotoolbox)
  731. return;
  732. if (videotoolbox->cm_fmt_desc) {
  733. CFRelease(videotoolbox->cm_fmt_desc);
  734. videotoolbox->cm_fmt_desc = NULL;
  735. }
  736. if (videotoolbox->session) {
  737. VTDecompressionSessionInvalidate(videotoolbox->session);
  738. CFRelease(videotoolbox->session);
  739. videotoolbox->session = NULL;
  740. }
  741. }
  742. static const char *videotoolbox_error_string(OSStatus status)
  743. {
  744. switch (status) {
  745. case kVTVideoDecoderBadDataErr:
  746. return "bad data";
  747. case kVTVideoDecoderMalfunctionErr:
  748. return "decoder malfunction";
  749. case kVTInvalidSessionErr:
  750. return "invalid session";
  751. }
  752. return "unknown";
  753. }
  754. static int videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
  755. {
  756. OSStatus status;
  757. AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
  758. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  759. if (vtctx->reconfig_needed == true) {
  760. vtctx->reconfig_needed = false;
  761. av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder needs reconfig, restarting..\n");
  762. videotoolbox_stop(avctx);
  763. if (videotoolbox_start(avctx) != 0) {
  764. return AVERROR_EXTERNAL;
  765. }
  766. }
  767. if (!videotoolbox->session || !vtctx->bitstream || !vtctx->bitstream_size)
  768. return AVERROR_INVALIDDATA;
  769. status = videotoolbox_session_decode_frame(avctx);
  770. if (status != noErr) {
  771. if (status == kVTVideoDecoderMalfunctionErr || status == kVTInvalidSessionErr)
  772. vtctx->reconfig_needed = true;
  773. av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%s, %d)\n", videotoolbox_error_string(status), (int)status);
  774. return AVERROR_UNKNOWN;
  775. }
  776. if (!vtctx->frame) {
  777. vtctx->reconfig_needed = true;
  778. return AVERROR_UNKNOWN;
  779. }
  780. return videotoolbox_buffer_create(avctx, frame);
  781. }
  782. static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
  783. {
  784. H264Context *h = avctx->priv_data;
  785. AVFrame *frame = h->cur_pic_ptr->f;
  786. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  787. int ret = videotoolbox_common_end_frame(avctx, frame);
  788. vtctx->bitstream_size = 0;
  789. return ret;
  790. }
  791. static int videotoolbox_hevc_start_frame(AVCodecContext *avctx,
  792. const uint8_t *buffer,
  793. uint32_t size)
  794. {
  795. return 0;
  796. }
  797. static int videotoolbox_hevc_decode_slice(AVCodecContext *avctx,
  798. const uint8_t *buffer,
  799. uint32_t size)
  800. {
  801. return videotoolbox_common_decode_slice(avctx, buffer, size);
  802. }
  803. static int videotoolbox_hevc_decode_params(AVCodecContext *avctx,
  804. int type,
  805. const uint8_t *buffer,
  806. uint32_t size)
  807. {
  808. return videotoolbox_common_decode_slice(avctx, buffer, size);
  809. }
  810. static int videotoolbox_hevc_end_frame(AVCodecContext *avctx)
  811. {
  812. HEVCContext *h = avctx->priv_data;
  813. AVFrame *frame = h->ref->frame;
  814. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  815. int ret = videotoolbox_common_end_frame(avctx, frame);
  816. vtctx->bitstream_size = 0;
  817. return ret;
  818. }
  819. static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
  820. const uint8_t *buffer,
  821. uint32_t size)
  822. {
  823. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  824. return videotoolbox_buffer_copy(vtctx, buffer, size);
  825. }
  826. static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
  827. const uint8_t *buffer,
  828. uint32_t size)
  829. {
  830. return 0;
  831. }
  832. static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
  833. {
  834. MpegEncContext *s = avctx->priv_data;
  835. AVFrame *frame = s->current_picture_ptr->f;
  836. return videotoolbox_common_end_frame(avctx, frame);
  837. }
/**
 * Full hwaccel teardown for the new (hw_device_ctx/hw_frames_ctx) API:
 * releases the bitstream buffer, stops the decompression session and
 * frees the per-codec VT context allocated by videotoolbox_common_init().
 */
static int videotoolbox_uninit(AVCodecContext *avctx)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    if (!vtctx)
        return 0;

    // Free the bitstream buffer and common per-frame state first.
    ff_videotoolbox_uninit(avctx);

    // vt_ctx is only set when the new API allocated it in common_init;
    // the old API keeps the session in avctx->hwaccel_context instead.
    if (vtctx->vt_ctx)
        videotoolbox_stop(avctx);

    av_buffer_unref(&vtctx->cached_hw_frames_ctx);
    av_freep(&vtctx->vt_ctx);

    return 0;
}
/**
 * Hwaccel init for the new API: sets up an AVHWFramesContext (deriving
 * one from hw_device_ctx if the caller did not supply it), allocates the
 * internal AVVideotoolboxContext and starts the decompression session.
 *
 * Returns 0 on success, a negative AVERROR code on failure; all partial
 * state is released via videotoolbox_uninit() on the fail path.
 */
static int videotoolbox_common_init(AVCodecContext *avctx)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    AVHWFramesContext *hw_frames;
    int err;

    // Old API - do nothing. (Session was created by av_videotoolbox_default_init*.)
    if (avctx->hwaccel_context)
        return 0;

    if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) {
        av_log(avctx, AV_LOG_ERROR,
               "Either hw_frames_ctx or hw_device_ctx must be set.\n");
        return AVERROR(EINVAL);
    }

    vtctx->vt_ctx = av_videotoolbox_alloc_context();
    if (!vtctx->vt_ctx) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    if (avctx->hw_frames_ctx) {
        // Caller-provided frames context: use it as-is.
        hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
    } else {
        // Derive a frames context from the device context.
        avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
        if (!avctx->hw_frames_ctx) {
            err = AVERROR(ENOMEM);
            goto fail;
        }

        hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
        hw_frames->format = AV_PIX_FMT_VIDEOTOOLBOX;
        hw_frames->sw_format = AV_PIX_FMT_NV12; // same as av_videotoolbox_alloc_context()
        hw_frames->width = avctx->width;
        hw_frames->height = avctx->height;

        err = av_hwframe_ctx_init(avctx->hw_frames_ctx);
        if (err < 0) {
            av_buffer_unref(&avctx->hw_frames_ctx);
            goto fail;
        }
    }

    // Keep our own reference so frame allocation survives caller changes.
    vtctx->cached_hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
    if (!vtctx->cached_hw_frames_ctx) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    // Translate the software pixel format into a CVPixelBuffer FourCC.
    vtctx->vt_ctx->cv_pix_fmt_type =
        av_map_videotoolbox_format_from_pixfmt(hw_frames->sw_format);
    if (!vtctx->vt_ctx->cv_pix_fmt_type) {
        av_log(avctx, AV_LOG_ERROR, "Unknown sw_format.\n");
        err = AVERROR(EINVAL);
        goto fail;
    }

    err = videotoolbox_start(avctx);
    if (err < 0)
        goto fail;

    return 0;

fail:
    videotoolbox_uninit(avctx);
    return err;
}
  907. static int videotoolbox_frame_params(AVCodecContext *avctx,
  908. AVBufferRef *hw_frames_ctx)
  909. {
  910. AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
  911. frames_ctx->format = AV_PIX_FMT_VIDEOTOOLBOX;
  912. frames_ctx->width = avctx->coded_width;
  913. frames_ctx->height = avctx->coded_height;
  914. frames_ctx->sw_format = AV_PIX_FMT_NV12;
  915. return 0;
  916. }
/* H.263 hardware decoding via VideoToolbox (whole-picture submission). */
const AVHWAccel ff_h263_videotoolbox_hwaccel = {
    .name           = "h263_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H263,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .frame_params   = videotoolbox_frame_params,
    .init           = videotoolbox_common_init,
    .uninit         = videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
  931. const AVHWAccel ff_hevc_videotoolbox_hwaccel = {
  932. .name = "hevc_videotoolbox",
  933. .type = AVMEDIA_TYPE_VIDEO,
  934. .id = AV_CODEC_ID_HEVC,
  935. .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
  936. .alloc_frame = ff_videotoolbox_alloc_frame,
  937. .start_frame = videotoolbox_hevc_start_frame,
  938. .decode_slice = videotoolbox_hevc_decode_slice,
  939. .decode_params = videotoolbox_hevc_decode_params,
  940. .end_frame = videotoolbox_hevc_end_frame,
  941. .frame_params = videotoolbox_frame_params,
  942. .init = videotoolbox_common_init,
  943. .uninit = ff_videotoolbox_uninit,
  944. .priv_data_size = sizeof(VTContext),
  945. };
/* H.264 hardware decoding via VideoToolbox (incremental NAL submission). */
const AVHWAccel ff_h264_videotoolbox_hwaccel = {
    .name           = "h264_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = ff_videotoolbox_h264_start_frame,
    .decode_slice   = ff_videotoolbox_h264_decode_slice,
    .decode_params  = videotoolbox_h264_decode_params,
    .end_frame      = videotoolbox_h264_end_frame,
    .frame_params   = videotoolbox_frame_params,
    .init           = videotoolbox_common_init,
    .uninit         = videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
/* MPEG-1 hardware decoding via VideoToolbox (whole-picture submission). */
const AVHWAccel ff_mpeg1_videotoolbox_hwaccel = {
    .name           = "mpeg1_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG1VIDEO,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .frame_params   = videotoolbox_frame_params,
    .init           = videotoolbox_common_init,
    .uninit         = videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
/* MPEG-2 hardware decoding via VideoToolbox (whole-picture submission). */
const AVHWAccel ff_mpeg2_videotoolbox_hwaccel = {
    .name           = "mpeg2_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG2VIDEO,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .frame_params   = videotoolbox_frame_params,
    .init           = videotoolbox_common_init,
    .uninit         = videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
/* MPEG-4 Part 2 hardware decoding via VideoToolbox (whole-picture submission). */
const AVHWAccel ff_mpeg4_videotoolbox_hwaccel = {
    .name           = "mpeg4_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG4,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .frame_params   = videotoolbox_frame_params,
    .init           = videotoolbox_common_init,
    .uninit         = videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
  1003. AVVideotoolboxContext *av_videotoolbox_alloc_context(void)
  1004. {
  1005. AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
  1006. if (ret) {
  1007. ret->output_callback = videotoolbox_decoder_callback;
  1008. ret->cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
  1009. }
  1010. return ret;
  1011. }
/**
 * Old-API convenience wrapper: equivalent to
 * av_videotoolbox_default_init2(avctx, NULL), i.e. a default-allocated
 * AVVideotoolboxContext.
 */
int av_videotoolbox_default_init(AVCodecContext *avctx)
{
    return av_videotoolbox_default_init2(avctx, NULL);
}
  1016. int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx)
  1017. {
  1018. avctx->hwaccel_context = vtctx ?: av_videotoolbox_alloc_context();
  1019. if (!avctx->hwaccel_context)
  1020. return AVERROR(ENOMEM);
  1021. return videotoolbox_start(avctx);
  1022. }
/**
 * Old-API teardown: stop the decompression session and free the
 * AVVideotoolboxContext installed by av_videotoolbox_default_init*().
 */
void av_videotoolbox_default_free(AVCodecContext *avctx)
{
    videotoolbox_stop(avctx);
    av_freep(&avctx->hwaccel_context);
}
  1028. #endif /* CONFIG_VIDEOTOOLBOX */