You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1257 lines
42KB

  1. /*
  2. * Videotoolbox hardware acceleration
  3. *
  4. * copyright (c) 2012 Sebastien Zwickert
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. #include "config.h"
  23. #include "videotoolbox.h"
  24. #include "libavutil/hwcontext_videotoolbox.h"
  25. #include "vt_internal.h"
  26. #include "libavutil/avutil.h"
  27. #include "libavutil/hwcontext.h"
  28. #include "libavutil/pixdesc.h"
  29. #include "bytestream.h"
  30. #include "decode.h"
  31. #include "h264dec.h"
  32. #include "hevcdec.h"
  33. #include "mpegvideo.h"
  34. #include <TargetConditionals.h>
  35. #ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
  36. # define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
  37. #endif
  38. #ifndef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
  39. # define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder CFSTR("EnableHardwareAcceleratedVideoDecoder")
  40. #endif
  41. #if !HAVE_KCMVIDEOCODECTYPE_HEVC
  42. enum { kCMVideoCodecType_HEVC = 'hvc1' };
  43. #endif
  44. #define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12
/* Per-frame payload stored in AVFrame.buf[0]: the decoded CVPixelBuffer
 * plus a reference to the hw frames context it was created against. */
typedef struct VTHWFrame {
    CVPixelBufferRef pixbuf;        // retained pixel buffer from the decoder
    AVBufferRef *hw_frames_ctx;     // ref to the matching AVHWFramesContext (may be NULL, old API)
} VTHWFrame;
/* AVBuffer free callback for a VTHWFrame: drops the frames-context
 * reference, releases the pixel buffer and frees the struct itself. */
static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
{
    VTHWFrame *ref = (VTHWFrame *)data;
    av_buffer_unref(&ref->hw_frames_ctx);
    CVPixelBufferRelease(ref->pixbuf);    // safe on NULL pixbuf
    av_free(data);
}
/**
 * Replace the context's bitstream scratch buffer with a copy of the given
 * data, growing the allocation as needed.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure (the old
 *         buffer stays valid in that case, per av_fast_realloc semantics).
 */
static int videotoolbox_buffer_copy(VTContext *vtctx,
                                    const uint8_t *buffer,
                                    uint32_t size)
{
    void *tmp;

    tmp = av_fast_realloc(vtctx->bitstream,
                          &vtctx->allocated_size,
                          size);
    if (!tmp)
        return AVERROR(ENOMEM);

    vtctx->bitstream = tmp;
    memcpy(vtctx->bitstream, buffer, size);
    vtctx->bitstream_size = size;    // copy replaces, never appends

    return 0;
}
/* FrameDecodeData post-process hook: exposes the decoded CVPixelBuffer via
 * frame->data[3] (the AV_PIX_FMT_VIDEOTOOLBOX convention) and attaches the
 * hw frames context saved in the VTHWFrame, if any. */
static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
{
    VTHWFrame *ref = (VTHWFrame *)frame->buf[0]->data;

    /* No pixel buffer means the decoder never produced output for this frame. */
    if (!ref->pixbuf) {
        av_log(avctx, AV_LOG_ERROR, "No frame decoded?\n");
        av_frame_unref(frame);
        return AVERROR_EXTERNAL;
    }

    frame->data[3] = (uint8_t*)ref->pixbuf;

    if (ref->hw_frames_ctx) {
        av_buffer_unref(&frame->hw_frames_ctx);
        frame->hw_frames_ctx = av_buffer_ref(ref->hw_frames_ctx);
        if (!frame->hw_frames_ctx)
            return AVERROR(ENOMEM);
    }

    return 0;
}
/**
 * hwaccel alloc_frame callback: attach decode data, give the frame a
 * VTHWFrame-backed buf[0], and register videotoolbox_postproc_frame to run
 * once decoding of this frame finishes.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
{
    size_t size = sizeof(VTHWFrame);
    uint8_t *data = NULL;
    AVBufferRef *buf = NULL;
    int ret = ff_attach_decode_data(frame);
    FrameDecodeData *fdd;
    if (ret < 0)
        return ret;

    data = av_mallocz(size);
    if (!data)
        return AVERROR(ENOMEM);
    /* Ownership of data passes to the buffer; released via
     * videotoolbox_buffer_release when the last ref goes away. */
    buf = av_buffer_create(data, size, videotoolbox_buffer_release, NULL, 0);
    if (!buf) {
        av_freep(&data);
        return AVERROR(ENOMEM);
    }
    frame->buf[0] = buf;

    fdd = (FrameDecodeData*)frame->private_ref->data;
    fdd->post_process = videotoolbox_postproc_frame;

    frame->width  = avctx->width;
    frame->height = avctx->height;
    frame->format = avctx->pix_fmt;

    return 0;
}
  113. #define AV_W8(p, v) *(p) = (v)
/**
 * Build an avcC (AVCDecoderConfigurationRecord, ISO/IEC 14496-15) box from
 * the currently active SPS/PPS, for the CoreMedia format description.
 * Exactly one SPS and one PPS are embedded.
 *
 * @return a new CFData the caller must release, or NULL on OOM.
 */
CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    H264Context *h = avctx->priv_data;
    CFDataRef data = NULL;
    uint8_t *p;
    /* 6 header bytes + 2-byte SPS length + SPS + (1 count + 2 length) + PPS */
    int vt_extradata_size = 6 + 2 + h->ps.sps->data_size + 3 + h->ps.pps->data_size;
    uint8_t *vt_extradata = av_malloc(vt_extradata_size);
    if (!vt_extradata)
        return NULL;

    p = vt_extradata;

    AV_W8(p + 0, 1); /* version */
    AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
    AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
    AV_W8(p + 3, h->ps.sps->data[3]); /* level */
    AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
    AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
    AV_WB16(p + 6, h->ps.sps->data_size);
    memcpy(p + 8, h->ps.sps->data, h->ps.sps->data_size);
    p += 8 + h->ps.sps->data_size;
    AV_W8(p + 0, 1); /* number of pps */
    AV_WB16(p + 1, h->ps.pps->data_size);
    memcpy(p + 3, h->ps.pps->data, h->ps.pps->data_size);

    p += 3 + h->ps.pps->data_size;
    av_assert0(p - vt_extradata == vt_extradata_size);

    // save sps header (profile/level) used to create decoder session,
    // so we can detect changes and recreate it.
    if (vtctx)
        memcpy(vtctx->sps, h->ps.sps->data + 1, 3);

    data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
    av_free(vt_extradata);
    return data;
}
/**
 * Build an hvcC (HEVCDecoderConfigurationRecord, ISO/IEC 14496-15) box from
 * all VPS/SPS/PPS currently present in the parameter-set lists.
 *
 * The fixed 23-byte header is derived from the active VPS/SPS/PPS; the three
 * trailing NAL arrays (VPS, SPS, PPS) are emitted unconditionally.
 *
 * @return a new CFData the caller must release, or NULL on OOM.
 */
CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
{
    HEVCContext *h = avctx->priv_data;
    int i, num_vps = 0, num_sps = 0, num_pps = 0;
    const HEVCVPS *vps = h->ps.vps;
    const HEVCSPS *sps = h->ps.sps;
    const HEVCPPS *pps = h->ps.pps;
    PTLCommon ptlc = vps->ptl.general_ptl;
    VUI vui = sps->vui;
    uint8_t parallelismType;
    CFDataRef data = NULL;
    uint8_t *p;
    /* 23-byte record header + one 3-byte array header per NAL type */
    int vt_extradata_size = 23 + 3 + 3 + 3;
    uint8_t *vt_extradata;

/* Count the parameter sets of one type and add their sizes (2-byte length
 * prefix each) to the total; T is the uppercase type letter, t lowercase. */
#define COUNT_SIZE_PS(T, t) \
    for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
        if (h->ps.t##ps_list[i]) { \
            const HEVC##T##PS *lps = (const HEVC##T##PS *)h->ps.t##ps_list[i]->data; \
            vt_extradata_size += 2 + lps->data_size; \
            num_##t##ps++; \
        } \
    }

    COUNT_SIZE_PS(V, v)
    COUNT_SIZE_PS(S, s)
    COUNT_SIZE_PS(P, p)

    vt_extradata = av_malloc(vt_extradata_size);
    if (!vt_extradata)
        return NULL;
    p = vt_extradata;

    /* unsigned int(8) configurationVersion = 1; */
    AV_W8(p + 0, 1);

    /*
     * unsigned int(2) general_profile_space;
     * unsigned int(1) general_tier_flag;
     * unsigned int(5) general_profile_idc;
     */
    AV_W8(p + 1, ptlc.profile_space << 6 |
                 ptlc.tier_flag     << 5 |
                 ptlc.profile_idc);

    /* unsigned int(32) general_profile_compatibility_flags; */
    memcpy(p + 2, ptlc.profile_compatibility_flag, 4);

    /* unsigned int(48) general_constraint_indicator_flags; */
    AV_W8(p + 6, ptlc.progressive_source_flag    << 7 |
                 ptlc.interlaced_source_flag     << 6 |
                 ptlc.non_packed_constraint_flag << 5 |
                 ptlc.frame_only_constraint_flag << 4);
    AV_W8(p + 7, 0);
    AV_WN32(p + 8, 0);

    /* unsigned int(8) general_level_idc; */
    AV_W8(p + 12, ptlc.level_idc);

    /*
     * bit(4) reserved = ‘1111’b;
     * unsigned int(12) min_spatial_segmentation_idc;
     */
    AV_W8(p + 13, 0xf0 | (vui.min_spatial_segmentation_idc >> 4));
    AV_W8(p + 14, vui.min_spatial_segmentation_idc & 0xff);

    /*
     * bit(6) reserved = ‘111111’b;
     * unsigned int(2) parallelismType;
     */
    /* parallelismType per 14496-15: 0 = unknown/mixed, 1 = slice,
     * 2 = tile, 3 = wavefront (entropy coding sync) */
    if (!vui.min_spatial_segmentation_idc)
        parallelismType = 0;
    else if (pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
        parallelismType = 0;
    else if (pps->entropy_coding_sync_enabled_flag)
        parallelismType = 3;
    else if (pps->tiles_enabled_flag)
        parallelismType = 2;
    else
        parallelismType = 1;
    AV_W8(p + 15, 0xfc | parallelismType);

    /*
     * bit(6) reserved = ‘111111’b;
     * unsigned int(2) chromaFormat;
     */
    AV_W8(p + 16, sps->chroma_format_idc | 0xfc);

    /*
     * bit(5) reserved = ‘11111’b;
     * unsigned int(3) bitDepthLumaMinus8;
     */
    AV_W8(p + 17, (sps->bit_depth - 8) | 0xfc);

    /*
     * bit(5) reserved = ‘11111’b;
     * unsigned int(3) bitDepthChromaMinus8;
     */
    AV_W8(p + 18, (sps->bit_depth_chroma - 8) | 0xfc);

    /* bit(16) avgFrameRate; */
    AV_WB16(p + 19, 0);

    /*
     * bit(2) constantFrameRate;
     * bit(3) numTemporalLayers;
     * bit(1) temporalIdNested;
     * unsigned int(2) lengthSizeMinusOne;
     */
    /* lengthSizeMinusOne = 3: 4-byte NAL length prefixes, matching the
     * way slices are accumulated in videotoolbox_common_decode_slice(). */
    AV_W8(p + 21, 0                             << 6 |
                  sps->max_sub_layers           << 3 |
                  sps->temporal_id_nesting_flag << 2 |
                  3);

    /* unsigned int(8) numOfArrays; */
    AV_W8(p + 22, 3);

    p += 23;

/* Emit one NAL array (header + every stored parameter set of that type). */
#define APPEND_PS(T, t) \
    /* \
     * bit(1) array_completeness; \
     * unsigned int(1) reserved = 0; \
     * unsigned int(6) NAL_unit_type; \
     */ \
    AV_W8(p, 1 << 7 | \
             HEVC_NAL_##T##PS & 0x3f); \
    /* unsigned int(16) numNalus; */ \
    AV_WB16(p + 1, num_##t##ps); \
    p += 3; \
    for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
        if (h->ps.t##ps_list[i]) { \
            const HEVC##T##PS *lps = (const HEVC##T##PS *)h->ps.t##ps_list[i]->data; \
            /* unsigned int(16) nalUnitLength; */ \
            AV_WB16(p, lps->data_size); \
            /* bit(8*nalUnitLength) nalUnit; */ \
            memcpy(p + 2, lps->data, lps->data_size); \
            p += 2 + lps->data_size; \
        } \
    }

    APPEND_PS(V, v)
    APPEND_PS(S, s)
    APPEND_PS(P, p)

    av_assert0(p - vt_extradata == vt_extradata_size);

    data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
    av_free(vt_extradata);
    return data;
}
  277. int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
  278. const uint8_t *buffer,
  279. uint32_t size)
  280. {
  281. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  282. H264Context *h = avctx->priv_data;
  283. if (h->is_avc == 1) {
  284. return videotoolbox_buffer_copy(vtctx, buffer, size);
  285. }
  286. return 0;
  287. }
/* decode_params hwaccel callback: track changes to the SPS header bytes
 * (profile/compat/level) so the session can be rebuilt when the stream's
 * parameters change, then forward the NAL into the bitstream like a slice. */
static int videotoolbox_h264_decode_params(AVCodecContext *avctx,
                                           int type,
                                           const uint8_t *buffer,
                                           uint32_t size)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    H264Context *h = avctx->priv_data;

    // save sps header (profile/level) used to create decoder session
    if (!vtctx->sps[0])
        memcpy(vtctx->sps, h->ps.sps->data + 1, 3);

    if (type == H264_NAL_SPS) {
        /* buffer[1..3] are profile_idc / constraint flags / level_idc */
        if (size > 4 && memcmp(vtctx->sps, buffer + 1, 3) != 0) {
            vtctx->reconfig_needed = true;
            memcpy(vtctx->sps, buffer + 1, 3);
        }
    }

    // pass-through SPS/PPS changes to the decoder
    return ff_videotoolbox_h264_decode_slice(avctx, buffer, size);
}
/* Append one NAL unit to the accumulated bitstream, converting to the
 * AVCC/HVCC framing VideoToolbox expects: a 4-byte big-endian length
 * prefix followed by the NAL payload. */
static int videotoolbox_common_decode_slice(AVCodecContext *avctx,
                                            const uint8_t *buffer,
                                            uint32_t size)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    void *tmp;

    tmp = av_fast_realloc(vtctx->bitstream,
                          &vtctx->allocated_size,
                          vtctx->bitstream_size+size+4);
    if (!tmp)
        return AVERROR(ENOMEM);

    vtctx->bitstream = tmp;

    AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
    memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);

    vtctx->bitstream_size += size + 4;

    return 0;
}
  324. int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
  325. const uint8_t *buffer,
  326. uint32_t size)
  327. {
  328. H264Context *h = avctx->priv_data;
  329. if (h->is_avc == 1)
  330. return 0;
  331. return videotoolbox_common_decode_slice(avctx, buffer, size);
  332. }
  333. int ff_videotoolbox_uninit(AVCodecContext *avctx)
  334. {
  335. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  336. if (vtctx) {
  337. av_freep(&vtctx->bitstream);
  338. if (vtctx->frame)
  339. CVPixelBufferRelease(vtctx->frame);
  340. }
  341. return 0;
  342. }
#if CONFIG_VIDEOTOOLBOX
// Return the AVVideotoolboxContext that matters currently. Where it comes from
// depends on the API used.
static AVVideotoolboxContext *videotoolbox_get_context(AVCodecContext *avctx)
{
    // Somewhat tricky because the user can call av_videotoolbox_default_free()
    // at any time, even when the codec is closed.
    if (avctx->internal && avctx->internal->hwaccel_priv_data) {
        VTContext *vtctx = avctx->internal->hwaccel_priv_data;
        if (vtctx->vt_ctx)
            return vtctx->vt_ctx;    // new API: context owned by the hwaccel
    }
    return avctx->hwaccel_context;   // old API: user-provided context (may be NULL)
}
/* Move the freshly decoded pixel buffer from the context into the frame's
 * VTHWFrame, and keep vtctx->cached_hw_frames_ctx in sync with the buffer's
 * actual format/dimensions, recreating it when they change mid-stream. */
static int videotoolbox_buffer_create(AVCodecContext *avctx, AVFrame *frame)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)vtctx->frame;
    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
    enum AVPixelFormat sw_format = av_map_videotoolbox_format_to_pixfmt(pixel_format);
    int width = CVPixelBufferGetWidth(pixbuf);
    int height = CVPixelBufferGetHeight(pixbuf);
    AVHWFramesContext *cached_frames;
    VTHWFrame *ref;
    int ret;

    /* The frame must have been prepared by ff_videotoolbox_alloc_frame()
     * and not yet populated. */
    if (!frame->buf[0] || frame->data[3]) {
        av_log(avctx, AV_LOG_ERROR, "videotoolbox: invalid state\n");
        av_frame_unref(frame);
        return AVERROR_EXTERNAL;
    }

    ref = (VTHWFrame *)frame->buf[0]->data;

    if (ref->pixbuf)
        CVPixelBufferRelease(ref->pixbuf);
    /* Transfer ownership of the retained buffer to the frame. */
    ref->pixbuf = vtctx->frame;
    vtctx->frame = NULL;

    // Old API code path.
    if (!vtctx->cached_hw_frames_ctx)
        return 0;

    cached_frames = (AVHWFramesContext*)vtctx->cached_hw_frames_ctx->data;

    /* Recreate the cached frames context if the decoder output no longer
     * matches it (e.g. mid-stream resolution or pixel format change). */
    if (cached_frames->sw_format != sw_format ||
        cached_frames->width != width ||
        cached_frames->height != height) {
        AVBufferRef *hw_frames_ctx = av_hwframe_ctx_alloc(cached_frames->device_ref);
        AVHWFramesContext *hw_frames;
        if (!hw_frames_ctx)
            return AVERROR(ENOMEM);

        hw_frames = (AVHWFramesContext*)hw_frames_ctx->data;
        hw_frames->format = cached_frames->format;
        hw_frames->sw_format = sw_format;
        hw_frames->width = width;
        hw_frames->height = height;

        ret = av_hwframe_ctx_init(hw_frames_ctx);
        if (ret < 0) {
            av_buffer_unref(&hw_frames_ctx);
            return ret;
        }

        av_buffer_unref(&vtctx->cached_hw_frames_ctx);
        vtctx->cached_hw_frames_ctx = hw_frames_ctx;
    }

    av_buffer_unref(&ref->hw_frames_ctx);
    ref->hw_frames_ctx = av_buffer_ref(vtctx->cached_hw_frames_ctx);
    if (!ref->hw_frames_ctx)
        return AVERROR(ENOMEM);

    return 0;
}
  408. static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
  409. {
  410. int i;
  411. uint8_t b;
  412. for (i = 3; i >= 0; i--) {
  413. b = (length >> (i * 7)) & 0x7F;
  414. if (i != 0)
  415. b |= 0x80;
  416. bytestream2_put_byteu(pb, b);
  417. }
  418. }
/* Build an MPEG-4 "esds" (ES_Descriptor, ISO/IEC 14496-1) atom payload
 * wrapping avctx->extradata, for the CoreMedia format description.
 * Returns a new CFData the caller must release, or NULL on OOM. */
static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
{
    CFDataRef data;
    uint8_t *rw_extradata;
    PutByteContext pb;
    /* version/flags + ES_Descr header + DecoderConfigDescr + DecSpecificInfo
     * header + extradata + SLConfigDescriptor */
    int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
    // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
    int config_size = 13 + 5 + avctx->extradata_size;
    int s;

    if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
        return NULL;

    bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
    bytestream2_put_byteu(&pb, 0);        // version
    bytestream2_put_ne24(&pb, 0);         // flags

    // elementary stream descriptor
    bytestream2_put_byteu(&pb, 0x03);     // ES_DescrTag
    videotoolbox_write_mp4_descr_length(&pb, full_size);
    bytestream2_put_ne16(&pb, 0);         // esid
    bytestream2_put_byteu(&pb, 0);        // stream priority (0-32)

    // decoder configuration descriptor
    bytestream2_put_byteu(&pb, 0x04);     // DecoderConfigDescrTag
    videotoolbox_write_mp4_descr_length(&pb, config_size);
    bytestream2_put_byteu(&pb, 32);       // object type indication. 32 = AV_CODEC_ID_MPEG4
    bytestream2_put_byteu(&pb, 0x11);     // stream type
    bytestream2_put_ne24(&pb, 0);         // buffer size
    bytestream2_put_ne32(&pb, 0);         // max bitrate
    bytestream2_put_ne32(&pb, 0);         // avg bitrate

    // decoder specific descriptor
    bytestream2_put_byteu(&pb, 0x05);     ///< DecSpecificInfoTag
    videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);

    bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);

    // SLConfigDescriptor
    bytestream2_put_byteu(&pb, 0x06);     // SLConfigDescrTag
    bytestream2_put_byteu(&pb, 0x01);     // length
    bytestream2_put_byteu(&pb, 0x02);     // predefined SL config (as in MP4 files)

    s = bytestream2_size_p(&pb);

    data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);

    av_freep(&rw_extradata);
    return data;
}
/* Wrap an existing memory block into a one-sample CMSampleBuffer for
 * submission to VTDecompressionSessionDecodeFrame. The block buffer uses
 * kCFAllocatorNull, so the memory is borrowed, not copied: `buffer` must
 * outlive the returned sample buffer. Returns NULL on failure. */
static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
                                                           void *buffer,
                                                           int size)
{
    OSStatus status;
    CMBlockBufferRef  block_buf;
    CMSampleBufferRef sample_buf;

    block_buf  = NULL;
    sample_buf = NULL;

    status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
                                                buffer,             // memoryBlock
                                                size,               // blockLength
                                                kCFAllocatorNull,   // blockAllocator
                                                NULL,               // customBlockSource
                                                0,                  // offsetToData
                                                size,               // dataLength
                                                0,                  // flags
                                                &block_buf);

    if (!status) {
        status = CMSampleBufferCreate(kCFAllocatorDefault,  // allocator
                                      block_buf,            // dataBuffer
                                      TRUE,                 // dataReady
                                      0,                    // makeDataReadyCallback
                                      0,                    // makeDataReadyRefcon
                                      fmt_desc,             // formatDescription
                                      1,                    // numSamples
                                      0,                    // numSampleTimingEntries
                                      NULL,                 // sampleTimingArray
                                      0,                    // numSampleSizeEntries
                                      NULL,                 // sampleSizeArray
                                      &sample_buf);
    }

    /* The sample buffer retains the block buffer; drop our reference. */
    if (block_buf)
        CFRelease(block_buf);

    return sample_buf;
}
  495. static void videotoolbox_decoder_callback(void *opaque,
  496. void *sourceFrameRefCon,
  497. OSStatus status,
  498. VTDecodeInfoFlags flags,
  499. CVImageBufferRef image_buffer,
  500. CMTime pts,
  501. CMTime duration)
  502. {
  503. AVCodecContext *avctx = opaque;
  504. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  505. if (vtctx->frame) {
  506. CVPixelBufferRelease(vtctx->frame);
  507. vtctx->frame = NULL;
  508. }
  509. if (!image_buffer) {
  510. av_log(NULL, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n");
  511. return;
  512. }
  513. vtctx->frame = CVPixelBufferRetain(image_buffer);
  514. }
/* Submit the accumulated bitstream to the decompression session and wait
 * for the (asynchronous) output callback to complete, so vtctx->frame is
 * valid on return. Returns an OSStatus (noErr on success, -1 if the sample
 * buffer could not be created). */
static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
{
    OSStatus status;
    CMSampleBufferRef sample_buf;
    AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
                                                   vtctx->bitstream,
                                                   vtctx->bitstream_size);

    if (!sample_buf)
        return -1;

    status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
                                               sample_buf,
                                               0,       // decodeFlags
                                               NULL,    // sourceFrameRefCon
                                               0);      // infoFlagsOut
    if (status == noErr)
        /* Block until the decoder callback has run for this frame. */
        status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);

    CFRelease(sample_buf);

    return status;
}
  536. static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
  537. CFDictionaryRef decoder_spec,
  538. int width,
  539. int height)
  540. {
  541. CMFormatDescriptionRef cm_fmt_desc;
  542. OSStatus status;
  543. status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
  544. codec_type,
  545. width,
  546. height,
  547. decoder_spec, // Dictionary of extension
  548. &cm_fmt_desc);
  549. if (status)
  550. return NULL;
  551. return cm_fmt_desc;
  552. }
/* Build the destination-image-buffer attributes dictionary for the
 * decompression session: dimensions, optional pixel format (pix_fmt == 0
 * lets VideoToolbox choose) and IOSurface/OpenGL compatibility.
 * Caller releases the returned dictionary. */
static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
                                                             int height,
                                                             OSType pix_fmt)
{
    CFMutableDictionaryRef buffer_attributes;
    CFMutableDictionaryRef io_surface_properties;
    CFNumberRef cv_pix_fmt;
    CFNumberRef w;
    CFNumberRef h;

    w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
    h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
    cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);

    buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                  4,
                                                  &kCFTypeDictionaryKeyCallBacks,
                                                  &kCFTypeDictionaryValueCallBacks);
    /* Empty dict requests default IOSurface-backed buffers. */
    io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                      0,
                                                      &kCFTypeDictionaryKeyCallBacks,
                                                      &kCFTypeDictionaryValueCallBacks);

    if (pix_fmt)
        CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
#if TARGET_OS_IPHONE
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferOpenGLESCompatibilityKey, kCFBooleanTrue);
#else
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey, kCFBooleanTrue);
#endif

    /* Dictionary retains its values; drop our references. */
    CFRelease(io_surface_properties);
    CFRelease(cv_pix_fmt);
    CFRelease(w);
    CFRelease(h);

    return buffer_attributes;
}
/* Build the videoDecoderSpecification dictionary: request hardware decoding
 * (required for H.264/MPEG, merely enabled for HEVC to allow software
 * fallback) and attach the codec-specific extradata atom (esds/avcC/hvcC)
 * as a sample-description extension. Caller releases the returned dict. */
static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
                                                          AVCodecContext *avctx)
{
    CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                                   0,
                                                                   &kCFTypeDictionaryKeyCallBacks,
                                                                   &kCFTypeDictionaryValueCallBacks);

    CFDictionarySetValue(config_info,
                         codec_type == kCMVideoCodecType_HEVC ?
                            kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder :
                            kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
                         kCFBooleanTrue);

    CFMutableDictionaryRef avc_info;
    CFDataRef data = NULL;

    avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                         1,
                                         &kCFTypeDictionaryKeyCallBacks,
                                         &kCFTypeDictionaryValueCallBacks);

    switch (codec_type) {
    case kCMVideoCodecType_MPEG4Video :
        if (avctx->extradata_size)
            data = videotoolbox_esds_extradata_create(avctx);
        if (data)
            CFDictionarySetValue(avc_info, CFSTR("esds"), data);
        break;
    case kCMVideoCodecType_H264 :
        data = ff_videotoolbox_avcc_extradata_create(avctx);
        if (data)
            CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
        break;
    case kCMVideoCodecType_HEVC :
        data = ff_videotoolbox_hvcc_extradata_create(avctx);
        if (data)
            CFDictionarySetValue(avc_info, CFSTR("hvcC"), data);
        break;
    default:
        break;
    }

    CFDictionarySetValue(config_info,
            kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
            avc_info);

    /* Both the data and the atoms dict are retained by their containers. */
    if (data)
        CFRelease(data);

    CFRelease(avc_info);
    return config_info;
}
/* Create and configure the VTDecompressionSession for the current codec:
 * map the codec ID to a CoreMedia codec type, build the decoder spec and
 * format description, create the session, and translate session-creation
 * OSStatus values into AVERROR codes. */
static int videotoolbox_start(AVCodecContext *avctx)
{
    AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
    OSStatus status;
    VTDecompressionOutputCallbackRecord decoder_cb;
    CFDictionaryRef decoder_spec;
    CFDictionaryRef buf_attr;

    if (!videotoolbox) {
        av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
        return -1;
    }

    switch( avctx->codec_id ) {
    case AV_CODEC_ID_H263 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
        break;
    case AV_CODEC_ID_H264 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
        break;
    case AV_CODEC_ID_HEVC :
        videotoolbox->cm_codec_type = kCMVideoCodecType_HEVC;
        break;
    case AV_CODEC_ID_MPEG1VIDEO :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
        break;
    case AV_CODEC_ID_MPEG2VIDEO :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
        break;
    case AV_CODEC_ID_MPEG4 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
        break;
    default :
        break;
    }

    decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);
    if (!decoder_spec) {
        av_log(avctx, AV_LOG_ERROR, "decoder specification creation failed\n");
        return -1;
    }

    videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
                                                                decoder_spec,
                                                                avctx->width,
                                                                avctx->height);
    if (!videotoolbox->cm_fmt_desc) {
        if (decoder_spec)
            CFRelease(decoder_spec);

        av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
        return -1;
    }

    /* cv_pix_fmt_type == 0 lets VideoToolbox pick the output format. */
    buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
                                                     avctx->height,
                                                     videotoolbox->cv_pix_fmt_type);

    decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
    decoder_cb.decompressionOutputRefCon   = avctx;

    status = VTDecompressionSessionCreate(NULL,                      // allocator
                                          videotoolbox->cm_fmt_desc, // videoFormatDescription
                                          decoder_spec,              // videoDecoderSpecification
                                          buf_attr,                  // destinationImageBufferAttributes
                                          &decoder_cb,               // outputCallback
                                          &videotoolbox->session);   // decompressionSessionOut

    if (decoder_spec)
        CFRelease(decoder_spec);
    if (buf_attr)
        CFRelease(buf_attr);

    switch (status) {
    case kVTVideoDecoderNotAvailableNowErr:
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox session not available.\n");
        return AVERROR(ENOSYS);
    case kVTVideoDecoderUnsupportedDataFormatErr:
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox does not support this format.\n");
        return AVERROR(ENOSYS);
    case kVTCouldNotFindVideoDecoderErr:
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder for this format not found.\n");
        return AVERROR(ENOSYS);
    case kVTVideoDecoderMalfunctionErr:
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox malfunction.\n");
        return AVERROR(EINVAL);
    case kVTVideoDecoderBadDataErr:
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox reported invalid data.\n");
        return AVERROR_INVALIDDATA;
    case 0:
        return 0;
    default:
        av_log(avctx, AV_LOG_VERBOSE, "Unknown VideoToolbox session creation error %d\n", (int)status);
        return AVERROR_UNKNOWN;
    }
}
  721. static void videotoolbox_stop(AVCodecContext *avctx)
  722. {
  723. AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
  724. if (!videotoolbox)
  725. return;
  726. if (videotoolbox->cm_fmt_desc) {
  727. CFRelease(videotoolbox->cm_fmt_desc);
  728. videotoolbox->cm_fmt_desc = NULL;
  729. }
  730. if (videotoolbox->session) {
  731. VTDecompressionSessionInvalidate(videotoolbox->session);
  732. CFRelease(videotoolbox->session);
  733. videotoolbox->session = NULL;
  734. }
  735. }
  736. static const char *videotoolbox_error_string(OSStatus status)
  737. {
  738. switch (status) {
  739. case kVTVideoDecoderBadDataErr:
  740. return "bad data";
  741. case kVTVideoDecoderMalfunctionErr:
  742. return "decoder malfunction";
  743. case kVTInvalidSessionErr:
  744. return "invalid session";
  745. }
  746. return "unknown";
  747. }
/* Shared end_frame path: restart the session if a reconfig was flagged,
 * submit the accumulated bitstream, and hand the decoded pixel buffer to
 * the frame via videotoolbox_buffer_create(). */
static int videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
{
    OSStatus status;
    AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    /* VideoToolbox output is already cropped; clear any cropping the
     * software decoder recorded on the frame. */
    frame->crop_right = 0;
    frame->crop_left = 0;
    frame->crop_top = 0;
    frame->crop_bottom = 0;

    /* Recreate the session if an SPS change (or a decode error below)
     * flagged a reconfiguration. */
    if (vtctx->reconfig_needed == true) {
        vtctx->reconfig_needed = false;
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder needs reconfig, restarting..\n");
        videotoolbox_stop(avctx);
        if (videotoolbox_start(avctx) != 0) {
            return AVERROR_EXTERNAL;
        }
    }

    if (!videotoolbox->session || !vtctx->bitstream || !vtctx->bitstream_size)
        return AVERROR_INVALIDDATA;

    status = videotoolbox_session_decode_frame(avctx);
    if (status != noErr) {
        /* Malfunction/invalid-session errors are often recoverable by
         * recreating the session on the next frame. */
        if (status == kVTVideoDecoderMalfunctionErr || status == kVTInvalidSessionErr)
            vtctx->reconfig_needed = true;
        av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%s, %d)\n", videotoolbox_error_string(status), (int)status);
        return AVERROR_UNKNOWN;
    }

    if (!vtctx->frame) {
        vtctx->reconfig_needed = true;
        return AVERROR_UNKNOWN;
    }

    return videotoolbox_buffer_create(avctx, frame);
}
  780. static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
  781. {
  782. H264Context *h = avctx->priv_data;
  783. AVFrame *frame = h->cur_pic_ptr->f;
  784. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  785. int ret = videotoolbox_common_end_frame(avctx, frame);
  786. vtctx->bitstream_size = 0;
  787. return ret;
  788. }
  789. static int videotoolbox_hevc_start_frame(AVCodecContext *avctx,
  790. const uint8_t *buffer,
  791. uint32_t size)
  792. {
  793. return 0;
  794. }
  795. static int videotoolbox_hevc_decode_slice(AVCodecContext *avctx,
  796. const uint8_t *buffer,
  797. uint32_t size)
  798. {
  799. return videotoolbox_common_decode_slice(avctx, buffer, size);
  800. }
  801. static int videotoolbox_hevc_decode_params(AVCodecContext *avctx,
  802. int type,
  803. const uint8_t *buffer,
  804. uint32_t size)
  805. {
  806. return videotoolbox_common_decode_slice(avctx, buffer, size);
  807. }
/* HEVC end_frame: submit the accumulated access unit, clear cropping on
 * the output frame (VideoToolbox output is pre-cropped), and reset the
 * bitstream accumulator. */
static int videotoolbox_hevc_end_frame(AVCodecContext *avctx)
{
    HEVCContext *h = avctx->priv_data;
    AVFrame *frame = h->ref->frame;
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    h->output_frame->crop_right = 0;
    h->output_frame->crop_left = 0;
    h->output_frame->crop_top = 0;
    h->output_frame->crop_bottom = 0;

    int ret = videotoolbox_common_end_frame(avctx, frame);
    vtctx->bitstream_size = 0;
    return ret;
}
  821. static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
  822. const uint8_t *buffer,
  823. uint32_t size)
  824. {
  825. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  826. return videotoolbox_buffer_copy(vtctx, buffer, size);
  827. }
  828. static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
  829. const uint8_t *buffer,
  830. uint32_t size)
  831. {
  832. return 0;
  833. }
  834. static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
  835. {
  836. MpegEncContext *s = avctx->priv_data;
  837. AVFrame *frame = s->current_picture_ptr->f;
  838. return videotoolbox_common_end_frame(avctx, frame);
  839. }
/* Hwaccel uninit: release everything videotoolbox_common_init() set up.
 * Also used as that function's error-path cleanup, so it must tolerate a
 * partially-initialized context. */
static int videotoolbox_uninit(AVCodecContext *avctx)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    if (!vtctx)
        return 0;

    ff_videotoolbox_uninit(avctx);

    /* Stop the decompression session before freeing the VT context that
     * owns it. */
    if (vtctx->vt_ctx)
        videotoolbox_stop(avctx);

    av_buffer_unref(&vtctx->cached_hw_frames_ctx);
    av_freep(&vtctx->vt_ctx);

    return 0;
}
  852. static enum AVPixelFormat videotoolbox_best_pixel_format(AVCodecContext *avctx) {
  853. const AVPixFmtDescriptor *descriptor = av_pix_fmt_desc_get(avctx->pix_fmt);
  854. if (!descriptor)
  855. return AV_PIX_FMT_NV12; // same as av_videotoolbox_alloc_context()
  856. int depth = descriptor->comp[0].depth;
  857. if (depth > 8) {
  858. return AV_PIX_FMT_P010;
  859. }
  860. return AV_PIX_FMT_NV12;
  861. }
/* Hwaccel init for the "new" API: build an AVVideotoolboxContext from the
 * caller-supplied hw_frames_ctx / hw_device_ctx and start the session.
 * Returns 0 on success, a negative AVERROR code on failure. */
static int videotoolbox_common_init(AVCodecContext *avctx)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    AVHWFramesContext *hw_frames;
    int err;

    // Old API - do nothing.
    if (avctx->hwaccel_context)
        return 0;

    if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) {
        av_log(avctx, AV_LOG_ERROR,
               "Either hw_frames_ctx or hw_device_ctx must be set.\n");
        return AVERROR(EINVAL);
    }

    vtctx->vt_ctx = av_videotoolbox_alloc_context();
    if (!vtctx->vt_ctx) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    if (avctx->hw_frames_ctx) {
        hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
    } else {
        // No frames context supplied: derive one from the device context.
        avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
        if (!avctx->hw_frames_ctx) {
            err = AVERROR(ENOMEM);
            goto fail;
        }

        hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
        hw_frames->format = AV_PIX_FMT_VIDEOTOOLBOX;
        hw_frames->sw_format = videotoolbox_best_pixel_format(avctx);
        hw_frames->width = avctx->width;
        hw_frames->height = avctx->height;

        err = av_hwframe_ctx_init(avctx->hw_frames_ctx);
        if (err < 0) {
            av_buffer_unref(&avctx->hw_frames_ctx);
            goto fail;
        }
    }

    // Keep our own reference; released in videotoolbox_uninit().
    vtctx->cached_hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
    if (!vtctx->cached_hw_frames_ctx) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    // Map the sw format (+ range) to the CoreVideo pixel format VT outputs.
    bool full_range = avctx->color_range == AVCOL_RANGE_JPEG;
    vtctx->vt_ctx->cv_pix_fmt_type =
        av_map_videotoolbox_format_from_pixfmt2(hw_frames->sw_format, full_range);
    if (!vtctx->vt_ctx->cv_pix_fmt_type) {
        av_log(avctx, AV_LOG_ERROR, "Unknown sw_format.\n");
        err = AVERROR(EINVAL);
        goto fail;
    }

    err = videotoolbox_start(avctx);
    if (err < 0)
        goto fail;

    return 0;

fail:
    // videotoolbox_uninit() handles partially-initialized state.
    videotoolbox_uninit(avctx);
    return err;
}
  920. static int videotoolbox_frame_params(AVCodecContext *avctx,
  921. AVBufferRef *hw_frames_ctx)
  922. {
  923. AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
  924. frames_ctx->format = AV_PIX_FMT_VIDEOTOOLBOX;
  925. frames_ctx->width = avctx->coded_width;
  926. frames_ctx->height = avctx->coded_height;
  927. frames_ctx->sw_format = videotoolbox_best_pixel_format(avctx);
  928. return 0;
  929. }
  930. const AVHWAccel ff_h263_videotoolbox_hwaccel = {
  931. .name = "h263_videotoolbox",
  932. .type = AVMEDIA_TYPE_VIDEO,
  933. .id = AV_CODEC_ID_H263,
  934. .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
  935. .alloc_frame = ff_videotoolbox_alloc_frame,
  936. .start_frame = videotoolbox_mpeg_start_frame,
  937. .decode_slice = videotoolbox_mpeg_decode_slice,
  938. .end_frame = videotoolbox_mpeg_end_frame,
  939. .frame_params = videotoolbox_frame_params,
  940. .init = videotoolbox_common_init,
  941. .uninit = videotoolbox_uninit,
  942. .priv_data_size = sizeof(VTContext),
  943. };
  944. const AVHWAccel ff_hevc_videotoolbox_hwaccel = {
  945. .name = "hevc_videotoolbox",
  946. .type = AVMEDIA_TYPE_VIDEO,
  947. .id = AV_CODEC_ID_HEVC,
  948. .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
  949. .alloc_frame = ff_videotoolbox_alloc_frame,
  950. .start_frame = videotoolbox_hevc_start_frame,
  951. .decode_slice = videotoolbox_hevc_decode_slice,
  952. .decode_params = videotoolbox_hevc_decode_params,
  953. .end_frame = videotoolbox_hevc_end_frame,
  954. .frame_params = videotoolbox_frame_params,
  955. .init = videotoolbox_common_init,
  956. .uninit = videotoolbox_uninit,
  957. .priv_data_size = sizeof(VTContext),
  958. };
  959. const AVHWAccel ff_h264_videotoolbox_hwaccel = {
  960. .name = "h264_videotoolbox",
  961. .type = AVMEDIA_TYPE_VIDEO,
  962. .id = AV_CODEC_ID_H264,
  963. .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
  964. .alloc_frame = ff_videotoolbox_alloc_frame,
  965. .start_frame = ff_videotoolbox_h264_start_frame,
  966. .decode_slice = ff_videotoolbox_h264_decode_slice,
  967. .decode_params = videotoolbox_h264_decode_params,
  968. .end_frame = videotoolbox_h264_end_frame,
  969. .frame_params = videotoolbox_frame_params,
  970. .init = videotoolbox_common_init,
  971. .uninit = videotoolbox_uninit,
  972. .priv_data_size = sizeof(VTContext),
  973. };
  974. const AVHWAccel ff_mpeg1_videotoolbox_hwaccel = {
  975. .name = "mpeg1_videotoolbox",
  976. .type = AVMEDIA_TYPE_VIDEO,
  977. .id = AV_CODEC_ID_MPEG1VIDEO,
  978. .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
  979. .alloc_frame = ff_videotoolbox_alloc_frame,
  980. .start_frame = videotoolbox_mpeg_start_frame,
  981. .decode_slice = videotoolbox_mpeg_decode_slice,
  982. .end_frame = videotoolbox_mpeg_end_frame,
  983. .frame_params = videotoolbox_frame_params,
  984. .init = videotoolbox_common_init,
  985. .uninit = videotoolbox_uninit,
  986. .priv_data_size = sizeof(VTContext),
  987. };
  988. const AVHWAccel ff_mpeg2_videotoolbox_hwaccel = {
  989. .name = "mpeg2_videotoolbox",
  990. .type = AVMEDIA_TYPE_VIDEO,
  991. .id = AV_CODEC_ID_MPEG2VIDEO,
  992. .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
  993. .alloc_frame = ff_videotoolbox_alloc_frame,
  994. .start_frame = videotoolbox_mpeg_start_frame,
  995. .decode_slice = videotoolbox_mpeg_decode_slice,
  996. .end_frame = videotoolbox_mpeg_end_frame,
  997. .frame_params = videotoolbox_frame_params,
  998. .init = videotoolbox_common_init,
  999. .uninit = videotoolbox_uninit,
  1000. .priv_data_size = sizeof(VTContext),
  1001. };
  1002. const AVHWAccel ff_mpeg4_videotoolbox_hwaccel = {
  1003. .name = "mpeg4_videotoolbox",
  1004. .type = AVMEDIA_TYPE_VIDEO,
  1005. .id = AV_CODEC_ID_MPEG4,
  1006. .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
  1007. .alloc_frame = ff_videotoolbox_alloc_frame,
  1008. .start_frame = videotoolbox_mpeg_start_frame,
  1009. .decode_slice = videotoolbox_mpeg_decode_slice,
  1010. .end_frame = videotoolbox_mpeg_end_frame,
  1011. .frame_params = videotoolbox_frame_params,
  1012. .init = videotoolbox_common_init,
  1013. .uninit = videotoolbox_uninit,
  1014. .priv_data_size = sizeof(VTContext),
  1015. };
  1016. static AVVideotoolboxContext *av_videotoolbox_alloc_context_with_pix_fmt(enum AVPixelFormat pix_fmt,
  1017. bool full_range)
  1018. {
  1019. AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
  1020. if (ret) {
  1021. ret->output_callback = videotoolbox_decoder_callback;
  1022. OSType cv_pix_fmt_type = av_map_videotoolbox_format_from_pixfmt2(pix_fmt, full_range);
  1023. if (cv_pix_fmt_type == 0) {
  1024. cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
  1025. }
  1026. ret->cv_pix_fmt_type = cv_pix_fmt_type;
  1027. }
  1028. return ret;
  1029. }
/* Public allocator: with no source format known yet, this yields the
 * NV12 video-range fallback (see the _with_pix_fmt variant). */
AVVideotoolboxContext *av_videotoolbox_alloc_context(void)
{
    return av_videotoolbox_alloc_context_with_pix_fmt(AV_PIX_FMT_NONE, false);
}
/* Convenience wrapper: old-API init with a default-allocated context. */
int av_videotoolbox_default_init(AVCodecContext *avctx)
{
    return av_videotoolbox_default_init2(avctx, NULL);
}
  1038. int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx)
  1039. {
  1040. enum AVPixelFormat pix_fmt = videotoolbox_best_pixel_format(avctx);
  1041. bool full_range = avctx->color_range == AVCOL_RANGE_JPEG;
  1042. avctx->hwaccel_context = vtctx ?: av_videotoolbox_alloc_context_with_pix_fmt(pix_fmt, full_range);
  1043. if (!avctx->hwaccel_context)
  1044. return AVERROR(ENOMEM);
  1045. return videotoolbox_start(avctx);
  1046. }
/* Old-API teardown: stop the VT session, then free the hwaccel context
 * installed by av_videotoolbox_default_init2(). */
void av_videotoolbox_default_free(AVCodecContext *avctx)
{
    videotoolbox_stop(avctx);
    av_freep(&avctx->hwaccel_context);
}
  1052. #endif /* CONFIG_VIDEOTOOLBOX */