You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

702 lines
24KB

  1. /*
  2. * Videotoolbox hardware acceleration
  3. *
  4. * copyright (c) 2012 Sebastien Zwickert
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. #include "config.h"
  23. #if CONFIG_VIDEOTOOLBOX
  24. # include "videotoolbox.h"
  25. #else
  26. # include "vda.h"
  27. #endif
  28. #include "vda_vt_internal.h"
  29. #include "libavutil/avutil.h"
  30. #include "bytestream.h"
  31. #include "h264.h"
  32. #include "mpegvideo.h"
  33. #ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
  34. # define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
  35. #endif
  36. #define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12
  37. static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
  38. {
  39. CVPixelBufferRef cv_buffer = (CVImageBufferRef)data;
  40. CVPixelBufferRelease(cv_buffer);
  41. }
  42. static int videotoolbox_buffer_copy(VTContext *vtctx,
  43. const uint8_t *buffer,
  44. uint32_t size)
  45. {
  46. void *tmp;
  47. tmp = av_fast_realloc(vtctx->bitstream,
  48. &vtctx->allocated_size,
  49. size);
  50. if (!tmp)
  51. return AVERROR(ENOMEM);
  52. vtctx->bitstream = tmp;
  53. memcpy(vtctx->bitstream, buffer, size);
  54. vtctx->bitstream_size = size;
  55. return 0;
  56. }
  57. int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
  58. {
  59. frame->width = avctx->width;
  60. frame->height = avctx->height;
  61. frame->format = avctx->pix_fmt;
  62. frame->buf[0] = av_buffer_alloc(1);
  63. if (!frame->buf[0])
  64. return AVERROR(ENOMEM);
  65. return 0;
  66. }
  67. #define AV_W8(p, v) *(p) = (v)
  68. CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
  69. {
  70. H264Context *h = avctx->priv_data;
  71. CFDataRef data = NULL;
  72. uint8_t *p;
  73. int vt_extradata_size = 6 + 3 + h->ps.sps->data_size + 4 + h->ps.sps->data_size;
  74. uint8_t *vt_extradata = av_malloc(vt_extradata_size);
  75. if (!vt_extradata)
  76. return NULL;
  77. p = vt_extradata;
  78. AV_W8(p + 0, 1); /* version */
  79. AV_W8(p + 1, h->ps.sps->data[0]); /* profile */
  80. AV_W8(p + 2, h->ps.sps->data[1]); /* profile compat */
  81. AV_W8(p + 3, h->ps.sps->data[2]); /* level */
  82. AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
  83. AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
  84. AV_WB16(p + 6, h->ps.sps->data_size + 1);
  85. AV_W8(p + 8, NAL_SPS | (3 << 5)); // NAL unit header
  86. memcpy(p + 9, h->ps.sps->data, h->ps.sps->data_size);
  87. p += 9 + h->ps.sps->data_size;
  88. AV_W8(p + 0, 1); /* number of pps */
  89. AV_WB16(p + 1, h->ps.pps->data_size + 1);
  90. AV_W8(p + 3, NAL_PPS | (3 << 5)); // NAL unit header
  91. memcpy(p + 4, h->ps.pps->data, h->ps.pps->data_size);
  92. p += 4 + h->ps.pps->data_size;
  93. av_assert0(p - vt_extradata == vt_extradata_size);
  94. data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
  95. av_free(vt_extradata);
  96. return data;
  97. }
  98. int ff_videotoolbox_buffer_create(VTContext *vtctx, AVFrame *frame)
  99. {
  100. av_buffer_unref(&frame->buf[0]);
  101. frame->buf[0] = av_buffer_create((uint8_t*)vtctx->frame,
  102. sizeof(vtctx->frame),
  103. videotoolbox_buffer_release,
  104. NULL,
  105. AV_BUFFER_FLAG_READONLY);
  106. if (!frame->buf[0]) {
  107. return AVERROR(ENOMEM);
  108. }
  109. frame->data[3] = (uint8_t*)vtctx->frame;
  110. vtctx->frame = NULL;
  111. return 0;
  112. }
  113. int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
  114. const uint8_t *buffer,
  115. uint32_t size)
  116. {
  117. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  118. H264Context *h = avctx->priv_data;
  119. vtctx->bitstream_size = 0;
  120. if (h->is_avc == 1) {
  121. return videotoolbox_buffer_copy(vtctx, buffer, size);
  122. }
  123. return 0;
  124. }
  125. int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
  126. const uint8_t *buffer,
  127. uint32_t size)
  128. {
  129. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  130. H264Context *h = avctx->priv_data;
  131. void *tmp;
  132. if (h->is_avc == 1)
  133. return 0;
  134. tmp = av_fast_realloc(vtctx->bitstream,
  135. &vtctx->allocated_size,
  136. vtctx->bitstream_size+size+4);
  137. if (!tmp)
  138. return AVERROR(ENOMEM);
  139. vtctx->bitstream = tmp;
  140. AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
  141. memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);
  142. vtctx->bitstream_size += size + 4;
  143. return 0;
  144. }
  145. int ff_videotoolbox_uninit(AVCodecContext *avctx)
  146. {
  147. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  148. if (vtctx) {
  149. av_freep(&vtctx->bitstream);
  150. if (vtctx->frame)
  151. CVPixelBufferRelease(vtctx->frame);
  152. }
  153. return 0;
  154. }
  155. #if CONFIG_VIDEOTOOLBOX
  156. static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
  157. {
  158. int i;
  159. uint8_t b;
  160. for (i = 3; i >= 0; i--) {
  161. b = (length >> (i * 7)) & 0x7F;
  162. if (i != 0)
  163. b |= 0x80;
  164. bytestream2_put_byteu(pb, b);
  165. }
  166. }
/**
 * Build an MPEG-4 "esds" atom payload (ES_Descriptor) wrapping
 * avctx->extradata, used as the sample description extension for
 * MPEG-4 Part 2 decoding.
 *
 * Returns a CFData the caller must CFRelease(), or NULL on alloc failure.
 */
static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
{
    CFDataRef data;
    uint8_t *rw_extradata;
    PutByteContext pb;
    /* version/flags (3) + each descriptor tag takes 1 tag byte + 4 length
     * bytes (5), plus the fixed descriptor payloads. */
    int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
    // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
    int config_size = 13 + 5 + avctx->extradata_size;
    int s;

    if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
        return NULL;

    bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
    bytestream2_put_byteu(&pb, 0); // version
    bytestream2_put_ne24(&pb, 0); // flags

    // elementary stream descriptor
    bytestream2_put_byteu(&pb, 0x03); // ES_DescrTag
    videotoolbox_write_mp4_descr_length(&pb, full_size);
    bytestream2_put_ne16(&pb, 0); // esid
    bytestream2_put_byteu(&pb, 0); // stream priority (0-32)

    // decoder configuration descriptor
    bytestream2_put_byteu(&pb, 0x04); // DecoderConfigDescrTag
    videotoolbox_write_mp4_descr_length(&pb, config_size);
    bytestream2_put_byteu(&pb, 32); // object type indication. 32 = AV_CODEC_ID_MPEG4
    bytestream2_put_byteu(&pb, 0x11); // stream type
    bytestream2_put_ne24(&pb, 0); // buffer size
    bytestream2_put_ne32(&pb, 0); // max bitrate
    bytestream2_put_ne32(&pb, 0); // avg bitrate

    // decoder specific descriptor: carries the raw codec extradata
    bytestream2_put_byteu(&pb, 0x05); ///< DecSpecificInfoTag
    videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);
    bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);

    // SLConfigDescriptor
    bytestream2_put_byteu(&pb, 0x06); // SLConfigDescrTag
    bytestream2_put_byteu(&pb, 0x01); // length
    bytestream2_put_byteu(&pb, 0x02); //

    s = bytestream2_size_p(&pb);
    data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);

    av_freep(&rw_extradata);
    return data;
}
  207. static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
  208. void *buffer,
  209. int size)
  210. {
  211. OSStatus status;
  212. CMBlockBufferRef block_buf;
  213. CMSampleBufferRef sample_buf;
  214. block_buf = NULL;
  215. sample_buf = NULL;
  216. status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
  217. buffer, // memoryBlock
  218. size, // blockLength
  219. kCFAllocatorNull, // blockAllocator
  220. NULL, // customBlockSource
  221. 0, // offsetToData
  222. size, // dataLength
  223. 0, // flags
  224. &block_buf);
  225. if (!status) {
  226. status = CMSampleBufferCreate(kCFAllocatorDefault, // allocator
  227. block_buf, // dataBuffer
  228. TRUE, // dataReady
  229. 0, // makeDataReadyCallback
  230. 0, // makeDataReadyRefcon
  231. fmt_desc, // formatDescription
  232. 1, // numSamples
  233. 0, // numSampleTimingEntries
  234. NULL, // sampleTimingArray
  235. 0, // numSampleSizeEntries
  236. NULL, // sampleSizeArray
  237. &sample_buf);
  238. }
  239. if (block_buf)
  240. CFRelease(block_buf);
  241. return sample_buf;
  242. }
  243. static void videotoolbox_decoder_callback(void *opaque,
  244. void *sourceFrameRefCon,
  245. OSStatus status,
  246. VTDecodeInfoFlags flags,
  247. CVImageBufferRef image_buffer,
  248. CMTime pts,
  249. CMTime duration)
  250. {
  251. AVCodecContext *avctx = opaque;
  252. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  253. if (vtctx->frame) {
  254. CVPixelBufferRelease(vtctx->frame);
  255. vtctx->frame = NULL;
  256. }
  257. if (!image_buffer) {
  258. av_log(NULL, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n");
  259. return;
  260. }
  261. vtctx->frame = CVPixelBufferRetain(image_buffer);
  262. }
  263. static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
  264. {
  265. OSStatus status;
  266. CMSampleBufferRef sample_buf;
  267. AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
  268. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  269. sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
  270. vtctx->bitstream,
  271. vtctx->bitstream_size);
  272. if (!sample_buf)
  273. return -1;
  274. status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
  275. sample_buf,
  276. 0, // decodeFlags
  277. NULL, // sourceFrameRefCon
  278. 0); // infoFlagsOut
  279. if (status == noErr)
  280. status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);
  281. CFRelease(sample_buf);
  282. return status;
  283. }
  284. static int videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
  285. {
  286. int status;
  287. AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
  288. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  289. av_buffer_unref(&frame->buf[0]);
  290. if (!videotoolbox->session || !vtctx->bitstream)
  291. return AVERROR_INVALIDDATA;
  292. status = videotoolbox_session_decode_frame(avctx);
  293. if (status) {
  294. av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%d)\n", status);
  295. return AVERROR_UNKNOWN;
  296. }
  297. if (!vtctx->frame)
  298. return AVERROR_UNKNOWN;
  299. return ff_videotoolbox_buffer_create(vtctx, frame);
  300. }
  301. static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
  302. {
  303. H264Context *h = avctx->priv_data;
  304. AVFrame *frame = h->cur_pic_ptr->f;
  305. return videotoolbox_common_end_frame(avctx, frame);
  306. }
  307. static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
  308. const uint8_t *buffer,
  309. uint32_t size)
  310. {
  311. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  312. return videotoolbox_buffer_copy(vtctx, buffer, size);
  313. }
/* MPEG-family decode_slice hook: the full picture was already captured
 * in start_frame, so individual slices need no handling. */
static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
                                          const uint8_t *buffer,
                                          uint32_t size)
{
    return 0;
}
  320. static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
  321. {
  322. MpegEncContext *s = avctx->priv_data;
  323. AVFrame *frame = s->current_picture_ptr->f;
  324. return videotoolbox_common_end_frame(avctx, frame);
  325. }
  326. static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
  327. AVCodecContext *avctx)
  328. {
  329. CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
  330. 1,
  331. &kCFTypeDictionaryKeyCallBacks,
  332. &kCFTypeDictionaryValueCallBacks);
  333. CFDictionarySetValue(config_info,
  334. kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
  335. kCFBooleanTrue);
  336. if (avctx->extradata_size) {
  337. CFMutableDictionaryRef avc_info;
  338. CFDataRef data = NULL;
  339. avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
  340. 1,
  341. &kCFTypeDictionaryKeyCallBacks,
  342. &kCFTypeDictionaryValueCallBacks);
  343. switch (codec_type) {
  344. case kCMVideoCodecType_MPEG4Video :
  345. data = videotoolbox_esds_extradata_create(avctx);
  346. if (data)
  347. CFDictionarySetValue(avc_info, CFSTR("esds"), data);
  348. break;
  349. case kCMVideoCodecType_H264 :
  350. data = ff_videotoolbox_avcc_extradata_create(avctx);
  351. if (data)
  352. CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
  353. break;
  354. default:
  355. break;
  356. }
  357. CFDictionarySetValue(config_info,
  358. kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
  359. avc_info);
  360. if (data)
  361. CFRelease(data);
  362. CFRelease(avc_info);
  363. }
  364. return config_info;
  365. }
  366. static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
  367. int height,
  368. OSType pix_fmt)
  369. {
  370. CFMutableDictionaryRef buffer_attributes;
  371. CFMutableDictionaryRef io_surface_properties;
  372. CFNumberRef cv_pix_fmt;
  373. CFNumberRef w;
  374. CFNumberRef h;
  375. w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
  376. h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
  377. cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);
  378. buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
  379. 4,
  380. &kCFTypeDictionaryKeyCallBacks,
  381. &kCFTypeDictionaryValueCallBacks);
  382. io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
  383. 0,
  384. &kCFTypeDictionaryKeyCallBacks,
  385. &kCFTypeDictionaryValueCallBacks);
  386. CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
  387. CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
  388. CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
  389. CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
  390. CFRelease(io_surface_properties);
  391. CFRelease(cv_pix_fmt);
  392. CFRelease(w);
  393. CFRelease(h);
  394. return buffer_attributes;
  395. }
  396. static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
  397. CFDictionaryRef decoder_spec,
  398. int width,
  399. int height)
  400. {
  401. CMFormatDescriptionRef cm_fmt_desc;
  402. OSStatus status;
  403. status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
  404. codec_type,
  405. width,
  406. height,
  407. decoder_spec, // Dictionary of extension
  408. &cm_fmt_desc);
  409. if (status)
  410. return NULL;
  411. return cm_fmt_desc;
  412. }
/**
 * Create the VTDecompressionSession described by avctx->hwaccel_context:
 * map the FFmpeg codec id to a CoreMedia codec type, build the format
 * description (with codec extradata) and output buffer attributes, then
 * open the session with videotoolbox_decoder_callback as output sink.
 *
 * Returns 0 on success or a negative AVERROR mapped from the
 * VideoToolbox status code.
 */
static int videotoolbox_default_init(AVCodecContext *avctx)
{
    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
    OSStatus status;
    VTDecompressionOutputCallbackRecord decoder_cb;
    CFDictionaryRef decoder_spec;
    CFDictionaryRef buf_attr;

    if (!videotoolbox) {
        av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
        return -1;
    }

    /* Map the codec id to the CoreMedia codec type. NOTE(review): an
     * unlisted id silently keeps whatever cm_codec_type was already in
     * the user-supplied context. */
    switch( avctx->codec_id ) {
    case AV_CODEC_ID_H263 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
        break;
    case AV_CODEC_ID_H264 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
        break;
    case AV_CODEC_ID_MPEG1VIDEO :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
        break;
    case AV_CODEC_ID_MPEG2VIDEO :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
        break;
    case AV_CODEC_ID_MPEG4 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
        break;
    default :
        break;
    }

    decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);

    videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
                                                                decoder_spec,
                                                                avctx->width,
                                                                avctx->height);
    if (!videotoolbox->cm_fmt_desc) {
        if (decoder_spec)
            CFRelease(decoder_spec);

        av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
        return -1;
    }

    buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
                                                     avctx->height,
                                                     videotoolbox->cv_pix_fmt_type);

    decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
    decoder_cb.decompressionOutputRefCon   = avctx;

    status = VTDecompressionSessionCreate(NULL,                      // allocator
                                          videotoolbox->cm_fmt_desc, // videoFormatDescription
                                          decoder_spec,              // videoDecoderSpecification
                                          buf_attr,                  // destinationImageBufferAttributes
                                          &decoder_cb,               // outputCallback
                                          &videotoolbox->session);   // decompressionSessionOut

    if (decoder_spec)
        CFRelease(decoder_spec);
    if (buf_attr)
        CFRelease(buf_attr);

    /* Translate the VideoToolbox status into an AVERROR code. */
    switch (status) {
    case kVTVideoDecoderNotAvailableNowErr:
    case kVTVideoDecoderUnsupportedDataFormatErr:
        return AVERROR(ENOSYS);
    case kVTVideoDecoderMalfunctionErr:
        return AVERROR(EINVAL);
    case kVTVideoDecoderBadDataErr :
        return AVERROR_INVALIDDATA;
    case 0:
        return 0;
    default:
        return AVERROR_UNKNOWN;
    }
}
  483. static void videotoolbox_default_free(AVCodecContext *avctx)
  484. {
  485. AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
  486. if (videotoolbox) {
  487. if (videotoolbox->cm_fmt_desc)
  488. CFRelease(videotoolbox->cm_fmt_desc);
  489. if (videotoolbox->session)
  490. VTDecompressionSessionInvalidate(videotoolbox->session);
  491. }
  492. }
/* VideoToolbox hardware acceleration descriptor for H.263. */
AVHWAccel ff_h263_videotoolbox_hwaccel = {
    .name           = "h263_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H263,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
/* VideoToolbox hardware acceleration descriptor for H.264; uses the
 * dedicated h264 hooks that repack Annex B input into AVCC framing. */
AVHWAccel ff_h264_videotoolbox_hwaccel = {
    .name           = "h264_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = ff_videotoolbox_h264_start_frame,
    .decode_slice   = ff_videotoolbox_h264_decode_slice,
    .end_frame      = videotoolbox_h264_end_frame,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
/* VideoToolbox hardware acceleration descriptor for MPEG-1 video. */
AVHWAccel ff_mpeg1_videotoolbox_hwaccel = {
    .name           = "mpeg1_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG1VIDEO,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
/* VideoToolbox hardware acceleration descriptor for MPEG-2 video. */
AVHWAccel ff_mpeg2_videotoolbox_hwaccel = {
    .name           = "mpeg2_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG2VIDEO,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
/* VideoToolbox hardware acceleration descriptor for MPEG-4 Part 2. */
AVHWAccel ff_mpeg4_videotoolbox_hwaccel = {
    .name           = "mpeg4_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG4,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
  553. AVVideotoolboxContext *av_videotoolbox_alloc_context(void)
  554. {
  555. AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
  556. if (ret) {
  557. ret->output_callback = videotoolbox_decoder_callback;
  558. ret->cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
  559. }
  560. return ret;
  561. }
/* Public entry point: set up VideoToolbox decoding with a
 * default-allocated context. */
int av_videotoolbox_default_init(AVCodecContext *avctx)
{
    return av_videotoolbox_default_init2(avctx, NULL);
}
  566. int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx)
  567. {
  568. avctx->hwaccel_context = vtctx ?: av_videotoolbox_alloc_context();
  569. if (!avctx->hwaccel_context)
  570. return AVERROR(ENOMEM);
  571. return videotoolbox_default_init(avctx);
  572. }
/* Public teardown: invalidate the session / release the format
 * description, then free the hwaccel context itself. */
void av_videotoolbox_default_free(AVCodecContext *avctx)
{
    videotoolbox_default_free(avctx);
    av_freep(&avctx->hwaccel_context);
}
  578. #endif /* CONFIG_VIDEOTOOLBOX */