You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

691 lines
24KB

  1. /*
  2. * Videotoolbox hardware acceleration
  3. *
  4. * copyright (c) 2012 Sebastien Zwickert
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. #include "config.h"
  23. #if CONFIG_VIDEOTOOLBOX
  24. # include "videotoolbox.h"
  25. #else
  26. # include "vda.h"
  27. #endif
  28. #include "vda_vt_internal.h"
  29. #include "libavutil/avutil.h"
  30. #include "bytestream.h"
  31. #include "h264.h"
  32. #include "mpegvideo.h"
  33. #ifndef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
  34. # define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder CFSTR("EnableHardwareAcceleratedVideoDecoder")
  35. #endif
  36. #define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12
/* AVBufferRef free callback: drops the reference on the CVPixelBuffer that
 * was stashed as the buffer's data pointer by ff_videotoolbox_buffer_create. */
static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
{
    CVPixelBufferRef cv_buffer = (CVImageBufferRef)data;
    CVPixelBufferRelease(cv_buffer);
}
  42. static int videotoolbox_buffer_copy(VTContext *vtctx,
  43. const uint8_t *buffer,
  44. uint32_t size)
  45. {
  46. void *tmp;
  47. tmp = av_fast_realloc(vtctx->bitstream,
  48. &vtctx->allocated_size,
  49. size);
  50. if (!tmp)
  51. return AVERROR(ENOMEM);
  52. vtctx->bitstream = tmp;
  53. memcpy(vtctx->bitstream, buffer, size);
  54. vtctx->bitstream_size = size;
  55. return 0;
  56. }
  57. int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
  58. {
  59. frame->width = avctx->width;
  60. frame->height = avctx->height;
  61. frame->format = avctx->pix_fmt;
  62. frame->buf[0] = av_buffer_alloc(1);
  63. if (!frame->buf[0])
  64. return AVERROR(ENOMEM);
  65. return 0;
  66. }
  67. CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
  68. {
  69. CFDataRef data = NULL;
  70. /* Each VCL NAL in the bitstream sent to the decoder
  71. * is preceded by a 4 bytes length header.
  72. * Change the avcC atom header if needed, to signal headers of 4 bytes. */
  73. if (avctx->extradata_size >= 4 && (avctx->extradata[4] & 0x03) != 0x03) {
  74. uint8_t *rw_extradata = av_memdup(avctx->extradata, avctx->extradata_size);
  75. if (!rw_extradata)
  76. return NULL;
  77. rw_extradata[4] |= 0x03;
  78. data = CFDataCreate(kCFAllocatorDefault, rw_extradata, avctx->extradata_size);
  79. av_freep(&rw_extradata);
  80. } else {
  81. data = CFDataCreate(kCFAllocatorDefault, avctx->extradata, avctx->extradata_size);
  82. }
  83. return data;
  84. }
/* Transfer ownership of the decoded CVPixelBuffer in vtctx->frame to
 * `frame`: wrap it in frame->buf[0] (released via
 * videotoolbox_buffer_release) and expose the raw pointer through
 * frame->data[3], the hwaccel surface-handle convention.
 * Returns 0 on success, AVERROR(ENOMEM) if the wrapper cannot be created
 * (in which case vtctx->frame still owns the pixel buffer). */
int ff_videotoolbox_buffer_create(VTContext *vtctx, AVFrame *frame)
{
    av_buffer_unref(&frame->buf[0]);
    frame->buf[0] = av_buffer_create((uint8_t*)vtctx->frame,
                                     sizeof(vtctx->frame),
                                     videotoolbox_buffer_release,
                                     NULL,
                                     AV_BUFFER_FLAG_READONLY);
    if (!frame->buf[0]) {
        return AVERROR(ENOMEM);
    }

    frame->data[3] = (uint8_t*)vtctx->frame;
    /* Ownership moved into frame->buf[0]; drop our reference slot. */
    vtctx->frame = NULL;

    return 0;
}
/* hwaccel start_frame for H.264: reset the per-frame bitstream.  AVC
 * (length-prefixed) input is already in the framing VideoToolbox expects,
 * so it is copied wholesale here; Annex B input is instead accumulated
 * NAL-by-NAL in ff_videotoolbox_h264_decode_slice.
 * Returns 0 on success or AVERROR(ENOMEM). */
int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
                                     const uint8_t *buffer,
                                     uint32_t size)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    H264Context *h = avctx->priv_data;

    vtctx->bitstream_size = 0;

    if (h->is_avc == 1) {
        return videotoolbox_buffer_copy(vtctx, buffer, size);
    }

    return 0;
}
/* hwaccel decode_slice for H.264: append one Annex B NAL unit to the frame
 * bitstream in the AVCC framing VideoToolbox expects — a 4-byte big-endian
 * length prefix followed by the NAL payload.  AVC input was already copied
 * whole in start_frame and is skipped here.
 * Returns 0 on success, AVERROR(ENOMEM) if the buffer cannot grow. */
int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
                                      const uint8_t *buffer,
                                      uint32_t size)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    H264Context *h = avctx->priv_data;
    void *tmp;

    if (h->is_avc == 1)
        return 0;

    /* Grow by payload + 4 bytes for the length header. */
    tmp = av_fast_realloc(vtctx->bitstream,
                          &vtctx->allocated_size,
                          vtctx->bitstream_size+size+4);
    if (!tmp)
        return AVERROR(ENOMEM);

    vtctx->bitstream = tmp;

    AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
    memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);

    vtctx->bitstream_size += size + 4;

    return 0;
}
/* hwaccel uninit: free the accumulated bitstream buffer and release any
 * decoded CVPixelBuffer still pending in the private context.
 * Always returns 0. */
int ff_videotoolbox_uninit(AVCodecContext *avctx)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    if (vtctx) {
        av_freep(&vtctx->bitstream);
        if (vtctx->frame)
            CVPixelBufferRelease(vtctx->frame);
    }

    return 0;
}
  142. #if CONFIG_VIDEOTOOLBOX
  143. static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
  144. {
  145. int i;
  146. uint8_t b;
  147. for (i = 3; i >= 0; i--) {
  148. b = (length >> (i * 7)) & 0x7F;
  149. if (i != 0)
  150. b |= 0x80;
  151. bytestream2_put_byteu(pb, b);
  152. }
  153. }
/* Synthesize an MPEG-4 'esds' atom payload (ES descriptor wrapping a
 * decoder config descriptor, the codec extradata as DecoderSpecificInfo,
 * and a minimal SLConfigDescriptor) so VideoToolbox can decode MPEG-4.
 * Returns a new CFData (caller releases) or NULL on allocation failure. */
static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
{
    CFDataRef data;
    uint8_t *rw_extradata;
    PutByteContext pb;
    int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
    // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
    int config_size = 13 + 5 + avctx->extradata_size;
    int s;

    if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
        return NULL;

    bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
    bytestream2_put_byteu(&pb, 0); // version
    bytestream2_put_ne24(&pb, 0); // flags

    // elementary stream descriptor
    bytestream2_put_byteu(&pb, 0x03); // ES_DescrTag
    videotoolbox_write_mp4_descr_length(&pb, full_size);
    bytestream2_put_ne16(&pb, 0); // esid
    bytestream2_put_byteu(&pb, 0); // stream priority (0-32)

    // decoder configuration descriptor
    bytestream2_put_byteu(&pb, 0x04); // DecoderConfigDescrTag
    videotoolbox_write_mp4_descr_length(&pb, config_size);
    bytestream2_put_byteu(&pb, 32); // object type indication. 32 = AV_CODEC_ID_MPEG4
    bytestream2_put_byteu(&pb, 0x11); // stream type
    bytestream2_put_ne24(&pb, 0); // buffer size
    bytestream2_put_ne32(&pb, 0); // max bitrate
    bytestream2_put_ne32(&pb, 0); // avg bitrate

    // decoder specific descriptor
    bytestream2_put_byteu(&pb, 0x05); // DecSpecificInfoTag
    videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);
    bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);

    // SLConfigDescriptor
    bytestream2_put_byteu(&pb, 0x06); // SLConfigDescrTag
    bytestream2_put_byteu(&pb, 0x01); // length
    bytestream2_put_byteu(&pb, 0x02); // predefined SL config

    s = bytestream2_size_p(&pb);

    data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);

    av_freep(&rw_extradata);
    return data;
}
/* Wrap `buffer` (NOT copied — kCFAllocatorNull block allocator means the
 * caller's memory must outlive the returned object) in a CMBlockBuffer,
 * then in a single-sample CMSampleBuffer tagged with `fmt_desc`.
 * Returns the sample buffer (caller releases) or NULL on failure. */
static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
                                                           void *buffer,
                                                           int size)
{
    OSStatus status;
    CMBlockBufferRef  block_buf;
    CMSampleBufferRef sample_buf;

    block_buf  = NULL;
    sample_buf = NULL;

    status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
                                                buffer,             // memoryBlock
                                                size,               // blockLength
                                                kCFAllocatorNull,   // blockAllocator
                                                NULL,               // customBlockSource
                                                0,                  // offsetToData
                                                size,               // dataLength
                                                0,                  // flags
                                                &block_buf);

    if (!status) {
        status = CMSampleBufferCreate(kCFAllocatorDefault,  // allocator
                                      block_buf,            // dataBuffer
                                      TRUE,                 // dataReady
                                      0,                    // makeDataReadyCallback
                                      0,                    // makeDataReadyRefcon
                                      fmt_desc,             // formatDescription
                                      1,                    // numSamples
                                      0,                    // numSampleTimingEntries
                                      NULL,                 // sampleTimingArray
                                      0,                    // numSampleSizeEntries
                                      NULL,                 // sampleSizeArray
                                      &sample_buf);
    }

    /* The sample buffer retains the block buffer; drop our reference. */
    if (block_buf)
        CFRelease(block_buf);

    return sample_buf;
}
/* VTDecompressionSession output callback: stash a retained reference to the
 * decoded image buffer in the codec's private VTContext, replacing (and
 * releasing) any frame left over from a previous call.  `opaque` is the
 * AVCodecContext registered as decompressionOutputRefCon. */
static void videotoolbox_decoder_callback(void *opaque,
                                          void *sourceFrameRefCon,
                                          OSStatus status,
                                          VTDecodeInfoFlags flags,
                                          CVImageBufferRef image_buffer,
                                          CMTime pts,
                                          CMTime duration)
{
    AVCodecContext *avctx = opaque;
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    if (vtctx->frame) {
        CVPixelBufferRelease(vtctx->frame);
        vtctx->frame = NULL;
    }

    if (!image_buffer) {
        av_log(NULL, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n");
        return;
    }

    vtctx->frame = CVPixelBufferRetain(image_buffer);
}
/* Submit the accumulated frame bitstream to the decompression session and
 * block until asynchronous frames are delivered, so videotoolbox_decoder_callback
 * has run by the time this returns.
 * Returns noErr (0) on success, -1 if the sample buffer could not be
 * created, or the VideoToolbox error status otherwise. */
static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
{
    OSStatus status;
    CMSampleBufferRef sample_buf;
    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
                                                   vtctx->bitstream,
                                                   vtctx->bitstream_size);

    if (!sample_buf)
        return -1;

    status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
                                               sample_buf,
                                               0,       // decodeFlags
                                               NULL,    // sourceFrameRefCon
                                               0);      // infoFlagsOut

    if (status == noErr)
        status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);

    CFRelease(sample_buf);

    return status;
}
/* Shared end_frame logic: decode the buffered bitstream and attach the
 * resulting CVPixelBuffer to `frame`.  Requires an open session and a
 * non-empty bitstream.
 * Returns 0 on success or a negative AVERROR code. */
static int videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
{
    int status;
    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    if (!videotoolbox->session || !vtctx->bitstream)
        return AVERROR_INVALIDDATA;

    status = videotoolbox_session_decode_frame(avctx);

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%d)\n", status);
        return AVERROR_UNKNOWN;
    }

    /* Decode "succeeded" but the callback delivered no image. */
    if (!vtctx->frame)
        return AVERROR_UNKNOWN;

    return ff_videotoolbox_buffer_create(vtctx, frame);
}
  287. static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
  288. {
  289. H264Context *h = avctx->priv_data;
  290. AVFrame *frame = h->cur_pic_ptr->f;
  291. return videotoolbox_common_end_frame(avctx, frame);
  292. }
/* hwaccel start_frame for the MPEG-family codecs: the full frame bitstream
 * arrives here in one piece, so just copy it into the private context. */
static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
                                         const uint8_t *buffer,
                                         uint32_t size)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    return videotoolbox_buffer_copy(vtctx, buffer, size);
}
/* hwaccel decode_slice for the MPEG-family codecs: intentionally a no-op —
 * the whole frame was already buffered in videotoolbox_mpeg_start_frame. */
static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
                                          const uint8_t *buffer,
                                          uint32_t size)
{
    return 0;
}
  306. static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
  307. {
  308. MpegEncContext *s = avctx->priv_data;
  309. AVFrame *frame = s->current_picture_ptr->f;
  310. return videotoolbox_common_end_frame(avctx, frame);
  311. }
/* Build the decoder specification dictionary for VTDecompressionSessionCreate:
 * requests hardware-accelerated decoding and, when extradata is present,
 * attaches it as an 'esds' (MPEG-4) or 'avcC' (H.264) sample description
 * extension atom.  Returns a new dictionary; caller releases. */
static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
                                                          AVCodecContext *avctx)
{
    CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                                   1,
                                                                   &kCFTypeDictionaryKeyCallBacks,
                                                                   &kCFTypeDictionaryValueCallBacks);

    CFDictionarySetValue(config_info,
                         kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder,
                         kCFBooleanTrue);

    if (avctx->extradata_size) {
        CFMutableDictionaryRef avc_info;
        CFDataRef data = NULL;

        avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                             1,
                                             &kCFTypeDictionaryKeyCallBacks,
                                             &kCFTypeDictionaryValueCallBacks);

        switch (codec_type) {
        case kCMVideoCodecType_MPEG4Video :
            data = videotoolbox_esds_extradata_create(avctx);
            if (data)
                CFDictionarySetValue(avc_info, CFSTR("esds"), data);
            break;
        case kCMVideoCodecType_H264 :
            data = ff_videotoolbox_avcc_extradata_create(avctx);
            if (data)
                CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
            break;
        default:
            break;
        }

        CFDictionarySetValue(config_info,
                             kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
                             avc_info);

        /* The dictionaries retain their values; drop local references. */
        if (data)
            CFRelease(data);

        CFRelease(avc_info);
    }
    return config_info;
}
/* Build the destination image buffer attributes: pixel format, width,
 * height, and an empty IOSurface-properties dictionary (presence of the
 * kCVPixelBufferIOSurfacePropertiesKey requests IOSurface backing).
 * Returns a new dictionary; caller releases. */
static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
                                                             int height,
                                                             OSType pix_fmt)
{
    CFMutableDictionaryRef buffer_attributes;
    CFMutableDictionaryRef io_surface_properties;
    CFNumberRef cv_pix_fmt;
    CFNumberRef w;
    CFNumberRef h;

    w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
    h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
    cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);

    buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                  4,
                                                  &kCFTypeDictionaryKeyCallBacks,
                                                  &kCFTypeDictionaryValueCallBacks);
    io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                      0,
                                                      &kCFTypeDictionaryKeyCallBacks,
                                                      &kCFTypeDictionaryValueCallBacks);

    CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);

    /* buffer_attributes retains the values; release local references. */
    CFRelease(io_surface_properties);
    CFRelease(cv_pix_fmt);
    CFRelease(w);
    CFRelease(h);

    return buffer_attributes;
}
/* Create the CMVideoFormatDescription describing the compressed stream,
 * including the extension dictionary (extradata atoms) in `decoder_spec`.
 * Returns the description (caller releases) or NULL on error. */
static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
                                                                   CFDictionaryRef decoder_spec,
                                                                   int width,
                                                                   int height)
{
    CMFormatDescriptionRef cm_fmt_desc;
    OSStatus status;

    status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
                                            codec_type,
                                            width,
                                            height,
                                            decoder_spec, // Dictionary of extension
                                            &cm_fmt_desc);

    if (status)
        return NULL;

    return cm_fmt_desc;
}
  399. static int videotoolbox_default_init(AVCodecContext *avctx)
  400. {
  401. AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
  402. OSStatus status;
  403. VTDecompressionOutputCallbackRecord decoder_cb;
  404. CFDictionaryRef decoder_spec;
  405. CFDictionaryRef buf_attr;
  406. int32_t pix_fmt;
  407. if (!videotoolbox) {
  408. av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
  409. return -1;
  410. }
  411. switch( avctx->codec_id ) {
  412. case AV_CODEC_ID_H263 :
  413. videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
  414. break;
  415. case AV_CODEC_ID_H264 :
  416. videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
  417. break;
  418. case AV_CODEC_ID_MPEG1VIDEO :
  419. videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
  420. break;
  421. case AV_CODEC_ID_MPEG2VIDEO :
  422. videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
  423. break;
  424. case AV_CODEC_ID_MPEG4 :
  425. videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
  426. break;
  427. default :
  428. break;
  429. }
  430. pix_fmt = videotoolbox->cv_pix_fmt_type;
  431. decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);
  432. videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
  433. decoder_spec,
  434. avctx->width,
  435. avctx->height);
  436. if (!videotoolbox->cm_fmt_desc) {
  437. if (decoder_spec)
  438. CFRelease(decoder_spec);
  439. av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
  440. return -1;
  441. }
  442. buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
  443. avctx->height,
  444. videotoolbox->cv_pix_fmt_type);
  445. decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
  446. decoder_cb.decompressionOutputRefCon = avctx;
  447. status = VTDecompressionSessionCreate(NULL, // allocator
  448. videotoolbox->cm_fmt_desc, // videoFormatDescription
  449. decoder_spec, // videoDecoderSpecification
  450. buf_attr, // destinationImageBufferAttributes
  451. &decoder_cb, // outputCallback
  452. &videotoolbox->session); // decompressionSessionOut
  453. if (decoder_spec)
  454. CFRelease(decoder_spec);
  455. if (buf_attr)
  456. CFRelease(buf_attr);
  457. switch (status) {
  458. case kVTVideoDecoderNotAvailableNowErr:
  459. case kVTVideoDecoderUnsupportedDataFormatErr:
  460. return AVERROR(ENOSYS);
  461. case kVTVideoDecoderMalfunctionErr:
  462. return AVERROR(EINVAL);
  463. case kVTVideoDecoderBadDataErr :
  464. return AVERROR_INVALIDDATA;
  465. case 0:
  466. return 0;
  467. default:
  468. return AVERROR_UNKNOWN;
  469. }
  470. }
  471. static void videotoolbox_default_free(AVCodecContext *avctx)
  472. {
  473. AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
  474. if (videotoolbox) {
  475. if (videotoolbox->cm_fmt_desc)
  476. CFRelease(videotoolbox->cm_fmt_desc);
  477. if (videotoolbox->session)
  478. VTDecompressionSessionInvalidate(videotoolbox->session);
  479. }
  480. }
/* Hardware accelerator descriptors.  All five share the VTContext private
 * data and the common alloc_frame/uninit helpers; only the per-codec frame
 * callbacks differ: H.264 reframes Annex B NALs slice by slice, while the
 * MPEG-family codecs copy the whole frame in start_frame. */
AVHWAccel ff_h263_videotoolbox_hwaccel = {
    .name           = "h263_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H263,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};

AVHWAccel ff_h264_videotoolbox_hwaccel = {
    .name           = "h264_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = ff_videotoolbox_h264_start_frame,
    .decode_slice   = ff_videotoolbox_h264_decode_slice,
    .end_frame      = videotoolbox_h264_end_frame,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};

AVHWAccel ff_mpeg1_videotoolbox_hwaccel = {
    .name           = "mpeg1_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG1VIDEO,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};

AVHWAccel ff_mpeg2_videotoolbox_hwaccel = {
    .name           = "mpeg2_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG2VIDEO,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};

AVHWAccel ff_mpeg4_videotoolbox_hwaccel = {
    .name           = "mpeg4_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG4,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
/* Allocate a default hwaccel context: output callback wired up and the
 * pixel format preset to bi-planar NV12 (video range).
 * Returns the context (caller frees) or NULL on allocation failure. */
AVVideotoolboxContext *av_videotoolbox_alloc_context(void)
{
    AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));

    if (ret) {
        ret->output_callback = videotoolbox_decoder_callback;
        ret->cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
    }

    return ret;
}
/* Public convenience wrapper: initialize VideoToolbox decoding with a
 * freshly allocated default context.  Returns 0 or a negative AVERROR. */
int av_videotoolbox_default_init(AVCodecContext *avctx)
{
    return av_videotoolbox_default_init2(avctx, NULL);
}
/* Install `vtctx` (or a default-allocated context when NULL — the `?:`
 * GNU extension is used file-wide) as avctx->hwaccel_context, then open
 * the decompression session.  Returns 0 or a negative AVERROR. */
int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx)
{
    avctx->hwaccel_context = vtctx ?: av_videotoolbox_alloc_context();

    if (!avctx->hwaccel_context)
        return AVERROR(ENOMEM);

    return videotoolbox_default_init(avctx);
}
/* Public teardown: release the context's CoreMedia objects, then free the
 * AVVideotoolboxContext itself and clear avctx->hwaccel_context. */
void av_videotoolbox_default_free(AVCodecContext *avctx)
{
    videotoolbox_default_free(avctx);
    av_freep(&avctx->hwaccel_context);
}
  566. #endif /* CONFIG_VIDEOTOOLBOX */