/*
 * Videotoolbox hardware acceleration
 *
 * copyright (c) 2012 Sebastien Zwickert
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "config.h"
#if CONFIG_VIDEOTOOLBOX
# include "videotoolbox.h"
#else
# include "vda.h"
#endif
#include "vda_vt_internal.h"
#include "libavutil/avutil.h"
#include "bytestream.h"
#include "h264dec.h"
#include "mpegvideo.h"

#ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
# define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
#endif

#define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING  12

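/* AVBufferRef free callback: drop the reference on the CVPixelBuffer that
 * backs the hwaccel output frame. */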
static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
{
    CVPixelBufferRef cv_buffer = (CVImageBufferRef)data;
    CVPixelBufferRelease(cv_buffer);
}

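/* Copy the incoming access unit into the context's reallocatable scratch buffer. */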
static int videotoolbox_buffer_copy(VTContext *vtctx,
                                    const uint8_t *buffer,
                                    uint32_t size)
{
    void *tmp;

    tmp = av_fast_realloc(vtctx->bitstream,
                          &vtctx->allocated_size,
                          size);
    if (!tmp)
        return AVERROR(ENOMEM);

    vtctx->bitstream = tmp;
    memcpy(vtctx->bitstream, buffer, size);
    vtctx->bitstream_size = size;

    return 0;
}

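/* Allocate a hwaccel frame: only a 1-byte placeholder buffer is needed here,
 * the real CVPixelBuffer is attached at end_frame time. */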
int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
{
    frame->width  = avctx->width;
    frame->height = avctx->height;
    frame->format = avctx->pix_fmt;
    frame->buf[0] = av_buffer_alloc(1);

    if (!frame->buf[0])
        return AVERROR(ENOMEM);

    return 0;
}

#define AV_W8(p, v) *(p) = (v)

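/* Build an avcC atom (AVCDecoderConfigurationRecord) from the active SPS/PPS,
 * as the VideoToolbox H.264 decoder expects. */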
CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;
    CFDataRef data = NULL;
    uint8_t *p;
    int vt_extradata_size = 6 + 2 + h->ps.sps->data_size + 3 + h->ps.pps->data_size;
    uint8_t *vt_extradata = av_malloc(vt_extradata_size);
    if (!vt_extradata)
        return NULL;

    p = vt_extradata;

    AV_W8(p + 0, 1); /* version */
    AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
    AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
    AV_W8(p + 3, h->ps.sps->data[3]); /* level */
    AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 1 (11) */
    AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
    AV_WB16(p + 6, h->ps.sps->data_size);
    memcpy(p + 8, h->ps.sps->data, h->ps.sps->data_size);
    p += 8 + h->ps.sps->data_size;
    AV_W8(p + 0, 1); /* number of pps */
    AV_WB16(p + 1, h->ps.pps->data_size);
    memcpy(p + 3, h->ps.pps->data, h->ps.pps->data_size);

    p += 3 + h->ps.pps->data_size;
    av_assert0(p - vt_extradata == vt_extradata_size);

    data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
    av_free(vt_extradata);
    return data;
}

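/* Wrap the decoded CVPixelBuffer into frame->buf[0] and export it through
 * data[3], transferring ownership from the hwaccel context to the frame. */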
int ff_videotoolbox_buffer_create(VTContext *vtctx, AVFrame *frame)
{
    av_buffer_unref(&frame->buf[0]);

    frame->buf[0] = av_buffer_create((uint8_t*)vtctx->frame,
                                     sizeof(vtctx->frame),
                                     videotoolbox_buffer_release,
                                     NULL,
                                     AV_BUFFER_FLAG_READONLY);
    if (!frame->buf[0]) {
        return AVERROR(ENOMEM);
    }

    frame->data[3] = (uint8_t*)vtctx->frame;
    vtctx->frame = NULL;

    return 0;
}

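/* start_frame: length-prefixed (AVC) input can be copied as a whole access
 * unit; Annex B input is instead converted NAL-by-NAL in decode_slice below. */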
int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
                                     const uint8_t *buffer,
                                     uint32_t size)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    H264Context *h = avctx->priv_data;

    vtctx->bitstream_size = 0;

    if (h->is_avc == 1) {
        return videotoolbox_buffer_copy(vtctx, buffer, size);
    }

    return 0;
}

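/* decode_slice: prepend each Annex B NAL unit with a 4-byte big-endian size
 * so the accumulated bitstream matches the length-prefixed (avcC) layout. */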
int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
                                      const uint8_t *buffer,
                                      uint32_t size)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    H264Context *h = avctx->priv_data;
    void *tmp;

    if (h->is_avc == 1)
        return 0;

    tmp = av_fast_realloc(vtctx->bitstream,
                          &vtctx->allocated_size,
                          vtctx->bitstream_size + size + 4);
    if (!tmp)
        return AVERROR(ENOMEM);

    vtctx->bitstream = tmp;

    AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
    memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);

    vtctx->bitstream_size += size + 4;

    return 0;
}

int ff_videotoolbox_uninit(AVCodecContext *avctx)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    if (vtctx) {
        av_freep(&vtctx->bitstream);
        if (vtctx->frame)
            CVPixelBufferRelease(vtctx->frame);
    }

    return 0;
}

#if CONFIG_VIDEOTOOLBOX

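/* Write an MPEG-4 descriptor length as a 4-byte expandable field (7 bits per
 * byte, continuation bit 0x80 set on all but the last byte). */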
static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
{
    int i;
    uint8_t b;

    for (i = 3; i >= 0; i--) {
        b = (length >> (i * 7)) & 0x7F;
        if (i != 0)
            b |= 0x80;

        bytestream2_put_byteu(pb, b);
    }
}

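/* Build an esds atom payload (ES descriptor + decoder config + decoder
 * specific info + SL config) around avctx->extradata for MPEG-4 decoding. */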
static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
{
    CFDataRef data;
    uint8_t *rw_extradata;
    PutByteContext pb;
    int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
    // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
    int config_size = 13 + 5 + avctx->extradata_size;
    int s;

    if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
        return NULL;

    bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
    bytestream2_put_byteu(&pb, 0);        // version
    bytestream2_put_ne24(&pb, 0);         // flags

    // elementary stream descriptor
    bytestream2_put_byteu(&pb, 0x03);     // ES_DescrTag
    videotoolbox_write_mp4_descr_length(&pb, full_size);
    bytestream2_put_ne16(&pb, 0);         // esid
    bytestream2_put_byteu(&pb, 0);        // stream priority (0-32)

    // decoder configuration descriptor
    bytestream2_put_byteu(&pb, 0x04);     // DecoderConfigDescrTag
    videotoolbox_write_mp4_descr_length(&pb, config_size);
    bytestream2_put_byteu(&pb, 32);       // object type indication. 32 = AV_CODEC_ID_MPEG4
    bytestream2_put_byteu(&pb, 0x11);     // stream type
    bytestream2_put_ne24(&pb, 0);         // buffer size
    bytestream2_put_ne32(&pb, 0);         // max bitrate
    bytestream2_put_ne32(&pb, 0);         // avg bitrate

    // decoder specific descriptor
    bytestream2_put_byteu(&pb, 0x05);     // DecSpecificInfoTag
    videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);
    bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);

    // SLConfigDescriptor
    bytestream2_put_byteu(&pb, 0x06);     // SLConfigDescrTag
    bytestream2_put_byteu(&pb, 0x01);     // length
    bytestream2_put_byteu(&pb, 0x02);     //

    s = bytestream2_size_p(&pb);

    data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);

    av_freep(&rw_extradata);
    return data;
}

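/* Wrap one access unit from vtctx->bitstream in a CMSampleBuffer without
 * copying: the CMBlockBuffer references the existing memory (kCFAllocatorNull). */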
static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
                                                           void *buffer,
                                                           int size)
{
    OSStatus status;
    CMBlockBufferRef  block_buf;
    CMSampleBufferRef sample_buf;

    block_buf  = NULL;
    sample_buf = NULL;

    status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
                                                buffer,             // memoryBlock
                                                size,               // blockLength
                                                kCFAllocatorNull,   // blockAllocator
                                                NULL,               // customBlockSource
                                                0,                  // offsetToData
                                                size,               // dataLength
                                                0,                  // flags
                                                &block_buf);

    if (!status) {
        status = CMSampleBufferCreate(kCFAllocatorDefault,  // allocator
                                      block_buf,            // dataBuffer
                                      TRUE,                 // dataReady
                                      0,                    // makeDataReadyCallback
                                      0,                    // makeDataReadyRefcon
                                      fmt_desc,             // formatDescription
                                      1,                    // numSamples
                                      0,                    // numSampleTimingEntries
                                      NULL,                 // sampleTimingArray
                                      0,                    // numSampleSizeEntries
                                      NULL,                 // sampleSizeArray
                                      &sample_buf);
    }

    if (block_buf)
        CFRelease(block_buf);

    return sample_buf;
}

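/* VTDecompressionSession output callback: stash the decoded CVImageBuffer in
 * the hwaccel context (retained) so end_frame can hand it to the AVFrame. */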
static void videotoolbox_decoder_callback(void *opaque,
                                          void *sourceFrameRefCon,
                                          OSStatus status,
                                          VTDecodeInfoFlags flags,
                                          CVImageBufferRef image_buffer,
                                          CMTime pts,
                                          CMTime duration)
{
    AVCodecContext *avctx = opaque;
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    if (vtctx->frame) {
        CVPixelBufferRelease(vtctx->frame);
        vtctx->frame = NULL;
    }

    if (!image_buffer) {
        av_log(NULL, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n");
        return;
    }

    vtctx->frame = CVPixelBufferRetain(image_buffer);
}

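/* Submit the accumulated bitstream to the decompression session and wait for
 * the asynchronous frame, so the output callback has run before returning. */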
static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
{
    OSStatus status;
    CMSampleBufferRef sample_buf;
    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
                                                   vtctx->bitstream,
                                                   vtctx->bitstream_size);
    if (!sample_buf)
        return -1;

    status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
                                               sample_buf,
                                               0,       // decodeFlags
                                               NULL,    // sourceFrameRefCon
                                               0);      // infoFlagsOut

    if (status == noErr)
        status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);

    CFRelease(sample_buf);

    return status;
}

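/* Common end_frame path: decode the buffered access unit and attach the
 * resulting CVPixelBuffer to the output frame. */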
static int videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
{
    int status;
    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    av_buffer_unref(&frame->buf[0]);

    if (!videotoolbox->session || !vtctx->bitstream)
        return AVERROR_INVALIDDATA;

    status = videotoolbox_session_decode_frame(avctx);

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%d)\n", status);
        return AVERROR_UNKNOWN;
    }

    if (!vtctx->frame)
        return AVERROR_UNKNOWN;

    return ff_videotoolbox_buffer_create(vtctx, frame);
}

static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;
    AVFrame *frame = h->cur_pic_ptr->f;

    return videotoolbox_common_end_frame(avctx, frame);
}

static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
                                         const uint8_t *buffer,
                                         uint32_t size)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    return videotoolbox_buffer_copy(vtctx, buffer, size);
}

static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
                                          const uint8_t *buffer,
                                          uint32_t size)
{
    return 0;
}

static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    AVFrame *frame = s->current_picture_ptr->f;

    return videotoolbox_common_end_frame(avctx, frame);
}

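/* Build the decoder specification dictionary: require a hardware decoder and,
 * when extradata is present, attach the codec-specific avcC/esds sample
 * description extension. */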
static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
                                                          AVCodecContext *avctx)
{
    CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                                   0,
                                                                   &kCFTypeDictionaryKeyCallBacks,
                                                                   &kCFTypeDictionaryValueCallBacks);

    CFDictionarySetValue(config_info,
                         kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
                         kCFBooleanTrue);

    if (avctx->extradata_size) {
        CFMutableDictionaryRef avc_info;
        CFDataRef data = NULL;

        avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                             1,
                                             &kCFTypeDictionaryKeyCallBacks,
                                             &kCFTypeDictionaryValueCallBacks);

        switch (codec_type) {
        case kCMVideoCodecType_MPEG4Video :
            data = videotoolbox_esds_extradata_create(avctx);
            if (data)
                CFDictionarySetValue(avc_info, CFSTR("esds"), data);
            break;
        case kCMVideoCodecType_H264 :
            data = ff_videotoolbox_avcc_extradata_create(avctx);
            if (data)
                CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
            break;
        default:
            break;
        }

        CFDictionarySetValue(config_info,
                             kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
                             avc_info);

        if (data)
            CFRelease(data);

        CFRelease(avc_info);
    }
    return config_info;
}

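/* Destination pixel buffer attributes: the requested pixel format (if any),
 * IOSurface backing, and the coded width/height. */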
static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
                                                             int height,
                                                             OSType pix_fmt)
{
    CFMutableDictionaryRef buffer_attributes;
    CFMutableDictionaryRef io_surface_properties;
    CFNumberRef cv_pix_fmt;
    CFNumberRef w;
    CFNumberRef h;

    w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
    h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
    cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);

    buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                  4,
                                                  &kCFTypeDictionaryKeyCallBacks,
                                                  &kCFTypeDictionaryValueCallBacks);
    io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                      0,
                                                      &kCFTypeDictionaryKeyCallBacks,
                                                      &kCFTypeDictionaryValueCallBacks);

    if (pix_fmt)
        CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);

    CFRelease(io_surface_properties);
    CFRelease(cv_pix_fmt);
    CFRelease(w);
    CFRelease(h);

    return buffer_attributes;
}

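/* Create the CMVideoFormatDescription the session decodes against, carrying
 * the decoder_spec extensions (avcC/esds). */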
static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
                                                                   CFDictionaryRef decoder_spec,
                                                                   int width,
                                                                   int height)
{
    CMFormatDescriptionRef cm_fmt_desc;
    OSStatus status;

    status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
                                            codec_type,
                                            width,
                                            height,
                                            decoder_spec, // Dictionary of extension
                                            &cm_fmt_desc);

    if (status)
        return NULL;

    return cm_fmt_desc;
}

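/* Map the FFmpeg codec ID to a CoreMedia codec type, build the format
 * description and buffer attributes, then create the VTDecompressionSession. */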
static int videotoolbox_default_init(AVCodecContext *avctx)
{
    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
    OSStatus status;
    VTDecompressionOutputCallbackRecord decoder_cb;
    CFDictionaryRef decoder_spec;
    CFDictionaryRef buf_attr;

    if (!videotoolbox) {
        av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
        return -1;
    }

    switch (avctx->codec_id) {
    case AV_CODEC_ID_H263 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
        break;
    case AV_CODEC_ID_H264 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
        break;
    case AV_CODEC_ID_MPEG1VIDEO :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
        break;
    case AV_CODEC_ID_MPEG2VIDEO :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
        break;
    case AV_CODEC_ID_MPEG4 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
        break;
    default :
        break;
    }

    decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);

    videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
                                                                decoder_spec,
                                                                avctx->width,
                                                                avctx->height);
    if (!videotoolbox->cm_fmt_desc) {
        if (decoder_spec)
            CFRelease(decoder_spec);

        av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
        return -1;
    }

    buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
                                                     avctx->height,
                                                     videotoolbox->cv_pix_fmt_type);

    decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
    decoder_cb.decompressionOutputRefCon   = avctx;

    status = VTDecompressionSessionCreate(NULL,                      // allocator
                                          videotoolbox->cm_fmt_desc, // videoFormatDescription
                                          decoder_spec,              // videoDecoderSpecification
                                          buf_attr,                  // destinationImageBufferAttributes
                                          &decoder_cb,               // outputCallback
                                          &videotoolbox->session);   // decompressionSessionOut

    if (decoder_spec)
        CFRelease(decoder_spec);
    if (buf_attr)
        CFRelease(buf_attr);

    switch (status) {
    case kVTVideoDecoderNotAvailableNowErr:
    case kVTVideoDecoderUnsupportedDataFormatErr:
        return AVERROR(ENOSYS);
    case kVTVideoDecoderMalfunctionErr:
        return AVERROR(EINVAL);
    case kVTVideoDecoderBadDataErr :
        return AVERROR_INVALIDDATA;
    case 0:
        return 0;
    default:
        return AVERROR_UNKNOWN;
    }
}

static void videotoolbox_default_free(AVCodecContext *avctx)
{
    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;

    if (videotoolbox) {
        if (videotoolbox->cm_fmt_desc)
            CFRelease(videotoolbox->cm_fmt_desc);

        if (videotoolbox->session) {
            VTDecompressionSessionInvalidate(videotoolbox->session);
            CFRelease(videotoolbox->session);
        }
    }
}

AVHWAccel ff_h263_videotoolbox_hwaccel = {
    .name           = "h263_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H263,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};

AVHWAccel ff_h264_videotoolbox_hwaccel = {
    .name           = "h264_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = ff_videotoolbox_h264_start_frame,
    .decode_slice   = ff_videotoolbox_h264_decode_slice,
    .end_frame      = videotoolbox_h264_end_frame,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};

AVHWAccel ff_mpeg1_videotoolbox_hwaccel = {
    .name           = "mpeg1_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG1VIDEO,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};

AVHWAccel ff_mpeg2_videotoolbox_hwaccel = {
    .name           = "mpeg2_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG2VIDEO,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};

AVHWAccel ff_mpeg4_videotoolbox_hwaccel = {
    .name           = "mpeg4_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG4,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};

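/* Public helpers: allocate a default AVVideotoolboxContext (biplanar 4:2:0
 * video-range output, default output callback) and install/free it on an
 * AVCodecContext. */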
AVVideotoolboxContext *av_videotoolbox_alloc_context(void)
{
    AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));

    if (ret) {
        ret->output_callback = videotoolbox_decoder_callback;
        ret->cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
    }

    return ret;
}

int av_videotoolbox_default_init(AVCodecContext *avctx)
{
    return av_videotoolbox_default_init2(avctx, NULL);
}

int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx)
{
    avctx->hwaccel_context = vtctx ?: av_videotoolbox_alloc_context();
    if (!avctx->hwaccel_context)
        return AVERROR(ENOMEM);
    return videotoolbox_default_init(avctx);
}

void av_videotoolbox_default_free(AVCodecContext *avctx)
{
    videotoolbox_default_free(avctx);
    av_freep(&avctx->hwaccel_context);
}
#endif /* CONFIG_VIDEOTOOLBOX */