You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1200 lines
40KB

  1. /*
  2. * Videotoolbox hardware acceleration
  3. *
  4. * copyright (c) 2012 Sebastien Zwickert
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. #include "config.h"
  23. #include "videotoolbox.h"
  24. #include "libavutil/hwcontext_videotoolbox.h"
  25. #include "vt_internal.h"
  26. #include "libavutil/avutil.h"
  27. #include "libavutil/hwcontext.h"
  28. #include "bytestream.h"
  29. #include "decode.h"
  30. #include "h264dec.h"
  31. #include "hevcdec.h"
  32. #include "mpegvideo.h"
  33. #include <TargetConditionals.h>
  34. #ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
  35. # define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
  36. #endif
  37. #ifndef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
  38. # define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder CFSTR("EnableHardwareAcceleratedVideoDecoder")
  39. #endif
  40. #if !HAVE_KCMVIDEOCODECTYPE_HEVC
  41. enum { kCMVideoCodecType_HEVC = 'hvc1' };
  42. #endif
  43. #define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12
  44. static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
  45. {
  46. CVPixelBufferRef cv_buffer = *(CVPixelBufferRef *)data;
  47. CVPixelBufferRelease(cv_buffer);
  48. av_free(data);
  49. }
  50. static int videotoolbox_buffer_copy(VTContext *vtctx,
  51. const uint8_t *buffer,
  52. uint32_t size)
  53. {
  54. void *tmp;
  55. tmp = av_fast_realloc(vtctx->bitstream,
  56. &vtctx->allocated_size,
  57. size);
  58. if (!tmp)
  59. return AVERROR(ENOMEM);
  60. vtctx->bitstream = tmp;
  61. memcpy(vtctx->bitstream, buffer, size);
  62. vtctx->bitstream_size = size;
  63. return 0;
  64. }
  65. static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
  66. {
  67. CVPixelBufferRef ref = *(CVPixelBufferRef *)frame->buf[0]->data;
  68. if (!ref) {
  69. av_log(avctx, AV_LOG_ERROR, "No frame decoded?\n");
  70. av_frame_unref(frame);
  71. return AVERROR_EXTERNAL;
  72. }
  73. frame->data[3] = (uint8_t*)ref;
  74. return 0;
  75. }
/**
 * AVHWAccel alloc_frame(): back the output frame with a small AVBuffer whose
 * payload is a single (initially NULL) CVPixelBufferRef, and register
 * videotoolbox_postproc_frame() so the pixel buffer is surfaced in data[3]
 * once decoding completes.
 *
 * Returns 0 on success or a negative AVERROR code.
 */
int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
{
    size_t size = sizeof(CVPixelBufferRef);
    uint8_t *data = NULL;
    AVBufferRef *buf = NULL;
    int ret = ff_attach_decode_data(frame);
    FrameDecodeData *fdd;
    if (ret < 0)
        return ret;
    /* zeroed payload means "no pixel buffer yet"; ownership of the payload
     * (and of any CVPixelBufferRef later stored in it) passes to the AVBuffer,
     * which releases both via videotoolbox_buffer_release() */
    data = av_mallocz(size);
    if (!data)
        return AVERROR(ENOMEM);
    buf = av_buffer_create(data, size, videotoolbox_buffer_release, NULL, 0);
    if (!buf) {
        av_freep(&data);
        return AVERROR(ENOMEM);
    }
    frame->buf[0] = buf;
    /* hook up the post-decode step that copies the CVPixelBufferRef into data[3] */
    fdd = (FrameDecodeData*)frame->private_ref->data;
    fdd->post_process = videotoolbox_postproc_frame;
    frame->width = avctx->width;
    frame->height = avctx->height;
    frame->format = avctx->pix_fmt;
    return 0;
}
/* write a single byte at p */
#define AV_W8(p, v) *(p) = (v)

/**
 * Serialize the currently active H.264 SPS and PPS into an avcC
 * (AVCDecoderConfigurationRecord, ISO/IEC 14496-15) blob, returned as a
 * CFData for use as the "avcC" sample description extension.
 *
 * Layout: 5-byte header, 1-byte SPS count, [16-bit length + SPS], 1-byte PPS
 * count, [16-bit length + PPS]. Returns NULL on allocation failure; the
 * caller owns (and must CFRelease) the returned CFData.
 */
CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    H264Context *h = avctx->priv_data;
    CFDataRef data = NULL;
    uint8_t *p;
    /* 6 header bytes + (2-byte length + SPS) + (1-byte count + 2-byte length + PPS) */
    int vt_extradata_size = 6 + 2 + h->ps.sps->data_size + 3 + h->ps.pps->data_size;
    uint8_t *vt_extradata = av_malloc(vt_extradata_size);
    if (!vt_extradata)
        return NULL;
    p = vt_extradata;
    AV_W8(p + 0, 1); /* version */
    AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
    AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
    AV_W8(p + 3, h->ps.sps->data[3]); /* level */
    AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
    AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
    AV_WB16(p + 6, h->ps.sps->data_size);
    memcpy(p + 8, h->ps.sps->data, h->ps.sps->data_size);
    p += 8 + h->ps.sps->data_size;
    AV_W8(p + 0, 1); /* number of pps */
    AV_WB16(p + 1, h->ps.pps->data_size);
    memcpy(p + 3, h->ps.pps->data, h->ps.pps->data_size);
    p += 3 + h->ps.pps->data_size;
    av_assert0(p - vt_extradata == vt_extradata_size);
    // save sps header (profile/level) used to create decoder session,
    // so we can detect changes and recreate it.
    if (vtctx)
        memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
    data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
    av_free(vt_extradata);
    return data;
}
  135. CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
  136. {
  137. HEVCContext *h = avctx->priv_data;
  138. const HEVCVPS *vps = (const HEVCVPS *)h->ps.vps_list[0]->data;
  139. const HEVCSPS *sps = (const HEVCSPS *)h->ps.sps_list[0]->data;
  140. int i, num_pps = 0;
  141. const HEVCPPS *pps = h->ps.pps;
  142. PTLCommon ptlc = vps->ptl.general_ptl;
  143. VUI vui = sps->vui;
  144. uint8_t parallelismType;
  145. CFDataRef data = NULL;
  146. uint8_t *p;
  147. int vt_extradata_size = 23 + 5 + vps->data_size + 5 + sps->data_size + 3;
  148. uint8_t *vt_extradata;
  149. for (i = 0; i < HEVC_MAX_PPS_COUNT; i++) {
  150. if (h->ps.pps_list[i]) {
  151. const HEVCPPS *pps = (const HEVCPPS *)h->ps.pps_list[i]->data;
  152. vt_extradata_size += 2 + pps->data_size;
  153. num_pps++;
  154. }
  155. }
  156. vt_extradata = av_malloc(vt_extradata_size);
  157. if (!vt_extradata)
  158. return NULL;
  159. p = vt_extradata;
  160. /* unsigned int(8) configurationVersion = 1; */
  161. AV_W8(p + 0, 1);
  162. /*
  163. * unsigned int(2) general_profile_space;
  164. * unsigned int(1) general_tier_flag;
  165. * unsigned int(5) general_profile_idc;
  166. */
  167. AV_W8(p + 1, ptlc.profile_space << 6 |
  168. ptlc.tier_flag << 5 |
  169. ptlc.profile_idc);
  170. /* unsigned int(32) general_profile_compatibility_flags; */
  171. memcpy(p + 2, ptlc.profile_compatibility_flag, 4);
  172. /* unsigned int(48) general_constraint_indicator_flags; */
  173. AV_W8(p + 6, ptlc.progressive_source_flag << 7 |
  174. ptlc.interlaced_source_flag << 6 |
  175. ptlc.non_packed_constraint_flag << 5 |
  176. ptlc.frame_only_constraint_flag << 4);
  177. AV_W8(p + 7, 0);
  178. AV_WN32(p + 8, 0);
  179. /* unsigned int(8) general_level_idc; */
  180. AV_W8(p + 12, ptlc.level_idc);
  181. /*
  182. * bit(4) reserved = ‘1111’b;
  183. * unsigned int(12) min_spatial_segmentation_idc;
  184. */
  185. AV_W8(p + 13, 0xf0 | (vui.min_spatial_segmentation_idc >> 4));
  186. AV_W8(p + 14, vui.min_spatial_segmentation_idc & 0xff);
  187. /*
  188. * bit(6) reserved = ‘111111’b;
  189. * unsigned int(2) parallelismType;
  190. */
  191. if (!vui.min_spatial_segmentation_idc)
  192. parallelismType = 0;
  193. else if (pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
  194. parallelismType = 0;
  195. else if (pps->entropy_coding_sync_enabled_flag)
  196. parallelismType = 3;
  197. else if (pps->tiles_enabled_flag)
  198. parallelismType = 2;
  199. else
  200. parallelismType = 1;
  201. AV_W8(p + 15, 0xfc | parallelismType);
  202. /*
  203. * bit(6) reserved = ‘111111’b;
  204. * unsigned int(2) chromaFormat;
  205. */
  206. AV_W8(p + 16, sps->chroma_format_idc | 0xfc);
  207. /*
  208. * bit(5) reserved = ‘11111’b;
  209. * unsigned int(3) bitDepthLumaMinus8;
  210. */
  211. AV_W8(p + 17, (sps->bit_depth - 8) | 0xfc);
  212. /*
  213. * bit(5) reserved = ‘11111’b;
  214. * unsigned int(3) bitDepthChromaMinus8;
  215. */
  216. AV_W8(p + 18, (sps->bit_depth_chroma - 8) | 0xfc);
  217. /* bit(16) avgFrameRate; */
  218. AV_WB16(p + 19, 0);
  219. /*
  220. * bit(2) constantFrameRate;
  221. * bit(3) numTemporalLayers;
  222. * bit(1) temporalIdNested;
  223. * unsigned int(2) lengthSizeMinusOne;
  224. */
  225. AV_W8(p + 21, 0 << 6 |
  226. sps->max_sub_layers << 3 |
  227. sps->temporal_id_nesting_flag << 2 |
  228. 3);
  229. /* unsigned int(8) numOfArrays; */
  230. AV_W8(p + 22, 3);
  231. p += 23;
  232. /* vps */
  233. /*
  234. * bit(1) array_completeness;
  235. * unsigned int(1) reserved = 0;
  236. * unsigned int(6) NAL_unit_type;
  237. */
  238. AV_W8(p, 1 << 7 |
  239. HEVC_NAL_VPS & 0x3f);
  240. /* unsigned int(16) numNalus; */
  241. AV_WB16(p + 1, 1);
  242. /* unsigned int(16) nalUnitLength; */
  243. AV_WB16(p + 3, vps->data_size);
  244. /* bit(8*nalUnitLength) nalUnit; */
  245. memcpy(p + 5, vps->data, vps->data_size);
  246. p += 5 + vps->data_size;
  247. /* sps */
  248. AV_W8(p, 1 << 7 |
  249. HEVC_NAL_SPS & 0x3f);
  250. AV_WB16(p + 1, 1);
  251. AV_WB16(p + 3, sps->data_size);
  252. memcpy(p + 5, sps->data, sps->data_size);
  253. p += 5 + sps->data_size;
  254. /* pps */
  255. AV_W8(p, 1 << 7 |
  256. HEVC_NAL_PPS & 0x3f);
  257. AV_WB16(p + 1, num_pps);
  258. p += 3;
  259. for (i = 0; i < HEVC_MAX_PPS_COUNT; i++) {
  260. if (h->ps.pps_list[i]) {
  261. const HEVCPPS *pps = (const HEVCPPS *)h->ps.pps_list[i]->data;
  262. AV_WB16(p, pps->data_size);
  263. memcpy(p + 2, pps->data, pps->data_size);
  264. p += 2 + pps->data_size;
  265. }
  266. }
  267. av_assert0(p - vt_extradata == vt_extradata_size);
  268. data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
  269. av_free(vt_extradata);
  270. return data;
  271. }
  272. static int videotoolbox_set_frame(AVCodecContext *avctx, AVFrame *frame)
  273. {
  274. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  275. if (!frame->buf[0] || frame->data[3]) {
  276. av_log(avctx, AV_LOG_ERROR, "videotoolbox: invalid state\n");
  277. av_frame_unref(frame);
  278. return AVERROR_EXTERNAL;
  279. }
  280. CVPixelBufferRef *ref = (CVPixelBufferRef *)frame->buf[0]->data;
  281. if (*ref)
  282. CVPixelBufferRelease(*ref);
  283. *ref = vtctx->frame;
  284. vtctx->frame = NULL;
  285. return 0;
  286. }
  287. int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
  288. const uint8_t *buffer,
  289. uint32_t size)
  290. {
  291. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  292. H264Context *h = avctx->priv_data;
  293. if (h->is_avc == 1) {
  294. return videotoolbox_buffer_copy(vtctx, buffer, size);
  295. }
  296. return 0;
  297. }
/**
 * H.264 decode_params hook: watch incoming SPS NALs for a change in the
 * profile/compat/level bytes and flag a decoder-session rebuild when they
 * differ from the ones the session was created with; then forward the
 * parameter set into the bitstream like any other NAL.
 */
static int videotoolbox_h264_decode_params(AVCodecContext *avctx,
                                           int type,
                                           const uint8_t *buffer,
                                           uint32_t size)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    H264Context *h = avctx->priv_data;
    // save sps header (profile/level) used to create decoder session
    if (!vtctx->sps[0])
        memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
    if (type == H264_NAL_SPS) {
        /* buffer[1..3] = profile_idc, constraint flags, level_idc of the new SPS */
        if (size > 4 && memcmp(vtctx->sps, buffer + 1, 3) != 0) {
            vtctx->reconfig_needed = true;
            memcpy(vtctx->sps, buffer + 1, 3);
        }
    }
    // pass-through SPS/PPS changes to the decoder
    return ff_videotoolbox_h264_decode_slice(avctx, buffer, size);
}
  317. int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
  318. const uint8_t *buffer,
  319. uint32_t size)
  320. {
  321. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  322. H264Context *h = avctx->priv_data;
  323. void *tmp;
  324. if (h->is_avc == 1)
  325. return 0;
  326. tmp = av_fast_realloc(vtctx->bitstream,
  327. &vtctx->allocated_size,
  328. vtctx->bitstream_size+size+4);
  329. if (!tmp)
  330. return AVERROR(ENOMEM);
  331. vtctx->bitstream = tmp;
  332. AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
  333. memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);
  334. vtctx->bitstream_size += size + 4;
  335. return 0;
  336. }
  337. int ff_videotoolbox_uninit(AVCodecContext *avctx)
  338. {
  339. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  340. if (vtctx) {
  341. av_freep(&vtctx->bitstream);
  342. if (vtctx->frame)
  343. CVPixelBufferRelease(vtctx->frame);
  344. }
  345. return 0;
  346. }
  347. #if CONFIG_VIDEOTOOLBOX
  348. // Return the AVVideotoolboxContext that matters currently. Where it comes from
  349. // depends on the API used.
  350. static AVVideotoolboxContext *videotoolbox_get_context(AVCodecContext *avctx)
  351. {
  352. // Somewhat tricky because the user can call av_videotoolbox_default_free()
  353. // at any time, even when the codec is closed.
  354. if (avctx->internal && avctx->internal->hwaccel_priv_data) {
  355. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  356. if (vtctx->vt_ctx)
  357. return vtctx->vt_ctx;
  358. }
  359. return avctx->hwaccel_context;
  360. }
/**
 * Hand the freshly decoded pixel buffer to the output frame and attach a
 * matching AVHWFramesContext. The cached frames context is rebuilt whenever
 * the decoder's output geometry or pixel format changes mid-stream.
 *
 * Returns 0 on success or a negative AVERROR code.
 */
static int videotoolbox_buffer_create(AVCodecContext *avctx, AVFrame *frame)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    /* interrogate the pending pixel buffer BEFORE videotoolbox_set_frame()
     * moves it out of vtctx->frame */
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)vtctx->frame;
    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
    enum AVPixelFormat sw_format = av_map_videotoolbox_format_to_pixfmt(pixel_format);
    int width = CVPixelBufferGetWidth(pixbuf);
    int height = CVPixelBufferGetHeight(pixbuf);
    AVHWFramesContext *cached_frames;
    int ret;
    ret = videotoolbox_set_frame(avctx, frame);
    if (ret < 0)
        return ret;
    // Old API code path: no frames context is maintained.
    if (!vtctx->cached_hw_frames_ctx)
        return 0;
    cached_frames = (AVHWFramesContext*)vtctx->cached_hw_frames_ctx->data;
    /* recreate the cached frames context if the actual output no longer matches */
    if (cached_frames->sw_format != sw_format ||
        cached_frames->width != width ||
        cached_frames->height != height) {
        AVBufferRef *hw_frames_ctx = av_hwframe_ctx_alloc(cached_frames->device_ref);
        AVHWFramesContext *hw_frames;
        if (!hw_frames_ctx)
            return AVERROR(ENOMEM);
        hw_frames = (AVHWFramesContext*)hw_frames_ctx->data;
        hw_frames->format = cached_frames->format;
        hw_frames->sw_format = sw_format;
        hw_frames->width = width;
        hw_frames->height = height;
        ret = av_hwframe_ctx_init(hw_frames_ctx);
        if (ret < 0) {
            av_buffer_unref(&hw_frames_ctx);
            return ret;
        }
        av_buffer_unref(&vtctx->cached_hw_frames_ctx);
        vtctx->cached_hw_frames_ctx = hw_frames_ctx;
    }
    /* the frame gets its own reference to the (possibly new) frames context */
    av_buffer_unref(&frame->hw_frames_ctx);
    frame->hw_frames_ctx = av_buffer_ref(vtctx->cached_hw_frames_ctx);
    if (!frame->hw_frames_ctx)
        return AVERROR(ENOMEM);
    return 0;
}
  404. static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
  405. {
  406. int i;
  407. uint8_t b;
  408. for (i = 3; i >= 0; i--) {
  409. b = (length >> (i * 7)) & 0x7F;
  410. if (i != 0)
  411. b |= 0x80;
  412. bytestream2_put_byteu(pb, b);
  413. }
  414. }
/**
 * Wrap the codec extradata (MPEG-4 DecoderSpecificInfo) in an esds atom
 * payload: ES_Descriptor > DecoderConfigDescriptor > DecSpecificInfo,
 * followed by an SLConfigDescriptor. Returned as CFData for the "esds"
 * sample description extension; caller must CFRelease. NULL on alloc failure.
 */
static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
{
    CFDataRef data;
    uint8_t *rw_extradata;
    PutByteContext pb;
    /* ES_Descriptor payload: esid+prio (3) + DecoderConfigDescr tag/len (5)
     * + its fixed fields (13) + DecSpecificInfo tag/len (5) + extradata
     * + SLConfigDescriptor (3) */
    int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
    // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
    int config_size = 13 + 5 + avctx->extradata_size;
    int s;
    if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
        return NULL;
    bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
    bytestream2_put_byteu(&pb, 0); // version
    bytestream2_put_ne24(&pb, 0); // flags
    // elementary stream descriptor
    bytestream2_put_byteu(&pb, 0x03); // ES_DescrTag
    videotoolbox_write_mp4_descr_length(&pb, full_size);
    bytestream2_put_ne16(&pb, 0); // esid
    bytestream2_put_byteu(&pb, 0); // stream priority (0-32)
    // decoder configuration descriptor
    bytestream2_put_byteu(&pb, 0x04); // DecoderConfigDescrTag
    videotoolbox_write_mp4_descr_length(&pb, config_size);
    bytestream2_put_byteu(&pb, 32); // object type indication. 32 = AV_CODEC_ID_MPEG4
    bytestream2_put_byteu(&pb, 0x11); // stream type
    bytestream2_put_ne24(&pb, 0); // buffer size
    bytestream2_put_ne32(&pb, 0); // max bitrate
    bytestream2_put_ne32(&pb, 0); // avg bitrate
    // decoder specific descriptor
    bytestream2_put_byteu(&pb, 0x05); ///< DecSpecificInfoTag
    videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);
    bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);
    // SLConfigDescriptor
    bytestream2_put_byteu(&pb, 0x06); // SLConfigDescrTag
    bytestream2_put_byteu(&pb, 0x01); // length
    bytestream2_put_byteu(&pb, 0x02); //
    /* only the bytes actually written (excludes the zeroed padding) */
    s = bytestream2_size_p(&pb);
    data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);
    av_freep(&rw_extradata);
    return data;
}
/**
 * Wrap an existing memory region in a CMSampleBuffer suitable for
 * VTDecompressionSessionDecodeFrame(). The block buffer borrows `buffer`
 * (kCFAllocatorNull: no copy, no free) — the caller must keep it alive for
 * the lifetime of the returned sample buffer, and must CFRelease the result.
 * Returns NULL on failure.
 */
static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
                                                           void *buffer,
                                                           int size)
{
    OSStatus status;
    CMBlockBufferRef block_buf;
    CMSampleBufferRef sample_buf;
    block_buf = NULL;
    sample_buf = NULL;
    status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
                                                buffer, // memoryBlock
                                                size, // blockLength
                                                kCFAllocatorNull, // blockAllocator
                                                NULL, // customBlockSource
                                                0, // offsetToData
                                                size, // dataLength
                                                0, // flags
                                                &block_buf);
    if (!status) {
        status = CMSampleBufferCreate(kCFAllocatorDefault, // allocator
                                      block_buf, // dataBuffer
                                      TRUE, // dataReady
                                      0, // makeDataReadyCallback
                                      0, // makeDataReadyRefcon
                                      fmt_desc, // formatDescription
                                      1, // numSamples
                                      0, // numSampleTimingEntries
                                      NULL, // sampleTimingArray
                                      0, // numSampleSizeEntries
                                      NULL, // sampleSizeArray
                                      &sample_buf);
    }
    /* the sample buffer retains the block buffer; drop our reference */
    if (block_buf)
        CFRelease(block_buf);
    return sample_buf;
}
  491. static void videotoolbox_decoder_callback(void *opaque,
  492. void *sourceFrameRefCon,
  493. OSStatus status,
  494. VTDecodeInfoFlags flags,
  495. CVImageBufferRef image_buffer,
  496. CMTime pts,
  497. CMTime duration)
  498. {
  499. AVCodecContext *avctx = opaque;
  500. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  501. if (vtctx->frame) {
  502. CVPixelBufferRelease(vtctx->frame);
  503. vtctx->frame = NULL;
  504. }
  505. if (!image_buffer) {
  506. av_log(NULL, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n");
  507. return;
  508. }
  509. vtctx->frame = CVPixelBufferRetain(image_buffer);
  510. }
/**
 * Submit the accumulated bitstream to the VT session and wait for the
 * asynchronous decode to finish, so the output callback has run (and filled
 * vtctx->frame) by the time this returns. Returns noErr, a VT OSStatus
 * error, or -1 if the sample buffer could not be created.
 */
static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
{
    OSStatus status;
    CMSampleBufferRef sample_buf;
    AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    /* sample_buf borrows vtctx->bitstream (no copy) — valid only while the
     * bitstream buffer stays untouched, i.e. until CFRelease below */
    sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
                                                   vtctx->bitstream,
                                                   vtctx->bitstream_size);
    if (!sample_buf)
        return -1;
    status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
                                               sample_buf,
                                               0, // decodeFlags
                                               NULL, // sourceFrameRefCon
                                               0); // infoFlagsOut
    if (status == noErr)
        status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);
    CFRelease(sample_buf);
    return status;
}
  532. static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
  533. CFDictionaryRef decoder_spec,
  534. int width,
  535. int height)
  536. {
  537. CMFormatDescriptionRef cm_fmt_desc;
  538. OSStatus status;
  539. status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
  540. codec_type,
  541. width,
  542. height,
  543. decoder_spec, // Dictionary of extension
  544. &cm_fmt_desc);
  545. if (status)
  546. return NULL;
  547. return cm_fmt_desc;
  548. }
/**
 * Build the destinationImageBufferAttributes dictionary for the VT session:
 * width/height, optional pixel format (only set when pix_fmt is nonzero),
 * IOSurface backing and GL/GLES compatibility. Caller must CFRelease the
 * returned dictionary.
 */
static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
                                                             int height,
                                                             OSType pix_fmt)
{
    CFMutableDictionaryRef buffer_attributes;
    CFMutableDictionaryRef io_surface_properties;
    CFNumberRef cv_pix_fmt;
    CFNumberRef w;
    CFNumberRef h;
    w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
    h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
    cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);
    buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                  4,
                                                  &kCFTypeDictionaryKeyCallBacks,
                                                  &kCFTypeDictionaryValueCallBacks);
    /* empty dict = "any IOSurface properties", which requests IOSurface backing */
    io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                      0,
                                                      &kCFTypeDictionaryKeyCallBacks,
                                                      &kCFTypeDictionaryValueCallBacks);
    /* pix_fmt == 0 means "let VideoToolbox pick" — omit the key entirely */
    if (pix_fmt)
        CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
#if TARGET_OS_IPHONE
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferOpenGLESCompatibilityKey, kCFBooleanTrue);
#else
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey, kCFBooleanTrue);
#endif
    /* the dictionary retained the values; drop our creation references */
    CFRelease(io_surface_properties);
    CFRelease(cv_pix_fmt);
    CFRelease(w);
    CFRelease(h);
    return buffer_attributes;
}
/**
 * Build the videoDecoderSpecification dictionary: request (HEVC) or require
 * (others) hardware decoding, and attach the codec-specific extradata atom
 * (esds/avcC/hvcC) as a sample description extension. Caller must CFRelease
 * the returned dictionary.
 *
 * NOTE(review): the CFDictionaryCreateMutable results are used unchecked;
 * a NULL return here would crash in CFDictionarySetValue — confirm whether
 * that is acceptable project-wide or deserves a guard.
 */
static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
                                                          AVCodecContext *avctx)
{
    CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                                   0,
                                                                   &kCFTypeDictionaryKeyCallBacks,
                                                                   &kCFTypeDictionaryValueCallBacks);
    /* HEVC hardware decode is opportunistic (Enable), others mandatory (Require) */
    CFDictionarySetValue(config_info,
                         codec_type == kCMVideoCodecType_HEVC ?
                             kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder :
                             kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
                         kCFBooleanTrue);
    CFMutableDictionaryRef avc_info;
    CFDataRef data = NULL;
    avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                         1,
                                         &kCFTypeDictionaryKeyCallBacks,
                                         &kCFTypeDictionaryValueCallBacks);
    switch (codec_type) {
    case kCMVideoCodecType_MPEG4Video :
        if (avctx->extradata_size)
            data = videotoolbox_esds_extradata_create(avctx);
        if (data)
            CFDictionarySetValue(avc_info, CFSTR("esds"), data);
        break;
    case kCMVideoCodecType_H264 :
        data = ff_videotoolbox_avcc_extradata_create(avctx);
        if (data)
            CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
        break;
    case kCMVideoCodecType_HEVC :
        data = ff_videotoolbox_hvcc_extradata_create(avctx);
        if (data)
            CFDictionarySetValue(avc_info, CFSTR("hvcC"), data);
        break;
    default:
        break;
    }
    CFDictionarySetValue(config_info,
                         kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
                         avc_info);
    /* config_info retained what it needs; release our creation references */
    if (data)
        CFRelease(data);
    CFRelease(avc_info);
    return config_info;
}
/**
 * Create the VTDecompressionSession for the current codec: map the FFmpeg
 * codec id to a CM codec type, build the decoder specification (with
 * extradata), the format description and the output buffer attributes, then
 * open the session with videotoolbox_decoder_callback() as output callback.
 *
 * Returns 0 on success, AVERROR(ENOSYS) when VT cannot handle this format
 * (caller may fall back to software), or another negative error.
 */
static int videotoolbox_start(AVCodecContext *avctx)
{
    AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
    OSStatus status;
    VTDecompressionOutputCallbackRecord decoder_cb;
    CFDictionaryRef decoder_spec;
    CFDictionaryRef buf_attr;
    if (!videotoolbox) {
        av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
        return -1;
    }
    /* map AVCodecID -> CMVideoCodecType; unknown ids keep whatever
     * cm_codec_type the context already holds */
    switch( avctx->codec_id ) {
    case AV_CODEC_ID_H263 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
        break;
    case AV_CODEC_ID_H264 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
        break;
    case AV_CODEC_ID_HEVC :
        videotoolbox->cm_codec_type = kCMVideoCodecType_HEVC;
        break;
    case AV_CODEC_ID_MPEG1VIDEO :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
        break;
    case AV_CODEC_ID_MPEG2VIDEO :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
        break;
    case AV_CODEC_ID_MPEG4 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
        break;
    default :
        break;
    }
    decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);
    if (!decoder_spec) {
        av_log(avctx, AV_LOG_ERROR, "decoder specification creation failed\n");
        return -1;
    }
    videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
                                                                decoder_spec,
                                                                avctx->width,
                                                                avctx->height);
    if (!videotoolbox->cm_fmt_desc) {
        if (decoder_spec)
            CFRelease(decoder_spec);
        av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
        return -1;
    }
    buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
                                                     avctx->height,
                                                     videotoolbox->cv_pix_fmt_type);
    decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
    decoder_cb.decompressionOutputRefCon = avctx;
    status = VTDecompressionSessionCreate(NULL, // allocator
                                          videotoolbox->cm_fmt_desc, // videoFormatDescription
                                          decoder_spec, // videoDecoderSpecification
                                          buf_attr, // destinationImageBufferAttributes
                                          &decoder_cb, // outputCallback
                                          &videotoolbox->session); // decompressionSessionOut
    /* the session holds its own references to these dictionaries */
    if (decoder_spec)
        CFRelease(decoder_spec);
    if (buf_attr)
        CFRelease(buf_attr);
    /* map VT creation status to AVERROR; ENOSYS cases allow a sw fallback */
    switch (status) {
    case kVTVideoDecoderNotAvailableNowErr:
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox session not available.\n");
        return AVERROR(ENOSYS);
    case kVTVideoDecoderUnsupportedDataFormatErr:
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox does not support this format.\n");
        return AVERROR(ENOSYS);
    case kVTCouldNotFindVideoDecoderErr:
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder for this format not found.\n");
        return AVERROR(ENOSYS);
    case kVTVideoDecoderMalfunctionErr:
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox malfunction.\n");
        return AVERROR(EINVAL);
    case kVTVideoDecoderBadDataErr:
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox reported invalid data.\n");
        return AVERROR_INVALIDDATA;
    case 0:
        return 0;
    default:
        av_log(avctx, AV_LOG_VERBOSE, "Unknown VideoToolbox session creation error %d\n", (int)status);
        return AVERROR_UNKNOWN;
    }
}
  717. static void videotoolbox_stop(AVCodecContext *avctx)
  718. {
  719. AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
  720. if (!videotoolbox)
  721. return;
  722. if (videotoolbox->cm_fmt_desc) {
  723. CFRelease(videotoolbox->cm_fmt_desc);
  724. videotoolbox->cm_fmt_desc = NULL;
  725. }
  726. if (videotoolbox->session) {
  727. VTDecompressionSessionInvalidate(videotoolbox->session);
  728. CFRelease(videotoolbox->session);
  729. videotoolbox->session = NULL;
  730. }
  731. }
  732. static const char *videotoolbox_error_string(OSStatus status)
  733. {
  734. switch (status) {
  735. case kVTVideoDecoderBadDataErr:
  736. return "bad data";
  737. case kVTVideoDecoderMalfunctionErr:
  738. return "decoder malfunction";
  739. case kVTInvalidSessionErr:
  740. return "invalid session";
  741. }
  742. return "unknown";
  743. }
/**
 * Shared end_frame: restart the session first if a reconfig was flagged
 * (SPS change or a previous session error), decode the accumulated
 * bitstream, then attach the resulting pixel buffer to `frame`.
 * Session-level failures flag a reconfig so the next frame retries with a
 * fresh session. Returns 0 or a negative AVERROR code.
 */
static int videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
{
    OSStatus status;
    AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    if (vtctx->reconfig_needed == true) {
        /* clear the flag first so a successful restart doesn't loop */
        vtctx->reconfig_needed = false;
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder needs reconfig, restarting..\n");
        videotoolbox_stop(avctx);
        if (videotoolbox_start(avctx) != 0) {
            return AVERROR_EXTERNAL;
        }
    }
    if (!videotoolbox->session || !vtctx->bitstream || !vtctx->bitstream_size)
        return AVERROR_INVALIDDATA;
    status = videotoolbox_session_decode_frame(avctx);
    if (status != noErr) {
        /* these two indicate a dead session; rebuild it on the next frame */
        if (status == kVTVideoDecoderMalfunctionErr || status == kVTInvalidSessionErr)
            vtctx->reconfig_needed = true;
        av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%s, %d)\n", videotoolbox_error_string(status), (int)status);
        return AVERROR_UNKNOWN;
    }
    if (!vtctx->frame) {
        /* decode "succeeded" but produced no image: treat the session as suspect */
        vtctx->reconfig_needed = true;
        return AVERROR_UNKNOWN;
    }
    return videotoolbox_buffer_create(avctx, frame);
}
  772. static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
  773. {
  774. H264Context *h = avctx->priv_data;
  775. AVFrame *frame = h->cur_pic_ptr->f;
  776. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  777. int ret = videotoolbox_common_end_frame(avctx, frame);
  778. vtctx->bitstream_size = 0;
  779. return ret;
  780. }
/**
 * HEVC decode_params hook: parameter-set NALs are simply appended to the
 * bitstream with length-prefix framing (no SPS-change tracking as in the
 * H.264 counterpart; `type` is unused).
 */
static int videotoolbox_hevc_decode_params(AVCodecContext *avctx,
                                           int type,
                                           const uint8_t *buffer,
                                           uint32_t size)
{
    return ff_videotoolbox_h264_decode_slice(avctx, buffer, size);
}
  788. static int videotoolbox_hevc_end_frame(AVCodecContext *avctx)
  789. {
  790. HEVCContext *h = avctx->priv_data;
  791. AVFrame *frame = h->ref->frame;
  792. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  793. int ret;
  794. ret = videotoolbox_common_end_frame(avctx, frame);
  795. vtctx->bitstream_size = 0;
  796. return ret;
  797. }
  798. static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
  799. const uint8_t *buffer,
  800. uint32_t size)
  801. {
  802. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  803. return videotoolbox_buffer_copy(vtctx, buffer, size);
  804. }
/**
 * MPEG decode_slice hook: intentionally a no-op — the full picture was
 * already captured in videotoolbox_mpeg_start_frame().
 */
static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
                                          const uint8_t *buffer,
                                          uint32_t size)
{
    return 0;
}
  811. static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
  812. {
  813. MpegEncContext *s = avctx->priv_data;
  814. AVFrame *frame = s->current_picture_ptr->f;
  815. return videotoolbox_common_end_frame(avctx, frame);
  816. }
  817. static int videotoolbox_uninit(AVCodecContext *avctx)
  818. {
  819. VTContext *vtctx = avctx->internal->hwaccel_priv_data;
  820. if (!vtctx)
  821. return 0;
  822. ff_videotoolbox_uninit(avctx);
  823. if (vtctx->vt_ctx)
  824. videotoolbox_stop(avctx);
  825. av_buffer_unref(&vtctx->cached_hw_frames_ctx);
  826. av_freep(&vtctx->vt_ctx);
  827. return 0;
  828. }
/**
 * New-API hwaccel init: allocate a private AVVideotoolboxContext, derive (or
 * adopt) an AVHWFramesContext from the user's hw_frames_ctx/hw_device_ctx,
 * pick the CV pixel format from its sw_format, and start the VT session.
 * With the old API (avctx->hwaccel_context set) all of this is skipped.
 * Returns 0 or a negative AVERROR code; cleans up after itself on failure.
 */
static int videotoolbox_common_init(AVCodecContext *avctx)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    AVHWFramesContext *hw_frames;
    int err;
    // Old API - do nothing.
    if (avctx->hwaccel_context)
        return 0;
    if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) {
        av_log(avctx, AV_LOG_ERROR,
               "Either hw_frames_ctx or hw_device_ctx must be set.\n");
        return AVERROR(EINVAL);
    }
    vtctx->vt_ctx = av_videotoolbox_alloc_context();
    if (!vtctx->vt_ctx) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    if (avctx->hw_frames_ctx) {
        /* user supplied a frames context: use it as-is */
        hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
    } else {
        /* only a device was given: derive a frames context from it */
        avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
        if (!avctx->hw_frames_ctx) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
        hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
        hw_frames->format = AV_PIX_FMT_VIDEOTOOLBOX;
        hw_frames->sw_format = AV_PIX_FMT_NV12; // same as av_videotoolbox_alloc_context()
        hw_frames->width = avctx->width;
        hw_frames->height = avctx->height;
        err = av_hwframe_ctx_init(avctx->hw_frames_ctx);
        if (err < 0) {
            av_buffer_unref(&avctx->hw_frames_ctx);
            goto fail;
        }
    }
    /* keep our own reference so output geometry changes can be detected later */
    vtctx->cached_hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
    if (!vtctx->cached_hw_frames_ctx) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    vtctx->vt_ctx->cv_pix_fmt_type =
        av_map_videotoolbox_format_from_pixfmt(hw_frames->sw_format);
    if (!vtctx->vt_ctx->cv_pix_fmt_type) {
        av_log(avctx, AV_LOG_ERROR, "Unknown sw_format.\n");
        err = AVERROR(EINVAL);
        goto fail;
    }
    err = videotoolbox_start(avctx);
    if (err < 0)
        goto fail;
    return 0;
fail:
    videotoolbox_uninit(avctx);
    return err;
}
  886. static int videotoolbox_frame_params(AVCodecContext *avctx,
  887. AVBufferRef *hw_frames_ctx)
  888. {
  889. AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
  890. frames_ctx->format = AV_PIX_FMT_VIDEOTOOLBOX;
  891. frames_ctx->width = avctx->coded_width;
  892. frames_ctx->height = avctx->coded_height;
  893. frames_ctx->sw_format = AV_PIX_FMT_NV12;
  894. return 0;
  895. }
/* H.263 hardware decoding via VideoToolbox (MPEG-style single-buffer path). */
const AVHWAccel ff_h263_videotoolbox_hwaccel = {
.name = "h263_videotoolbox",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_H263,
.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
.alloc_frame = ff_videotoolbox_alloc_frame,
.start_frame = videotoolbox_mpeg_start_frame,
.decode_slice = videotoolbox_mpeg_decode_slice,
.end_frame = videotoolbox_mpeg_end_frame,
.frame_params = videotoolbox_frame_params,
.init = videotoolbox_common_init,
.uninit = videotoolbox_uninit,
.priv_data_size = sizeof(VTContext),
};
  910. const AVHWAccel ff_hevc_videotoolbox_hwaccel = {
  911. .name = "hevc_videotoolbox",
  912. .type = AVMEDIA_TYPE_VIDEO,
  913. .id = AV_CODEC_ID_HEVC,
  914. .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
  915. .alloc_frame = ff_videotoolbox_alloc_frame,
  916. .start_frame = ff_videotoolbox_h264_start_frame,
  917. .decode_slice = ff_videotoolbox_h264_decode_slice,
  918. .decode_params = videotoolbox_hevc_decode_params,
  919. .end_frame = videotoolbox_hevc_end_frame,
  920. .frame_params = videotoolbox_frame_params,
  921. .init = videotoolbox_common_init,
  922. .uninit = ff_videotoolbox_uninit,
  923. .priv_data_size = sizeof(VTContext),
  924. };
/* H.264 hardware decoding via VideoToolbox (per-NAL accumulation path). */
const AVHWAccel ff_h264_videotoolbox_hwaccel = {
.name = "h264_videotoolbox",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_H264,
.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
.alloc_frame = ff_videotoolbox_alloc_frame,
.start_frame = ff_videotoolbox_h264_start_frame,
.decode_slice = ff_videotoolbox_h264_decode_slice,
.decode_params = videotoolbox_h264_decode_params,
.end_frame = videotoolbox_h264_end_frame,
.frame_params = videotoolbox_frame_params,
.init = videotoolbox_common_init,
.uninit = videotoolbox_uninit,
.priv_data_size = sizeof(VTContext),
};
/* MPEG-1 hardware decoding via VideoToolbox (MPEG-style single-buffer path). */
const AVHWAccel ff_mpeg1_videotoolbox_hwaccel = {
.name = "mpeg1_videotoolbox",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_MPEG1VIDEO,
.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
.alloc_frame = ff_videotoolbox_alloc_frame,
.start_frame = videotoolbox_mpeg_start_frame,
.decode_slice = videotoolbox_mpeg_decode_slice,
.end_frame = videotoolbox_mpeg_end_frame,
.frame_params = videotoolbox_frame_params,
.init = videotoolbox_common_init,
.uninit = videotoolbox_uninit,
.priv_data_size = sizeof(VTContext),
};
/* MPEG-2 hardware decoding via VideoToolbox (MPEG-style single-buffer path). */
const AVHWAccel ff_mpeg2_videotoolbox_hwaccel = {
.name = "mpeg2_videotoolbox",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_MPEG2VIDEO,
.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
.alloc_frame = ff_videotoolbox_alloc_frame,
.start_frame = videotoolbox_mpeg_start_frame,
.decode_slice = videotoolbox_mpeg_decode_slice,
.end_frame = videotoolbox_mpeg_end_frame,
.frame_params = videotoolbox_frame_params,
.init = videotoolbox_common_init,
.uninit = videotoolbox_uninit,
.priv_data_size = sizeof(VTContext),
};
/* MPEG-4 Part 2 hardware decoding via VideoToolbox (MPEG-style single-buffer path). */
const AVHWAccel ff_mpeg4_videotoolbox_hwaccel = {
.name = "mpeg4_videotoolbox",
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_MPEG4,
.pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
.alloc_frame = ff_videotoolbox_alloc_frame,
.start_frame = videotoolbox_mpeg_start_frame,
.decode_slice = videotoolbox_mpeg_decode_slice,
.end_frame = videotoolbox_mpeg_end_frame,
.frame_params = videotoolbox_frame_params,
.init = videotoolbox_common_init,
.uninit = videotoolbox_uninit,
.priv_data_size = sizeof(VTContext),
};
  982. AVVideotoolboxContext *av_videotoolbox_alloc_context(void)
  983. {
  984. AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
  985. if (ret) {
  986. ret->output_callback = videotoolbox_decoder_callback;
  987. ret->cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
  988. }
  989. return ret;
  990. }
/**
 * Legacy-API convenience wrapper: equivalent to
 * av_videotoolbox_default_init2(avctx, NULL), letting the library
 * allocate the context itself.
 */
int av_videotoolbox_default_init(AVCodecContext *avctx)
{
return av_videotoolbox_default_init2(avctx, NULL);
}
  995. int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx)
  996. {
  997. avctx->hwaccel_context = vtctx ?: av_videotoolbox_alloc_context();
  998. if (!avctx->hwaccel_context)
  999. return AVERROR(ENOMEM);
  1000. return videotoolbox_start(avctx);
  1001. }
/**
 * Legacy-API teardown: stop the decompression session and free the
 * hwaccel context installed by av_videotoolbox_default_init[2]().
 */
void av_videotoolbox_default_free(AVCodecContext *avctx)
{
videotoolbox_stop(avctx);
av_freep(&avctx->hwaccel_context);
}
  1007. #endif /* CONFIG_VIDEOTOOLBOX */