/*
 * AVFoundation input device
 * Copyright (c) 2014 Thilo Borgmann <thilo.borgmann@mail.de>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * AVFoundation input device
 * @author Thilo Borgmann <thilo.borgmann@mail.de>
 */
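
/*
 * Typical invocations (device names and indices vary per machine):
 *
 *   ffmpeg -f avfoundation -list_devices true -i ""       # enumerate devices
 *   ffmpeg -f avfoundation -i "0:0" out.mov               # video device 0, audio device 0
 *   ffmpeg -f avfoundation -i "default:default" out.mov   # default devices, "video:audio"
 */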
#import <AVFoundation/AVFoundation.h>
#include <pthread.h>

#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "libavformat/internal.h"
#include "libavutil/internal.h"
#include "libavutil/time.h"
#include "avdevice.h"

static const int avf_time_base = 1000000;

static const AVRational avf_time_base_q = {
    .num = 1,
    .den = avf_time_base
};

struct AVFPixelFormatSpec {
    enum AVPixelFormat ff_id;
    OSType             avf_id;
};
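
/* Mapping table between FFmpeg pixel formats and the CoreVideo pixel format
 * constants AVFoundation reports via availableVideoCVPixelFormatTypes.
 * Terminated by an AV_PIX_FMT_NONE entry. */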
static const struct AVFPixelFormatSpec avf_pixel_formats[] = {
    { AV_PIX_FMT_MONOBLACK,    kCVPixelFormatType_1Monochrome },
    { AV_PIX_FMT_RGB555BE,     kCVPixelFormatType_16BE555 },
    { AV_PIX_FMT_RGB555LE,     kCVPixelFormatType_16LE555 },
    { AV_PIX_FMT_RGB565BE,     kCVPixelFormatType_16BE565 },
    { AV_PIX_FMT_RGB565LE,     kCVPixelFormatType_16LE565 },
    { AV_PIX_FMT_RGB24,        kCVPixelFormatType_24RGB },
    { AV_PIX_FMT_BGR24,        kCVPixelFormatType_24BGR },
    { AV_PIX_FMT_0RGB,         kCVPixelFormatType_32ARGB },
    { AV_PIX_FMT_BGR0,         kCVPixelFormatType_32BGRA },
    { AV_PIX_FMT_0BGR,         kCVPixelFormatType_32ABGR },
    { AV_PIX_FMT_RGB0,         kCVPixelFormatType_32RGBA },
    { AV_PIX_FMT_BGR48BE,      kCVPixelFormatType_48RGB },
    { AV_PIX_FMT_UYVY422,      kCVPixelFormatType_422YpCbCr8 },
    { AV_PIX_FMT_YUVA444P,     kCVPixelFormatType_4444YpCbCrA8R },
    { AV_PIX_FMT_YUVA444P16LE, kCVPixelFormatType_4444AYpCbCr16 },
    { AV_PIX_FMT_YUV444P,      kCVPixelFormatType_444YpCbCr8 },
    { AV_PIX_FMT_YUV422P16,    kCVPixelFormatType_422YpCbCr16 },
    { AV_PIX_FMT_YUV422P10,    kCVPixelFormatType_422YpCbCr10 },
    { AV_PIX_FMT_YUV444P10,    kCVPixelFormatType_444YpCbCr10 },
    { AV_PIX_FMT_YUV420P,      kCVPixelFormatType_420YpCbCr8Planar },
    { AV_PIX_FMT_NV12,         kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange },
    { AV_PIX_FMT_YUYV422,      kCVPixelFormatType_422YpCbCr8_yuvs },
#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
    { AV_PIX_FMT_GRAY8,        kCVPixelFormatType_OneComponent8 },
#endif
    { AV_PIX_FMT_NONE, 0 }
};

typedef struct
{
    AVClass*        class;

    float           frame_rate;
    int             frames_captured;
    int             audio_frames_captured;
    int64_t         first_pts;
    int64_t         first_audio_pts;
    pthread_mutex_t frame_lock;
    pthread_cond_t  frame_wait_cond;
    id              avf_delegate;
    id              avf_audio_delegate;

    int             list_devices;
    int             video_device_index;
    int             video_stream_index;
    int             audio_device_index;
    int             audio_stream_index;

    char            *video_filename;
    char            *audio_filename;

    int             num_video_devices;

    int             audio_channels;
    int             audio_bits_per_sample;
    int             audio_float;
    int             audio_be;
    int             audio_signed_integer;
    int             audio_packed;
    int             audio_non_interleaved;

    int32_t         *audio_buffer;
    int             audio_buffer_size;

    enum AVPixelFormat pixel_format;

    AVCaptureSession         *capture_session;
    AVCaptureVideoDataOutput *video_output;
    AVCaptureAudioDataOutput *audio_output;
    CMSampleBufferRef         current_frame;
    CMSampleBufferRef         current_audio_frame;
} AVFContext;
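
/* The capture callbacks store the most recent sample buffer in
 * current_frame/current_audio_frame; frame_lock (taken via the helpers
 * below) serializes access to them and frame_wait_cond wakes a reader
 * blocked in avf_read_packet() whenever a new buffer arrives. */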
static void lock_frames(AVFContext* ctx)
{
    pthread_mutex_lock(&ctx->frame_lock);
}

static void unlock_frames(AVFContext* ctx)
{
    pthread_mutex_unlock(&ctx->frame_lock);
}

/** FrameReceiver class - delegate for AVCaptureSession
 */
@interface AVFFrameReceiver : NSObject
{
    AVFContext* _context;
}

- (id)initWithContext:(AVFContext*)context;

- (void)  captureOutput:(AVCaptureOutput *)captureOutput
  didOutputSampleBuffer:(CMSampleBufferRef)videoFrame
         fromConnection:(AVCaptureConnection *)connection;

@end

@implementation AVFFrameReceiver

- (id)initWithContext:(AVFContext*)context
{
    if (self = [super init]) {
        _context = context;
    }
    return self;
}

- (void)  captureOutput:(AVCaptureOutput *)captureOutput
  didOutputSampleBuffer:(CMSampleBufferRef)videoFrame
         fromConnection:(AVCaptureConnection *)connection
{
    lock_frames(_context);

    if (_context->current_frame != nil) {
        CFRelease(_context->current_frame);
    }

    _context->current_frame = (CMSampleBufferRef)CFRetain(videoFrame);

    pthread_cond_signal(&_context->frame_wait_cond);

    unlock_frames(_context);

    ++_context->frames_captured;
}

@end

/** AudioReceiver class - delegate for AVCaptureSession
 */
@interface AVFAudioReceiver : NSObject
{
    AVFContext* _context;
}

- (id)initWithContext:(AVFContext*)context;

- (void)  captureOutput:(AVCaptureOutput *)captureOutput
  didOutputSampleBuffer:(CMSampleBufferRef)audioFrame
         fromConnection:(AVCaptureConnection *)connection;

@end

@implementation AVFAudioReceiver

- (id)initWithContext:(AVFContext*)context
{
    if (self = [super init]) {
        _context = context;
    }
    return self;
}

- (void)  captureOutput:(AVCaptureOutput *)captureOutput
  didOutputSampleBuffer:(CMSampleBufferRef)audioFrame
         fromConnection:(AVCaptureConnection *)connection
{
    lock_frames(_context);

    if (_context->current_audio_frame != nil) {
        CFRelease(_context->current_audio_frame);
    }

    _context->current_audio_frame = (CMSampleBufferRef)CFRetain(audioFrame);

    pthread_cond_signal(&_context->frame_wait_cond);

    unlock_frames(_context);

    ++_context->audio_frames_captured;
}

@end

static void destroy_context(AVFContext* ctx)
{
    [ctx->capture_session stopRunning];

    [ctx->capture_session release];
    [ctx->video_output    release];
    [ctx->audio_output    release];
    [ctx->avf_delegate    release];
    [ctx->avf_audio_delegate release];

    ctx->capture_session = NULL;
    ctx->video_output    = NULL;
    ctx->audio_output    = NULL;
    ctx->avf_delegate    = NULL;
    ctx->avf_audio_delegate = NULL;

    av_freep(&ctx->audio_buffer);

    pthread_mutex_destroy(&ctx->frame_lock);
    pthread_cond_destroy(&ctx->frame_wait_cond);

    if (ctx->current_frame) {
        CFRelease(ctx->current_frame);
    }
    // also drop a pending audio frame, which would otherwise leak
    if (ctx->current_audio_frame) {
        CFRelease(ctx->current_audio_frame);
    }
}
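
/* The input "filename" selects the devices as "<video>:<audio>", where each
 * part is a device name, an index, or "default"; a leading ':' means
 * audio-only capture (e.g. -i ":0"). */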
static void parse_device_name(AVFormatContext *s)
{
    AVFContext *ctx = (AVFContext*)s->priv_data;
    char *tmp = av_strdup(s->filename);

    if (tmp[0] != ':') {
        ctx->video_filename = strtok(tmp,  ":");
        ctx->audio_filename = strtok(NULL, ":");
    } else {
        ctx->audio_filename = strtok(tmp,  ":");
    }
}
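
/* For device indices at or beyond num_video_devices the "video_device" is
 * really an AVCaptureScreenInput created in avf_read_header(); since that is
 * already an AVCaptureInput, it is added to the session directly instead of
 * being wrapped in an AVCaptureDeviceInput. */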
static int add_video_device(AVFormatContext *s, AVCaptureDevice *video_device)
{
    AVFContext *ctx = (AVFContext*)s->priv_data;
    NSError *error  = nil;
    AVCaptureInput* capture_input = nil;

    if (ctx->video_device_index < ctx->num_video_devices) {
        capture_input = (AVCaptureInput*) [[[AVCaptureDeviceInput alloc] initWithDevice:video_device error:&error] autorelease];
    } else {
        capture_input = (AVCaptureInput*) video_device;
    }

    if (!capture_input) {
        av_log(s, AV_LOG_ERROR, "Failed to create AV capture input device: %s\n",
               [[error localizedDescription] UTF8String]);
        return 1;
    }

    if ([ctx->capture_session canAddInput:capture_input]) {
        [ctx->capture_session addInput:capture_input];
    } else {
        av_log(s, AV_LOG_ERROR, "can't add video input to capture session\n");
        return 1;
    }

    // Attaching output
    ctx->video_output = [[AVCaptureVideoDataOutput alloc] init];

    if (!ctx->video_output) {
        av_log(s, AV_LOG_ERROR, "Failed to init AV video output\n");
        return 1;
    }

    // select pixel format
    struct AVFPixelFormatSpec pxl_fmt_spec;
    pxl_fmt_spec.ff_id = AV_PIX_FMT_NONE;

    for (int i = 0; avf_pixel_formats[i].ff_id != AV_PIX_FMT_NONE; i++) {
        if (ctx->pixel_format == avf_pixel_formats[i].ff_id) {
            pxl_fmt_spec = avf_pixel_formats[i];
            break;
        }
    }

    // check if selected pixel format is supported by AVFoundation
    if (pxl_fmt_spec.ff_id == AV_PIX_FMT_NONE) {
        av_log(s, AV_LOG_ERROR, "Selected pixel format (%s) is not supported by AVFoundation.\n",
               av_get_pix_fmt_name(ctx->pixel_format));
        return 1;
    }

    // check if the pixel format is available for this device
    if ([[ctx->video_output availableVideoCVPixelFormatTypes] indexOfObject:[NSNumber numberWithInt:pxl_fmt_spec.avf_id]] == NSNotFound) {
        av_log(s, AV_LOG_ERROR, "Selected pixel format (%s) is not supported by the input device.\n",
               av_get_pix_fmt_name(pxl_fmt_spec.ff_id));

        pxl_fmt_spec.ff_id = AV_PIX_FMT_NONE;

        av_log(s, AV_LOG_ERROR, "Supported pixel formats:\n");
        for (NSNumber *pxl_fmt in [ctx->video_output availableVideoCVPixelFormatTypes]) {
            struct AVFPixelFormatSpec pxl_fmt_dummy;
            pxl_fmt_dummy.ff_id = AV_PIX_FMT_NONE;
            for (int i = 0; avf_pixel_formats[i].ff_id != AV_PIX_FMT_NONE; i++) {
                if ([pxl_fmt intValue] == avf_pixel_formats[i].avf_id) {
                    pxl_fmt_dummy = avf_pixel_formats[i];
                    break;
                }
            }

            if (pxl_fmt_dummy.ff_id != AV_PIX_FMT_NONE) {
                av_log(s, AV_LOG_ERROR, "  %s\n", av_get_pix_fmt_name(pxl_fmt_dummy.ff_id));

                // select first supported pixel format instead of user selected (or default) pixel format
                if (pxl_fmt_spec.ff_id == AV_PIX_FMT_NONE) {
                    pxl_fmt_spec = pxl_fmt_dummy;
                }
            }
        }

        // fail if there is no appropriate pixel format or print a warning about overriding the pixel format
        if (pxl_fmt_spec.ff_id == AV_PIX_FMT_NONE) {
            return 1;
        } else {
            av_log(s, AV_LOG_WARNING, "Overriding selected pixel format to use %s instead.\n",
                   av_get_pix_fmt_name(pxl_fmt_spec.ff_id));
        }
    }

    ctx->pixel_format          = pxl_fmt_spec.ff_id;
    NSNumber *pixel_format     = [NSNumber numberWithUnsignedInt:pxl_fmt_spec.avf_id];
    NSDictionary *capture_dict = [NSDictionary dictionaryWithObject:pixel_format
                                               forKey:(id)kCVPixelBufferPixelFormatTypeKey];

    [ctx->video_output setVideoSettings:capture_dict];
    [ctx->video_output setAlwaysDiscardsLateVideoFrames:YES];

    ctx->avf_delegate = [[AVFFrameReceiver alloc] initWithContext:ctx];

    dispatch_queue_t queue = dispatch_queue_create("avf_queue", NULL);
    [ctx->video_output setSampleBufferDelegate:ctx->avf_delegate queue:queue];
    dispatch_release(queue);

    if ([ctx->capture_session canAddOutput:ctx->video_output]) {
        [ctx->capture_session addOutput:ctx->video_output];
    } else {
        av_log(s, AV_LOG_ERROR, "can't add video output to capture session\n");
        return 1;
    }

    return 0;
}

static int add_audio_device(AVFormatContext *s, AVCaptureDevice *audio_device)
{
    AVFContext *ctx = (AVFContext*)s->priv_data;
    NSError *error  = nil;
    AVCaptureDeviceInput* audio_dev_input = [[[AVCaptureDeviceInput alloc] initWithDevice:audio_device error:&error] autorelease];

    if (!audio_dev_input) {
        av_log(s, AV_LOG_ERROR, "Failed to create AV capture input device: %s\n",
               [[error localizedDescription] UTF8String]);
        return 1;
    }

    if ([ctx->capture_session canAddInput:audio_dev_input]) {
        [ctx->capture_session addInput:audio_dev_input];
    } else {
        av_log(s, AV_LOG_ERROR, "can't add audio input to capture session\n");
        return 1;
    }

    // Attaching output
    ctx->audio_output = [[AVCaptureAudioDataOutput alloc] init];

    if (!ctx->audio_output) {
        av_log(s, AV_LOG_ERROR, "Failed to init AV audio output\n");
        return 1;
    }

    ctx->avf_audio_delegate = [[AVFAudioReceiver alloc] initWithContext:ctx];

    dispatch_queue_t queue = dispatch_queue_create("avf_audio_queue", NULL);
    [ctx->audio_output setSampleBufferDelegate:ctx->avf_audio_delegate queue:queue];
    dispatch_release(queue);

    if ([ctx->capture_session canAddOutput:ctx->audio_output]) {
        [ctx->capture_session addOutput:ctx->audio_output];
    } else {
        av_log(s, AV_LOG_ERROR, "adding audio output to capture session failed\n");
        return 1;
    }

    return 0;
}
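
/* Stream parameters (dimensions, sample format, ...) are probed from the
 * first captured frame: the CFRunLoop calls below pump the capture callbacks
 * until a buffer arrives, then the stream is filled in from that buffer. */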
static int get_video_config(AVFormatContext *s)
{
    AVFContext *ctx = (AVFContext*)s->priv_data;

    // Take stream info from the first frame.
    while (ctx->frames_captured < 1) {
        CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.1, YES);
    }

    lock_frames(ctx);

    AVStream* stream = avformat_new_stream(s, NULL);

    if (!stream) {
        unlock_frames(ctx);
        return 1;
    }

    ctx->video_stream_index = stream->index;

    avpriv_set_pts_info(stream, 64, 1, avf_time_base);

    CVImageBufferRef image_buffer = CMSampleBufferGetImageBuffer(ctx->current_frame);
    CGSize image_buffer_size      = CVImageBufferGetEncodedSize(image_buffer);

    stream->codec->codec_id   = AV_CODEC_ID_RAWVIDEO;
    stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    stream->codec->width      = (int)image_buffer_size.width;
    stream->codec->height     = (int)image_buffer_size.height;
    stream->codec->pix_fmt    = ctx->pixel_format;

    CFRelease(ctx->current_frame);
    ctx->current_frame = nil;

    unlock_frames(ctx);

    return 0;
}

static int get_audio_config(AVFormatContext *s)
{
    AVFContext *ctx = (AVFContext*)s->priv_data;

    // Take stream info from the first frame.
    while (ctx->audio_frames_captured < 1) {
        CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.1, YES);
    }

    lock_frames(ctx);

    AVStream* stream = avformat_new_stream(s, NULL);

    if (!stream) {
        unlock_frames(ctx);
        return 1;
    }

    ctx->audio_stream_index = stream->index;

    avpriv_set_pts_info(stream, 64, 1, avf_time_base);

    CMFormatDescriptionRef format_desc = CMSampleBufferGetFormatDescription(ctx->current_audio_frame);
    const AudioStreamBasicDescription *basic_desc = CMAudioFormatDescriptionGetStreamBasicDescription(format_desc);

    if (!basic_desc) {
        av_log(s, AV_LOG_ERROR, "audio format not available\n");
        unlock_frames(ctx);
        return 1;
    }

    stream->codec->codec_type     = AVMEDIA_TYPE_AUDIO;
    stream->codec->sample_rate    = basic_desc->mSampleRate;
    stream->codec->channels       = basic_desc->mChannelsPerFrame;
    stream->codec->channel_layout = av_get_default_channel_layout(stream->codec->channels);

    ctx->audio_channels        = basic_desc->mChannelsPerFrame;
    ctx->audio_bits_per_sample = basic_desc->mBitsPerChannel;
    ctx->audio_float           = basic_desc->mFormatFlags & kAudioFormatFlagIsFloat;
    ctx->audio_be              = basic_desc->mFormatFlags & kAudioFormatFlagIsBigEndian;
    ctx->audio_signed_integer  = basic_desc->mFormatFlags & kAudioFormatFlagIsSignedInteger;
    ctx->audio_packed          = basic_desc->mFormatFlags & kAudioFormatFlagIsPacked;
    ctx->audio_non_interleaved = basic_desc->mFormatFlags & kAudioFormatFlagIsNonInterleaved;

    if (basic_desc->mFormatID == kAudioFormatLinearPCM &&
        ctx->audio_float &&
        ctx->audio_packed) {
        stream->codec->codec_id = ctx->audio_be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
    } else {
        av_log(s, AV_LOG_ERROR, "audio format is not supported\n");
        unlock_frames(ctx);
        return 1;
    }

    if (ctx->audio_non_interleaved) {
        CMBlockBufferRef block_buffer = CMSampleBufferGetDataBuffer(ctx->current_audio_frame);
        ctx->audio_buffer_size        = CMBlockBufferGetDataLength(block_buffer);
        ctx->audio_buffer             = av_malloc(ctx->audio_buffer_size);
        if (!ctx->audio_buffer) {
            av_log(s, AV_LOG_ERROR, "error allocating audio buffer\n");
            unlock_frames(ctx);
            return 1;
        }
    }

    CFRelease(ctx->current_audio_frame);
    ctx->current_audio_frame = nil;

    unlock_frames(ctx);

    return 0;
}
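
/* Header setup: list devices if requested, resolve the video/audio device
 * from the input name, attach inputs and outputs to a shared
 * AVCaptureSession, start it, and probe both stream configurations. */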
static int avf_read_header(AVFormatContext *s)
{
    NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
    AVFContext *ctx         = (AVFContext*)s->priv_data;
    ctx->first_pts          = av_gettime();
    ctx->first_audio_pts    = av_gettime();
    uint32_t num_screens    = 0;

    pthread_mutex_init(&ctx->frame_lock, NULL);
    pthread_cond_init(&ctx->frame_wait_cond, NULL);

#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
    CGGetActiveDisplayList(0, NULL, &num_screens);
#endif

    // List devices if requested
    if (ctx->list_devices) {
        av_log(ctx, AV_LOG_INFO, "AVFoundation video devices:\n");
        NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
        int index = 0;
        for (AVCaptureDevice *device in devices) {
            const char *name = [[device localizedName] UTF8String];
            index            = [devices indexOfObject:device];
            av_log(ctx, AV_LOG_INFO, "[%d] %s\n", index, name);
            index++;
        }
#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
        if (num_screens > 0) {
            CGDirectDisplayID screens[num_screens];
            CGGetActiveDisplayList(num_screens, screens, &num_screens);
            for (int i = 0; i < num_screens; i++) {
                av_log(ctx, AV_LOG_INFO, "[%d] Capture screen %d\n", index + i, i);
            }
        }
#endif

        av_log(ctx, AV_LOG_INFO, "AVFoundation audio devices:\n");
        devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeAudio];
        for (AVCaptureDevice *device in devices) {
            const char *name = [[device localizedName] UTF8String];
            int index        = [devices indexOfObject:device];
            av_log(ctx, AV_LOG_INFO, "[%d] %s\n", index, name);
        }
        goto fail;
    }

    // Find capture device
    AVCaptureDevice *video_device = nil;
    AVCaptureDevice *audio_device = nil;

    NSArray *video_devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
    ctx->num_video_devices = [video_devices count];

    // parse input filename for video and audio device
    parse_device_name(s);

    // check for device index given in filename
    if (ctx->video_device_index == -1 && ctx->video_filename) {
        sscanf(ctx->video_filename, "%d", &ctx->video_device_index);
    }
    if (ctx->audio_device_index == -1 && ctx->audio_filename) {
        sscanf(ctx->audio_filename, "%d", &ctx->audio_device_index);
    }

    if (ctx->video_device_index >= 0) {
        if (ctx->video_device_index < ctx->num_video_devices) {
            video_device = [video_devices objectAtIndex:ctx->video_device_index];
        } else if (ctx->video_device_index < ctx->num_video_devices + num_screens) {
#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
            CGDirectDisplayID screens[num_screens];
            CGGetActiveDisplayList(num_screens, screens, &num_screens);
            AVCaptureScreenInput* capture_screen_input = [[[AVCaptureScreenInput alloc] initWithDisplayID:screens[ctx->video_device_index - ctx->num_video_devices]] autorelease];
            video_device = (AVCaptureDevice*) capture_screen_input;
#endif
        } else {
            av_log(ctx, AV_LOG_ERROR, "Invalid device index\n");
            goto fail;
        }
    } else if (ctx->video_filename &&
               strncmp(ctx->video_filename, "default", 7)) {
        // looking for video inputs
        for (AVCaptureDevice *device in video_devices) {
            if (!strncmp(ctx->video_filename, [[device localizedName] UTF8String], strlen(ctx->video_filename))) {
                video_device = device;
                break;
            }
        }

#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
        // looking for screen inputs
        if (!video_device) {
            int idx;
            if (sscanf(ctx->video_filename, "Capture screen %d", &idx) && idx < num_screens) {
                CGDirectDisplayID screens[num_screens];
                CGGetActiveDisplayList(num_screens, screens, &num_screens);
                AVCaptureScreenInput* capture_screen_input = [[[AVCaptureScreenInput alloc] initWithDisplayID:screens[idx]] autorelease];
                video_device = (AVCaptureDevice*) capture_screen_input;
                ctx->video_device_index = ctx->num_video_devices + idx;
            }
        }
#endif

        if (!video_device) {
            av_log(ctx, AV_LOG_ERROR, "Video device not found\n");
            goto fail;
        }
    } else {
        video_device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    }

    // get audio device
    if (ctx->audio_device_index >= 0) {
        NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeAudio];

        if (ctx->audio_device_index >= [devices count]) {
            av_log(ctx, AV_LOG_ERROR, "Invalid audio device index\n");
            goto fail;
        }

        audio_device = [devices objectAtIndex:ctx->audio_device_index];
    } else if (ctx->audio_filename &&
               strncmp(ctx->audio_filename, "default", 7)) {
        NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeAudio];

        for (AVCaptureDevice *device in devices) {
            if (!strncmp(ctx->audio_filename, [[device localizedName] UTF8String], strlen(ctx->audio_filename))) {
                audio_device = device;
                break;
            }
        }

        if (!audio_device) {
            av_log(ctx, AV_LOG_ERROR, "Audio device not found\n");
            goto fail;
        }
    } else {
        audio_device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
    }

    // Fail if neither a video nor an audio capture device was found
    if (!video_device && !audio_device) {
        av_log(s, AV_LOG_ERROR, "No AV capture device found\n");
        goto fail;
    }

    if (video_device) {
        if (ctx->video_device_index < ctx->num_video_devices) {
            av_log(s, AV_LOG_DEBUG, "'%s' opened\n", [[video_device localizedName] UTF8String]);
        } else {
            av_log(s, AV_LOG_DEBUG, "'%s' opened\n", [[video_device description] UTF8String]);
        }
    }
    if (audio_device) {
        av_log(s, AV_LOG_DEBUG, "audio device '%s' opened\n", [[audio_device localizedName] UTF8String]);
    }

    // Initialize capture session
    ctx->capture_session = [[AVCaptureSession alloc] init];

    if (video_device && add_video_device(s, video_device)) {
        goto fail;
    }
    if (audio_device && add_audio_device(s, audio_device)) {
        goto fail;
    }

    [ctx->capture_session startRunning];

    if (video_device && get_video_config(s)) {
        goto fail;
    }

    // set audio stream
    if (audio_device && get_audio_config(s)) {
        goto fail;
    }

    [pool release];
    return 0;

fail:
    [pool release];
    destroy_context(ctx);
    return AVERROR(EIO);
}
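
/* Packets carry raw frames copied out of the most recent sample buffer;
 * since no container timestamps are available here, pts/dts are derived
 * from the wall clock (av_gettime()) relative to stream start. */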
static int avf_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVFContext* ctx = (AVFContext*)s->priv_data;

    do {
        lock_frames(ctx);

        if (ctx->current_frame != nil) {
            CVImageBufferRef image_buffer = CMSampleBufferGetImageBuffer(ctx->current_frame);

            if (av_new_packet(pkt, (int)CVPixelBufferGetDataSize(image_buffer)) < 0) {
                unlock_frames(ctx);
                return AVERROR(EIO);
            }

            pkt->pts = pkt->dts = av_rescale_q(av_gettime() - ctx->first_pts,
                                               AV_TIME_BASE_Q,
                                               avf_time_base_q);
            pkt->stream_index = ctx->video_stream_index;
            pkt->flags       |= AV_PKT_FLAG_KEY;

            CVPixelBufferLockBaseAddress(image_buffer, 0);

            void* data = CVPixelBufferGetBaseAddress(image_buffer);
            memcpy(pkt->data, data, pkt->size);

            CVPixelBufferUnlockBaseAddress(image_buffer, 0);
            CFRelease(ctx->current_frame);
            ctx->current_frame = nil;
        } else if (ctx->current_audio_frame != nil) {
            CMBlockBufferRef block_buffer = CMSampleBufferGetDataBuffer(ctx->current_audio_frame);
            int block_buffer_size         = CMBlockBufferGetDataLength(block_buffer);

            if (!block_buffer || !block_buffer_size) {
                unlock_frames(ctx);
                return AVERROR(EIO);
            }

            if (ctx->audio_non_interleaved && block_buffer_size > ctx->audio_buffer_size) {
                unlock_frames(ctx);
                return AVERROR_BUFFER_TOO_SMALL;
            }

            if (av_new_packet(pkt, block_buffer_size) < 0) {
                unlock_frames(ctx);
                return AVERROR(EIO);
            }

            pkt->pts = pkt->dts = av_rescale_q(av_gettime() - ctx->first_audio_pts,
                                               AV_TIME_BASE_Q,
                                               avf_time_base_q);

            pkt->stream_index = ctx->audio_stream_index;
            pkt->flags       |= AV_PKT_FLAG_KEY;

            if (ctx->audio_non_interleaved) {
                int sample, c, shift;

                OSStatus ret = CMBlockBufferCopyDataBytes(block_buffer, 0, pkt->size, ctx->audio_buffer);
                if (ret != kCMBlockBufferNoErr) {
                    unlock_frames(ctx);
                    return AVERROR(EIO);
                }

                int num_samples = pkt->size / (ctx->audio_channels * (ctx->audio_bits_per_sample >> 3));

                // transform decoded frame into output format
#define INTERLEAVE_OUTPUT(bps)                                         \
{                                                                      \
    int##bps##_t **src;                                                \
    int##bps##_t *dest;                                                \
    src = av_malloc(ctx->audio_channels * sizeof(int##bps##_t*));      \
    if (!src) {                                                        \
        unlock_frames(ctx);                                            \
        return AVERROR(EIO);                                           \
    }                                                                  \
    for (c = 0; c < ctx->audio_channels; c++) {                        \
        src[c] = ((int##bps##_t*)ctx->audio_buffer) + c * num_samples; \
    }                                                                  \
    dest  = (int##bps##_t*)pkt->data;                                  \
    shift = bps - ctx->audio_bits_per_sample;                          \
    for (sample = 0; sample < num_samples; sample++)                   \
        for (c = 0; c < ctx->audio_channels; c++)                      \
            *dest++ = src[c][sample] << shift;                         \
    av_freep(&src);                                                    \
}
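
                // Non-interleaved buffers store each channel's num_samples
                // samples back to back; the macro above repacks them
                // sample-by-sample, left-shifting each value so that the
                // significant bits fill a native 16- or 32-bit container.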
                if (ctx->audio_bits_per_sample <= 16) {
                    INTERLEAVE_OUTPUT(16)
                } else {
                    INTERLEAVE_OUTPUT(32)
                }
            } else {
                OSStatus ret = CMBlockBufferCopyDataBytes(block_buffer, 0, pkt->size, pkt->data);
                if (ret != kCMBlockBufferNoErr) {
                    unlock_frames(ctx);
                    return AVERROR(EIO);
                }
            }

            CFRelease(ctx->current_audio_frame);
            ctx->current_audio_frame = nil;
        } else {
            pkt->data = NULL;
            pthread_cond_wait(&ctx->frame_wait_cond, &ctx->frame_lock);
        }

        unlock_frames(ctx);
    } while (!pkt->data);

    return 0;
}

static int avf_close(AVFormatContext *s)
{
    AVFContext* ctx = (AVFContext*)s->priv_data;
    destroy_context(ctx);
    return 0;
}

static const AVOption options[] = {
    { "frame_rate", "set frame rate", offsetof(AVFContext, frame_rate), AV_OPT_TYPE_FLOAT, { .dbl = 30.0 }, 0.1, 30.0, AV_OPT_FLAG_DECODING_PARAM, NULL },
    { "list_devices", "list available devices", offsetof(AVFContext, list_devices), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
    { "true", "", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
    { "false", "", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
    { "video_device_index", "select video device by index for devices with same name (starts at 0)", offsetof(AVFContext, video_device_index), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { "audio_device_index", "select audio device by index for devices with same name (starts at 0)", offsetof(AVFContext, audio_device_index), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { "pixel_format", "set pixel format", offsetof(AVFContext, pixel_format), AV_OPT_TYPE_PIXEL_FMT, {.i64 = AV_PIX_FMT_YUV420P}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};

static const AVClass avf_class = {
    .class_name = "AVFoundation input device",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};

AVInputFormat ff_avfoundation_demuxer = {
    .name           = "avfoundation",
    .long_name      = NULL_IF_CONFIG_SMALL("AVFoundation input device"),
    .priv_data_size = sizeof(AVFContext),
    .read_header    = avf_read_header,
    .read_packet    = avf_read_packet,
    .read_close     = avf_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &avf_class,
};