From 18e6f5126073320450438e99b4833cabc3347dba Mon Sep 17 00:00:00 2001
From: Gary Scavone
Date: Tue, 1 Apr 2014 12:03:38 -0400
Subject: [PATCH] Added Wasapi code (though untested); Updates for better data
 format support in PulseAudio; Commented-out various unused function arguments
 in tests/ files.

---
 RtAudio.cpp         | 1713 ++++++++++++++++++++++++++++++++++++++-----
 RtAudio.h           |   40 +-
 tests/duplex.cpp    |    4 +-
 tests/playraw.cpp   |    4 +-
 tests/playsaw.cpp   |    4 +-
 tests/record.cpp    |    4 +-
 tests/testall.cpp   |   12 +-
 tests/teststops.cpp |    4 +-
 8 files changed, 1595 insertions(+), 190 deletions(-)

diff --git a/RtAudio.cpp b/RtAudio.cpp
index 7d60e77..d65a7ac 100644
--- a/RtAudio.cpp
+++ b/RtAudio.cpp
@@ -53,7 +53,7 @@ const unsigned int RtApi::SAMPLE_RATES[] = {
   32000, 44100, 48000, 88200, 96000, 176400, 192000
 };
 
-#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__)
+#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
   #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
   #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
   #define MUTEX_LOCK(A)       EnterCriticalSection(A)
@@ -101,6 +101,9 @@ void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
 #if defined(__WINDOWS_ASIO__)
   apis.push_back( WINDOWS_ASIO );
 #endif
+#if defined(__WINDOWS_WASAPI__)
+  apis.push_back( WINDOWS_WASAPI );
+#endif
 #if defined(__WINDOWS_DS__)
   apis.push_back( WINDOWS_DS );
 #endif
@@ -138,6 +141,10 @@ void RtAudio :: openRtApi( RtAudio::Api api )
   if ( api == WINDOWS_ASIO )
     rtapi_ = new RtApiAsio();
 #endif
+#if defined(__WINDOWS_WASAPI__)
+  if ( api == WINDOWS_WASAPI )
+    rtapi_ = new RtApiWasapi();
+#endif
 #if defined(__WINDOWS_DS__)
   if ( api == WINDOWS_DS )
     rtapi_ = new RtApiDs();
@@ -3562,228 +3569,1580 @@ static const char* getAsioErrorString( ASIOError result )
   return "Unknown error.";
 }
 
+//******************** End of __WINDOWS_ASIO__ *********************//
 #endif
 
-#if defined(__WINDOWS_DS__) // Windows DirectSound API
+#if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
 
-// Modified by Robin Davies, October 2005
-// - Improvements to DirectX pointer chasing. 
-// - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
-// - Auto-call CoInitialize for DSOUND and ASIO platforms.
-// Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
-// Changed device query structure for RtAudio 4.0.7, January 2010
+#include "RtWasapi.inl"
+#include <audioclient.h>
+#include <avrt.h>
+#include <mmdeviceapi.h>
+#include <functiondiscoverykeys.h>
 
-#include <dsound.h>
-#include <assert.h>
-#include <algorithm>
+//=============================================================================
 
-#if defined(__MINGW32__)
-  // missing from latest mingw winapi
-#define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
-#define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
-#define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
-#define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
-#endif
+#define EXIT_ON_ERROR( hr, errorType, errorText )\
+if( FAILED( hr ) )\
+{\
+  errorText_ = __FUNCTION__ ": " errorText;\
+  error( errorType );\
+  goto Exit;\
+}
 
-#define MINIMUM_DEVICE_BUFFER_SIZE 32768
+#define SAFE_RELEASE( objectPtr )\
+if( objectPtr )\
+{\
+  objectPtr->Release();\
+  objectPtr = NULL;\
+}
 
-#ifdef _MSC_VER // if Microsoft Visual C++
-#pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
-#endif
 
+typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
 
-static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
+//-----------------------------------------------------------------------------
+
+// WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
+// Therefore we must perform all necessary conversions to user buffers in order to satisfy these
+// requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
+// provide intermediate storage for read / write synchronization.
+class WasapiBuffer
 {
-  if ( pointer > bufferSize ) pointer -= bufferSize;
-  if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
-  if ( pointer < earlierPointer ) pointer += bufferSize;
-  return pointer >= earlierPointer && pointer < laterPointer;
-}
+public:
+  WasapiBuffer()
+    : buffer_( NULL ),
+      bufferSize_( 0 ),
+      inIndex_( 0 ),
+      outIndex_( 0 ) {}
 
-// A structure to hold various information related to the DirectSound
-// API implementation.
-struct DsHandle {
-  unsigned int drainCounter; // Tracks callback counts when draining
-  bool internalDrain;        // Indicates if stop is initiated from callback or not.
-  void *id[2];
-  void *buffer[2];
-  bool xrun[2];
-  UINT bufferPointer[2];
-  DWORD dsBufferSize[2];
-  DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
-  HANDLE condition;
+  ~WasapiBuffer() {
+    free( buffer_ ); // buffer_ is allocated with calloc(), so release with free()
+  }
 
-  DsHandle()
-    :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
-};
+  // sets the length of the internal ring buffer
+  void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
+    free( buffer_ );
 
-// Declarations for utility functions, callbacks, and structures
-// specific to the DirectSound implementation.
-static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
-                                          LPCTSTR description,
-                                          LPCTSTR module,
-                                          LPVOID lpContext );
+    buffer_ = ( char* ) calloc( bufferSize, formatBytes );
 
-static const char* getErrorString( int code );
+    bufferSize_ = bufferSize;
+    inIndex_ = 0;
+    outIndex_ = 0;
+  }
 
-static unsigned __stdcall callbackHandler( void *ptr );
+  // attempt to push a buffer into the ring buffer at the current "in" index
+  bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
+  {
+    if( !buffer ||                 // incoming buffer is NULL
+        bufferSize == 0 ||         // incoming buffer has no data
+        bufferSize > bufferSize_ ) // incoming buffer too large
+    {
+      return false;
+    }
 
-struct DsDevice {
-  LPGUID id[2];
-  bool validId[2];
-  bool found;
-  std::string name;
+    unsigned int relOutIndex = outIndex_;
+    unsigned int inIndexEnd = inIndex_ + bufferSize;
+    if( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ )
+    {
+      relOutIndex += bufferSize_;
+    }
 
-  DsDevice()
-  : found(false) { validId[0] = false; validId[1] = false; }
-};
+    // "in" index can end on the "out" index but cannot begin at it
+    if( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex )
+    {
+      return false; // not enough space between "in" index and "out" index
+    }
 
-struct DsProbeData {
-  bool isInput;
-  std::vector<DsDevice>* dsDevices;
+    // copy buffer from external to internal
+    int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
+    fromZeroSize = fromZeroSize < 0 ? 
0 : fromZeroSize; + int fromInSize = bufferSize - fromZeroSize; + + switch( format ) + { + case RTAUDIO_SINT8: + memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) ); + memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) ); + break; + case RTAUDIO_SINT16: + memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) ); + memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) ); + break; + case RTAUDIO_SINT24: + memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) ); + memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) ); + break; + case RTAUDIO_SINT32: + memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) ); + memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) ); + break; + case RTAUDIO_FLOAT32: + memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) ); + memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) ); + break; + case RTAUDIO_FLOAT64: + memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) ); + memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) ); + break; + } + + // update "in" index + inIndex_ += bufferSize; + inIndex_ %= bufferSize_; + + return true; + } + + // attempt to pull a buffer from the ring buffer from the current "out" index + bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format ) + { + if( !buffer || // incoming buffer is NULL + bufferSize == 0 || // incoming buffer has no data + bufferSize > bufferSize_ ) // incoming buffer too large + { + return false; + } + + unsigned int relInIndex = inIndex_; + unsigned int outIndexEnd = outIndex_ + bufferSize; + if( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) + { + relInIndex += bufferSize_; + } + + // "out" index can begin at and end on the "in" index + if( outIndex_ < relInIndex && outIndexEnd > relInIndex ) + { + return false; // not enough space between "out" index and "in" index + } + + // copy buffer from internal to external + int fromZeroSize = outIndex_ + bufferSize - bufferSize_; + fromZeroSize = fromZeroSize < 0 ? 
0 : fromZeroSize; + int fromOutSize = bufferSize - fromZeroSize; + + switch( format ) + { + case RTAUDIO_SINT8: + memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) ); + memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) ); + break; + case RTAUDIO_SINT16: + memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) ); + memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) ); + break; + case RTAUDIO_SINT24: + memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) ); + memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) ); + break; + case RTAUDIO_SINT32: + memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) ); + memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) ); + break; + case RTAUDIO_FLOAT32: + memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) ); + memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) ); + break; + case RTAUDIO_FLOAT64: + memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) ); + memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) ); + break; + } + + // update "out" index + outIndex_ += bufferSize; + outIndex_ %= bufferSize_; + + return true; + } + +private: + char* buffer_; + unsigned int bufferSize_; + unsigned int inIndex_; + unsigned int outIndex_; }; -RtApiDs :: RtApiDs() +//----------------------------------------------------------------------------- + +// In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate and +// channel counts between HW and the user. The convertBufferWasapi function is used to perform +// these conversions between HwIn->UserIn and UserOut->HwOut during the stream callback loop. +// This sample rate converter favors speed over quality, and works best with conversions between +// one rate and its multiple. RtApiWasapi will not populate a device's sample rate list with rates +// that may cause artifacts via this conversion. +void convertBufferWasapi( char* outBuffer, + const char* inBuffer, + const unsigned int& inChannelCount, + const unsigned int& outChannelCount, + const unsigned int& inSampleRate, + const unsigned int& outSampleRate, + const unsigned int& inSampleCount, + unsigned int& outSampleCount, + const RtAudioFormat& format ) { - // Dsound will run both-threaded. If CoInitialize fails, then just - // accept whatever the mainline chose for a threading model. 
- coInitialized_ = false; - HRESULT hr = CoInitialize( NULL ); - if ( !FAILED( hr ) ) coInitialized_ = true; + // calculate the new outSampleCount and relative sampleStep + float sampleRatio = ( float ) outSampleRate / inSampleRate; + float sampleStep = 1.0f / sampleRatio; + float inSampleFraction = 0.0f; + unsigned int commonChannelCount = min( inChannelCount, outChannelCount ); + + outSampleCount = ( unsigned int ) ( inSampleCount * sampleRatio ); + + // frame-by-frame, copy each relative input sample into it's corresponding output sample + for( unsigned int outSample = 0; outSample < outSampleCount; outSample++ ) + { + unsigned int inSample = ( unsigned int ) inSampleFraction; + + switch( format ) + { + case RTAUDIO_SINT8: + memcpy( &( ( char* ) outBuffer )[ outSample * outChannelCount ], &( ( char* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( char ) ); + break; + case RTAUDIO_SINT16: + memcpy( &( ( short* ) outBuffer )[ outSample * outChannelCount ], &( ( short* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( short ) ); + break; + case RTAUDIO_SINT24: + memcpy( &( ( S24* ) outBuffer )[ outSample * outChannelCount ], &( ( S24* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( S24 ) ); + break; + case RTAUDIO_SINT32: + memcpy( &( ( int* ) outBuffer )[ outSample * outChannelCount ], &( ( int* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( int ) ); + break; + case RTAUDIO_FLOAT32: + memcpy( &( ( float* ) outBuffer )[ outSample * outChannelCount ], &( ( float* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( float ) ); + break; + case RTAUDIO_FLOAT64: + memcpy( &( ( double* ) outBuffer )[ outSample * outChannelCount ], &( ( double* ) inBuffer )[ inSample * inChannelCount ], commonChannelCount * sizeof( double ) ); + break; + } + + // jump to next in sample + inSampleFraction += sampleStep; + } } -RtApiDs :: ~RtApiDs() +//----------------------------------------------------------------------------- + +// A structure to hold various information related to the WASAPI implementation. +struct WasapiHandle { - if ( coInitialized_ ) CoUninitialize(); // balanced call. - if ( stream_.state != STREAM_CLOSED ) closeStream(); -} + IAudioClient* captureAudioClient; + IAudioClient* renderAudioClient; + IAudioCaptureClient* captureClient; + IAudioRenderClient* renderClient; + HANDLE captureEvent; + HANDLE renderEvent; + + WasapiHandle() + : captureAudioClient( NULL ), + renderAudioClient( NULL ), + captureClient( NULL ), + renderClient( NULL ), + captureEvent( NULL ), + renderEvent( NULL ) {} +}; -// The DirectSound default output is always the first device. -unsigned int RtApiDs :: getDefaultOutputDevice( void ) +//============================================================================= + +RtApiWasapi::RtApiWasapi() + : coInitialized_( false ), deviceEnumerator_( NULL ) { - return 0; + // WASAPI can run either apartment or multi-threaded + HRESULT hr = CoInitialize( NULL ); + + if( !FAILED( hr ) ) + coInitialized_ = true; + + // instantiate device enumerator + hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL, + CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ), + ( void** ) &deviceEnumerator_ ); + + if( FAILED( hr ) ) { + errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator"; + error( RtAudioError::DRIVER_ERROR ); + } } -// The DirectSound default input is always the first input device, -// which is the first capture device enumerated. 
-unsigned int RtApiDs :: getDefaultInputDevice( void ) +//----------------------------------------------------------------------------- + +RtApiWasapi::~RtApiWasapi() { - return 0; + // if this object previously called CoInitialize() + if( coInitialized_ ) + { + CoUninitialize(); + } + + if( stream_.state != STREAM_CLOSED ) + { + closeStream(); + } + + SAFE_RELEASE( deviceEnumerator_ ); } -unsigned int RtApiDs :: getDeviceCount( void ) +//============================================================================= + +unsigned int RtApiWasapi::getDeviceCount( void ) { - // Set query flag for previously found devices to false, so that we - // can check for any devices that have disappeared. - for ( unsigned int i=0; iEnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device collection" ); - // Clean out any devices that may have disappeared. - std::vector< int > indices; - for ( unsigned int i=0; iGetCount( &captureDeviceCount ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device count" ); - return static_cast(dsDevices.size()); + // count render devices + hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device collection" ); + + hr = renderDevices->GetCount( &renderDeviceCount ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device count" ); + +Exit: + // release all references + SAFE_RELEASE( captureDevices ); + SAFE_RELEASE( renderDevices ); + + return captureDeviceCount + renderDeviceCount; } -RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device ) +//----------------------------------------------------------------------------- + +RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device ) { RtAudio::DeviceInfo info; + unsigned int captureDeviceCount = 0; + unsigned int renderDeviceCount = 0; + std::wstring deviceName; + std::string defaultDeviceName; + bool isCaptureDevice = false; + + PROPVARIANT deviceNameProp; + PROPVARIANT defaultDeviceNameProp; + + IMMDeviceCollection* captureDevices = NULL; + IMMDeviceCollection* renderDevices = NULL; + IMMDevice* devicePtr = NULL; + IMMDevice* defaultDevicePtr = NULL; + IAudioClient* audioClient = NULL; + IPropertyStore* devicePropStore = NULL; + IPropertyStore* defaultDevicePropStore = NULL; + + WAVEFORMATEX* deviceFormat = NULL; + WAVEFORMATEX* closestMatchFormat = NULL; + + // probed info.probed = false; - if ( dsDevices.size() == 0 ) { - // Force a query of all devices - getDeviceCount(); - if ( dsDevices.size() == 0 ) { - errorText_ = "RtApiDs::getDeviceInfo: no devices found!"; - error( RtAudioError::INVALID_USE ); - return info; - } - } + // count capture devices + HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device collection" ); - if ( device >= dsDevices.size() ) { - errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!"; - error( RtAudioError::INVALID_USE ); - return info; - } + hr = captureDevices->GetCount( &captureDeviceCount ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device count" ); - HRESULT result; - if ( dsDevices[ device ].validId[0] == false ) goto probeInput; + // count render devices + hr = deviceEnumerator_->EnumAudioEndpoints( eRender, 
DEVICE_STATE_ACTIVE, &renderDevices );
+  EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device collection" );
 
-  LPDIRECTSOUND output;
-  DSCAPS outCaps;
-  result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
-  if ( FAILED( result ) ) {
-    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
-    errorText_ = errorStream_.str();
-    error( RtAudioError::WARNING );
-    goto probeInput;
-  }
+  hr = renderDevices->GetCount( &renderDeviceCount );
+  EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device count" );
 
-  outCaps.dwSize = sizeof( outCaps );
-  result = output->GetCaps( &outCaps );
-  if ( FAILED( result ) ) {
-    output->Release();
-    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
-    errorText_ = errorStream_.str();
-    error( RtAudioError::WARNING );
-    goto probeInput;
+  // validate device index
+  if ( device >= captureDeviceCount + renderDeviceCount )
+    EXIT_ON_ERROR( -1, RtAudioError::INVALID_USE, "Invalid device index" );
+
+  // determine whether index falls within capture or render devices
+  if ( device < captureDeviceCount ) {
+    hr = captureDevices->Item( device, &devicePtr );
+    EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device handle" );
+
+    isCaptureDevice = true;
   }
+  else {
+    hr = renderDevices->Item( device - captureDeviceCount, &devicePtr );
+    EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device handle" );
 
-  // Get output channel information.
-  info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
+    isCaptureDevice = false;
+  }
 
-  // Get sample rate information.
-  info.sampleRates.clear();
-  for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
-    if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
-         SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
-      info.sampleRates.push_back( SAMPLE_RATES[k] );
+  // get default device name
+  if ( isCaptureDevice ) {
+    hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
+    EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve default capture device handle" );
+  }
+  else {
+    hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
+    EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve default render device handle" );
   }
 
-  // Get format information. 
- if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16; - if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8; + hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to open default device property store" ); - output->Release(); + PropVariantInit( &defaultDeviceNameProp ); - if ( getDefaultOutputDevice() == device ) - info.isDefaultOutput = true; + hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve default device property: PKEY_Device_FriendlyName" ); - if ( dsDevices[ device ].validId[1] == false ) { - info.name = dsDevices[ device ].name; - info.probed = true; - return info; - } + deviceName = defaultDeviceNameProp.pwszVal; + defaultDeviceName = std::string( deviceName.begin(), deviceName.end() ); - probeInput: + // name + hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to open device property store" ); - LPDIRECTSOUNDCAPTURE input; - result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL ); + PropVariantInit( &deviceNameProp ); + + hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device property: PKEY_Device_FriendlyName" ); + + deviceName = deviceNameProp.pwszVal; + info.name = std::string( deviceName.begin(), deviceName.end() ); + + // is default + if ( isCaptureDevice ) { + info.isDefaultInput = info.name == defaultDeviceName; + info.isDefaultOutput = false; + } + else { + info.isDefaultInput = false; + info.isDefaultOutput = info.name == defaultDeviceName; + } + + // channel count + hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device audio client" ); + + hr = audioClient->GetMixFormat( &deviceFormat ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device mix format" ); + + if ( isCaptureDevice ) { + info.inputChannels = deviceFormat->nChannels; + info.outputChannels = 0; + info.duplexChannels = 0; + } + else { + info.inputChannels = 0; + info.outputChannels = deviceFormat->nChannels; + info.duplexChannels = 0; + } + + // sample rates + info.sampleRates.clear(); + + // allow support for sample rates that are multiples of the base rate + for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) { + if ( SAMPLE_RATES[i] < deviceFormat->nSamplesPerSec ) { + if ( deviceFormat->nSamplesPerSec % SAMPLE_RATES[i] == 0 ) { + info.sampleRates.push_back( SAMPLE_RATES[i] ); + } + } + else { + if ( SAMPLE_RATES[i] % deviceFormat->nSamplesPerSec == 0 ) { + info.sampleRates.push_back( SAMPLE_RATES[i] ); + } + } + } + + // native format + info.nativeFormats = 0; + + if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT || + ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE && + ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) ) + { + if ( deviceFormat->wBitsPerSample == 32 ) { + info.nativeFormats |= RTAUDIO_FLOAT32; + } + else if ( deviceFormat->wBitsPerSample == 64 ) { + info.nativeFormats |= RTAUDIO_FLOAT64; + } + } + else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM || + ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE && + ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat 
)->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
+  {
+    if ( deviceFormat->wBitsPerSample == 8 ) {
+      info.nativeFormats |= RTAUDIO_SINT8;
+    }
+    else if ( deviceFormat->wBitsPerSample == 16 ) {
+      info.nativeFormats |= RTAUDIO_SINT16;
+    }
+    else if ( deviceFormat->wBitsPerSample == 24 ) {
+      info.nativeFormats |= RTAUDIO_SINT24;
+    }
+    else if ( deviceFormat->wBitsPerSample == 32 ) {
+      info.nativeFormats |= RTAUDIO_SINT32;
+    }
+  }
+
+  // probed
+  info.probed = true;
+
+Exit:
+  // release all references
+  PropVariantClear( &deviceNameProp );
+  PropVariantClear( &defaultDeviceNameProp );
+
+  SAFE_RELEASE( captureDevices );
+  SAFE_RELEASE( renderDevices );
+  SAFE_RELEASE( devicePtr );
+  SAFE_RELEASE( defaultDevicePtr );
+  SAFE_RELEASE( audioClient );
+  SAFE_RELEASE( devicePropStore );
+  SAFE_RELEASE( defaultDevicePropStore );
+
+  CoTaskMemFree( deviceFormat );
+  CoTaskMemFree( closestMatchFormat );
+
+  return info;
+}
+
+//-----------------------------------------------------------------------------
+
+unsigned int RtApiWasapi::getDefaultOutputDevice( void )
+{
+  for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
+    if ( getDeviceInfo( i ).isDefaultOutput ) {
+      return i;
+    }
+  }
+
+  return 0;
+}
+
+//-----------------------------------------------------------------------------
+
+unsigned int RtApiWasapi::getDefaultInputDevice( void )
+{
+  for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
+    if ( getDeviceInfo( i ).isDefaultInput ) {
+      return i;
+    }
+  }
+
+  return 0;
+}
+
+//-----------------------------------------------------------------------------
+
+void RtApiWasapi::closeStream( void )
+{
+  if ( stream_.state == STREAM_CLOSED ) {
+    errorText_ = "RtApiWasapi::closeStream: No open stream to close";
+    error( RtAudioError::WARNING );
+    return;
+  }
+
+  if ( stream_.state != STREAM_STOPPED )
+    stopStream();
+
+  // clean up stream memory
+  SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
+  SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
+
+  SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
+  SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
+
+  if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
+    CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
+
+  if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
+    CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
+
+  delete ( WasapiHandle* ) stream_.apiHandle; // cast before delete; deleting a void* is undefined
+  stream_.apiHandle = NULL;
+
+  for ( int i = 0; i < 2; i++ ) {
+    if ( stream_.userBuffer[i] ) {
+      free( stream_.userBuffer[i] );
+      stream_.userBuffer[i] = 0;
+    }
+  }
+
+  if ( stream_.deviceBuffer ) {
+    free( stream_.deviceBuffer );
+    stream_.deviceBuffer = 0;
+  }
+
+  // update stream state
+  stream_.state = STREAM_CLOSED;
+}
+
+//-----------------------------------------------------------------------------
+
+void RtApiWasapi::startStream( void )
+{
+  verifyStream();
+
+  if ( stream_.state == STREAM_RUNNING ) {
+    errorText_ = "RtApiWasapi::startStream: The stream is already running";
+    error( RtAudioError::WARNING );
+    return;
+  }
+
+  // update stream state
+  stream_.state = STREAM_RUNNING;
+
+  // create WASAPI stream thread
+  stream_.callbackInfo.thread = ( unsigned int ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
+
+  if ( !stream_.callbackInfo.thread ) {
+    errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread";
+    error( RtAudioError::THREAD_ERROR );
+  }
+  else {
+    SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
+    ResumeThread( ( void* ) stream_.callbackInfo.thread );
+  }
+}
+
+//-----------------------------------------------------------------------------
+
+void RtApiWasapi::stopStream( void )
+{
+  verifyStream();
+
+  if ( stream_.state == STREAM_STOPPED ) {
+    errorText_ = "RtApiWasapi::stopStream: The stream is already stopped";
+    error( RtAudioError::WARNING );
+    return;
+  }
+
+  // inform stream thread by setting stream state to STREAM_STOPPING
+  stream_.state = STREAM_STOPPING;
+
+  // wait until stream thread is stopped
+  while( stream_.state != STREAM_STOPPED ) {
+    Sleep( 1 );
+  }
+
+  // Wait for the last buffer to play before stopping.
+  Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
+
+  // stop capture client if applicable
+  if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
+    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
+    if ( FAILED( hr ) ) {
+      errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream";
+      error( RtAudioError::DRIVER_ERROR );
+    }
+  }
+
+  // stop render client if applicable
+  if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
+    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
+    if ( FAILED( hr ) ) {
+      errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream";
+      error( RtAudioError::DRIVER_ERROR );
+    }
+  }
+
+  // close thread handle
+  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
+    errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread";
+    error( RtAudioError::THREAD_ERROR );
+  }
+
+  stream_.callbackInfo.thread = NULL;
+}
+
+//-----------------------------------------------------------------------------
+
+void RtApiWasapi::abortStream( void )
+{
+  verifyStream();
+
+  if ( stream_.state == STREAM_STOPPED ) {
+    errorText_ = "RtApiWasapi::abortStream: The stream is already stopped";
+    error( RtAudioError::WARNING );
+    return;
+  }
+
+  // inform stream thread by setting stream state to STREAM_STOPPING
+  stream_.state = STREAM_STOPPING;
+
+  // wait until stream thread is stopped
+  while ( stream_.state != STREAM_STOPPED ) {
+    Sleep( 1 );
+  }
+
+  // stop capture client if applicable
+  if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
+    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
+    if ( FAILED( hr ) ) {
+      errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream";
+      error( RtAudioError::DRIVER_ERROR );
+    }
+  }
+
+  // stop render client if applicable
+  if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
+    HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
+    if ( FAILED( hr ) ) {
+      errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream";
+      error( RtAudioError::DRIVER_ERROR );
+    }
+  }
+
+  // close thread handle
+  if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
+    errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread";
+    error( RtAudioError::THREAD_ERROR );
+  }
+
+  stream_.callbackInfo.thread = NULL;
+}
+
+//-----------------------------------------------------------------------------
+
+bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
+                                   unsigned int firstChannel, unsigned int sampleRate,
+                                   RtAudioFormat format, unsigned int* bufferSize,
+                                   RtAudio::StreamOptions* options )
+{
+  bool methodResult 
= FAILURE; + unsigned int captureDeviceCount = 0; + unsigned int renderDeviceCount = 0; + + IMMDeviceCollection* captureDevices = NULL; + IMMDeviceCollection* renderDevices = NULL; + IMMDevice* devicePtr = NULL; + WAVEFORMATEX* deviceFormat = NULL; + + // create API Handle if not already created + if ( !stream_.apiHandle ) + stream_.apiHandle = ( void* ) new WasapiHandle(); + + // count capture devices + HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device collection" ); + + hr = captureDevices->GetCount( &captureDeviceCount ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device count" ); + + // count render devices + hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device collection" ); + + hr = renderDevices->GetCount( &renderDeviceCount ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device count" ); + + // validate device index + if ( device >= captureDeviceCount + renderDeviceCount ) + EXIT_ON_ERROR( -1, RtAudioError::INVALID_USE, "Invalid device index" ); + + // determine whether index falls within capture or render devices + if ( device < captureDeviceCount ) { + if ( mode != INPUT ) + EXIT_ON_ERROR( -1, RtAudioError::INVALID_USE, "Capture device selected as output device" ); + + // retrieve captureAudioClient from devicePtr + IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient; + + hr = captureDevices->Item( device, &devicePtr ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture device handle" ); + + hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, + NULL, ( void** ) &captureAudioClient ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device audio client" ); + + hr = captureAudioClient->GetMixFormat( &deviceFormat ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device mix format" ); + + stream_.nDeviceChannels[mode] = deviceFormat->nChannels; + captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] ); + } + else { + if ( mode != OUTPUT ) + EXIT_ON_ERROR( -1, RtAudioError::INVALID_USE, "Render device selected as input device" ); + + // retrieve renderAudioClient from devicePtr + IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient; + + hr = renderDevices->Item( device - captureDeviceCount, &devicePtr ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render device handle" ); + + hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, + NULL, ( void** ) &renderAudioClient ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device audio client" ); + + hr = renderAudioClient->GetMixFormat( &deviceFormat ); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device mix format" ); + + stream_.nDeviceChannels[mode] = deviceFormat->nChannels; + renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] ); + } + + // fill stream data + if ( ( stream_.mode == OUTPUT && mode == INPUT ) || + ( stream_.mode == INPUT && mode == OUTPUT ) ) { + stream_.mode = DUPLEX; + } + else { + stream_.mode = mode; + } + + stream_.device[mode] = device; + stream_.state = STREAM_STOPPED; + stream_.doByteSwap[mode] = false; + 
stream_.sampleRate = sampleRate; + stream_.bufferSize = *bufferSize; + stream_.nBuffers = 1; + stream_.nUserChannels[mode] = channels; + stream_.channelOffset[mode] = firstChannel; + stream_.userFormat = format; + stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats; + + if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) + stream_.userInterleaved = false; + else + stream_.userInterleaved = true; + stream_.deviceInterleaved[mode] = true; + + // Set flags for buffer conversion. + stream_.doConvertBuffer[mode] = false; + if ( stream_.userFormat != stream_.deviceFormat[mode] ) + stream_.doConvertBuffer[mode] = true; + if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] && + stream_.nUserChannels[mode] > 1 ) + stream_.doConvertBuffer[mode] = true; + + if ( stream_.doConvertBuffer[mode] ) + setConvertInfo( mode, 0 ); + + // Allocate necessary internal buffers + unsigned int bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat ); + + stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 ); + if ( !stream_.userBuffer[mode] ) + EXIT_ON_ERROR( -1, RtAudioError::MEMORY_ERROR, "Error allocating user buffer memory" ); + + if ( stream_.doConvertBuffer[mode] && !stream_.deviceBuffer ) { + unsigned int deviceBufferSize = max( stream_.nUserChannels[INPUT] * stream_.bufferSize * formatBytes( stream_.userFormat ), + stream_.nUserChannels[OUTPUT] * stream_.bufferSize * formatBytes( stream_.userFormat ) ); + + stream_.deviceBuffer = ( char* ) calloc( deviceBufferSize, 1 ); + if ( !stream_.deviceBuffer ) + EXIT_ON_ERROR( -1, RtAudioError::MEMORY_ERROR, "Error allocating device buffer memory" ); + } + + if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) + stream_.callbackInfo.priority = 15; + else + stream_.callbackInfo.priority = 0; + + ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback + ///! 
TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
+
+  methodResult = SUCCESS;
+
+Exit:
+  // clean up
+  SAFE_RELEASE( captureDevices );
+  SAFE_RELEASE( renderDevices );
+  SAFE_RELEASE( devicePtr );
+
+  CoTaskMemFree( deviceFormat );
+
+  // if method failed, close the stream
+  if ( methodResult == FAILURE )
+    closeStream();
+
+  return methodResult;
+}
+
+//=============================================================================
+
+DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
+{
+  if ( wasapiPtr )
+    ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
+
+  return 0;
+}
+
+DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
+{
+  if ( wasapiPtr )
+    ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
+
+  return 0;
+}
+
+DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
+{
+  if ( wasapiPtr )
+    ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
+
+  return 0;
+}
+
+//-----------------------------------------------------------------------------
+
+void RtApiWasapi::wasapiThread()
+{
+  // as this is a new thread, we must CoInitialize it
+  CoInitialize( NULL );
+
+  HRESULT hr;
+
+  IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
+  IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
+  IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
+  IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
+  HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
+  HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
+
+  WAVEFORMATEX* captureFormat = NULL;
+  WAVEFORMATEX* renderFormat = NULL;
+  float captureSrRatio = 0.0f;
+  float renderSrRatio = 0.0f;
+  WasapiBuffer captureBuffer;
+  WasapiBuffer renderBuffer;
+
+  // Attempt to assign "Pro Audio" characteristic to thread
+  HMODULE AvrtDll = LoadLibrary( "AVRT.dll" );
+  if ( AvrtDll ) {
+    DWORD taskIndex = 0;
+    TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
+    if ( AvSetMmThreadCharacteristicsPtr ) // guard against a missing export
+      AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
+    FreeLibrary( AvrtDll );
+  }
+
+  // start capture stream if applicable
+  if ( captureAudioClient ) {
+    hr = captureAudioClient->GetMixFormat( &captureFormat );
+    EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device mix format" );
+
+    captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
+
+    // initialize capture stream according to desired buffer size
+    float desiredBufferSize = stream_.bufferSize * captureSrRatio;
+    REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
+
+    if ( !captureClient ) {
+      hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
+                                           AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
+                                           desiredBufferPeriod,
+                                           desiredBufferPeriod,
+                                           captureFormat,
+                                           NULL );
+      EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to initialize capture audio client" );
+
+      hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
+                                           ( void** ) &captureClient );
+      EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture client handle" );
+
+      // configure captureEvent to trigger on every available capture buffer
+      captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
+      if ( !captureEvent )
+        EXIT_ON_ERROR( -1, RtAudioError::SYSTEM_ERROR, "Unable to create capture event" );
+
+      hr = captureAudioClient->SetEventHandle( captureEvent );
+      EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to set capture event handle" );
+
+      ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
+      ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
+    }
+
+    unsigned int inBufferSize = 0;
+    hr = captureAudioClient->GetBufferSize( &inBufferSize );
+    EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to get capture buffer size" );
+
+    // scale outBufferSize according to stream->user sample rate ratio
+    // (outBufferSize must be a multiple of the input channel count)
+    unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * captureSrRatio );
+    if ( outBufferSize % stream_.nDeviceChannels[INPUT] )
+      outBufferSize += stream_.nDeviceChannels[INPUT] - ( outBufferSize % stream_.nDeviceChannels[INPUT] );
+
+    inBufferSize *= stream_.nDeviceChannels[INPUT];
+
+    // set captureBuffer size
+    captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
+
+    // reset the capture stream
+    hr = captureAudioClient->Reset();
+    EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to reset capture stream" );
+
+    // start the capture stream
+    hr = captureAudioClient->Start();
+    EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to start capture stream" );
+  }
+
+  // start render stream if applicable
+  if ( renderAudioClient ) {
+    hr = renderAudioClient->GetMixFormat( &renderFormat );
+    EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve device mix format" );
+
+    renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
+
+    // initialize render stream according to desired buffer size
+    float desiredBufferSize = stream_.bufferSize * renderSrRatio;
+    REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
+
+    if ( !renderClient ) {
+      hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
+                                          AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
+                                          desiredBufferPeriod,
+                                          desiredBufferPeriod,
+                                          renderFormat,
+                                          NULL );
+      EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to initialize render audio client" );
+
+      hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
+                                          ( void** ) &renderClient );
+      EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render client handle" );
+
+      // configure renderEvent to trigger on every available render buffer
+      renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
+      if ( !renderEvent )
+        EXIT_ON_ERROR( -1, RtAudioError::SYSTEM_ERROR, "Unable to create render event" );
+
+      hr = renderAudioClient->SetEventHandle( renderEvent );
+      EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to set render event handle" );
+
+      ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
+      ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
+    }
+
+    unsigned int outBufferSize = 0;
+    hr = renderAudioClient->GetBufferSize( &outBufferSize );
+    EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to get render buffer size" );
+
+    // scale inBufferSize according to user->stream sample rate ratio
+    // (inBufferSize must be a multiple of the output channel count)
+    unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * renderSrRatio );
+    if ( inBufferSize % stream_.nDeviceChannels[OUTPUT] ) {
+      inBufferSize += stream_.nDeviceChannels[OUTPUT] - ( 
inBufferSize % stream_.nDeviceChannels[OUTPUT] ); + } + + outBufferSize *= stream_.nDeviceChannels[OUTPUT]; + + // set renderBuffer size + renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) ); + + // reset the render stream + hr = renderAudioClient->Reset(); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to reset render stream" ); + + // start the render stream + hr = renderAudioClient->Start(); + EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to start render stream" ); + } + + // declare local stream variables + RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback; + + BYTE* streamBuffer = NULL; + unsigned long captureFlags = 0; + + unsigned int bufferFrameCount = 0; + unsigned int numFramesPadding = 0; + unsigned int convBufferSize = 0; + + bool callbackPushed = false; + bool callbackPulled = false; + bool callbackStopped = false; + + int callbackResult = 0; + + // convBuffer is used to store converted buffers between WASAPI and the user + char* convBuffer = NULL; + + if ( stream_.mode == INPUT ) { + convBuffer = ( char* ) malloc( ( size_t ) ( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * captureSrRatio * formatBytes( stream_.deviceFormat[INPUT] ) ) ); + } + else if ( stream_.mode == OUTPUT ) { + convBuffer = ( char* ) malloc( ( size_t ) ( stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * renderSrRatio * formatBytes( stream_.deviceFormat[OUTPUT] ) ) ); + } + else if ( stream_.mode == DUPLEX ) { + convBuffer = ( char* ) malloc( max( ( size_t ) ( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * captureSrRatio * formatBytes( stream_.deviceFormat[INPUT] ) ), + ( size_t ) ( stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * renderSrRatio * formatBytes( stream_.deviceFormat[OUTPUT] ) ) ) ); + } + + // stream process loop + while ( stream_.state != STREAM_STOPPING ) { + if ( !callbackPulled ) { + // Callback Input + // ============== + // 1. Pull callback buffer from inputBuffer + // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count + // Convert callback buffer to user format + + if ( captureAudioClient ) { + // Pull callback buffer from inputBuffer + callbackPulled = captureBuffer.pullBuffer( convBuffer, + ( unsigned int ) ( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * captureSrRatio ), + stream_.deviceFormat[INPUT] ); + + if ( callbackPulled ) { + // Convert callback buffer to user sample rate and channel count + convertBufferWasapi( stream_.deviceBuffer, + convBuffer, + stream_.nDeviceChannels[INPUT], + stream_.nUserChannels[INPUT], + captureFormat->nSamplesPerSec, + stream_.sampleRate, + ( unsigned int ) ( stream_.bufferSize * captureSrRatio ), + convBufferSize, + stream_.deviceFormat[INPUT] ); + + if ( stream_.doConvertBuffer[INPUT] ) { + // Convert callback buffer to user format + convertBuffer( stream_.userBuffer[INPUT], + stream_.deviceBuffer, + stream_.convertInfo[INPUT] ); + } + else { + // no conversion, simple copy deviceBuffer to userBuffer + memcpy( stream_.userBuffer[INPUT], + stream_.deviceBuffer, + stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) ); + } + } + } + else { + // if there is no capture stream, set callbackPulled flag + callbackPulled = true; + } + + // Execute Callback + // ================ + // 1. Execute user callback method + // 2. 
Handle return value from callback + + // if callback has not requested the stream to stop + if ( callbackPulled && !callbackStopped ) { + // Execute user callback method + callbackResult = callback( stream_.userBuffer[OUTPUT], + stream_.userBuffer[INPUT], + stream_.bufferSize, + getStreamTime(), + captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0, + stream_.callbackInfo.userData ); + + // Handle return value from callback + if ( callbackResult == 1 ) { + // instantiate a thread to stop this thread + HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, NULL, NULL ); + + if ( !threadHandle ) { + EXIT_ON_ERROR( -1, RtAudioError::THREAD_ERROR, "Unable to instantiate stream stop thread" ); + } + else if ( !CloseHandle( threadHandle ) ) { + EXIT_ON_ERROR( -1, RtAudioError::THREAD_ERROR, "Unable to close stream stop thread handle" ); + } + + callbackStopped = true; + } + else if ( callbackResult == 2 ) { + // instantiate a thread to stop this thread + HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, NULL, NULL ); + + if ( !threadHandle ) { + EXIT_ON_ERROR( -1, RtAudioError::THREAD_ERROR, "Unable to instantiate stream abort thread" ); + } + else if ( !CloseHandle( threadHandle ) ) { + EXIT_ON_ERROR( -1, RtAudioError::THREAD_ERROR, "Unable to close stream abort thread handle" ); + } + + callbackStopped = true; + } + } + } + + // Callback Output + // =============== + // 1. Convert callback buffer to stream format + // 2. Convert callback buffer to stream sample rate and channel count + // 3. Push callback buffer into outputBuffer + + if ( renderAudioClient && callbackPulled ) { + if ( stream_.doConvertBuffer[OUTPUT] ) { + // Convert callback buffer to stream format + convertBuffer( stream_.deviceBuffer, + stream_.userBuffer[OUTPUT], + stream_.convertInfo[OUTPUT] ); + + // Convert callback buffer to stream sample rate and channel count + convertBufferWasapi( convBuffer, + stream_.deviceBuffer, + stream_.nUserChannels[OUTPUT], + stream_.nDeviceChannels[OUTPUT], + stream_.sampleRate, + renderFormat->nSamplesPerSec, + stream_.bufferSize, + convBufferSize, + stream_.deviceFormat[OUTPUT] ); + } + else { + // Convert callback buffer to stream sample rate and channel count + convertBufferWasapi( convBuffer, + stream_.userBuffer[OUTPUT], + stream_.nUserChannels[OUTPUT], + stream_.nDeviceChannels[OUTPUT], + stream_.sampleRate, + renderFormat->nSamplesPerSec, + stream_.bufferSize, + convBufferSize, + stream_.deviceFormat[OUTPUT] ); + } + + // Push callback buffer into outputBuffer + callbackPushed = renderBuffer.pushBuffer( convBuffer, + convBufferSize * stream_.nDeviceChannels[OUTPUT], + stream_.deviceFormat[OUTPUT] ); + } + + // Stream Capture + // ============== + // 1. Get capture buffer from stream + // 2. Push capture buffer into inputBuffer + // 3. If 2. 
was successful: Release capture buffer
+
+    if ( captureAudioClient ) {
+      // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
+      if ( !callbackPulled ) {
+        WaitForSingleObject( captureEvent, INFINITE );
+      }
+
+      // Get capture buffer from stream
+      hr = captureClient->GetBuffer( &streamBuffer,
+                                     &bufferFrameCount,
+                                     &captureFlags, NULL, NULL );
+      EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve capture buffer" );
+
+      if ( bufferFrameCount != 0 ) {
+        // Push capture buffer into inputBuffer
+        if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
+                                       bufferFrameCount * stream_.nDeviceChannels[INPUT],
+                                       stream_.deviceFormat[INPUT] ) )
+        {
+          // Release capture buffer
+          hr = captureClient->ReleaseBuffer( bufferFrameCount );
+          EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release capture buffer" );
+        }
+        else
+        {
+          // Inform WASAPI that capture was unsuccessful
+          hr = captureClient->ReleaseBuffer( 0 );
+          EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release capture buffer" );
+        }
+      }
+      else
+      {
+        // Inform WASAPI that capture was unsuccessful
+        hr = captureClient->ReleaseBuffer( 0 );
+        EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release capture buffer" );
+      }
+    }
+
+    // Stream Render
+    // =============
+    // 1. Get render buffer from stream
+    // 2. Pull next buffer from outputBuffer
+    // 3. If 2. was successful: Fill render buffer with next buffer
+    //                          Release render buffer
+
+    if ( renderAudioClient ) {
+      // if the callback output buffer was not pushed to renderBuffer, wait for next render event
+      if ( callbackPulled && !callbackPushed ) {
+        WaitForSingleObject( renderEvent, INFINITE );
+      }
+
+      // Get render buffer from stream
+      hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
+      EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render buffer size" );
+
+      hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
+      EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render buffer padding" );
+
+      bufferFrameCount -= numFramesPadding;
+
+      if ( bufferFrameCount != 0 ) {
+        hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
+        EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to retrieve render buffer" );
+
+        // Pull next buffer from outputBuffer
+        // Fill render buffer with next buffer
+        if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
+                                      bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
+                                      stream_.deviceFormat[OUTPUT] ) )
+        {
+          // Release render buffer
+          hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
+          EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release render buffer" );
+        }
+        else
+        {
+          // Inform WASAPI that render was unsuccessful
+          hr = renderClient->ReleaseBuffer( 0, 0 );
+          EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release render buffer" );
+        }
+      }
+      else
+      {
+        // Inform WASAPI that render was unsuccessful
+        hr = renderClient->ReleaseBuffer( 0, 0 );
+        EXIT_ON_ERROR( hr, RtAudioError::DRIVER_ERROR, "Unable to release render buffer" );
+      }
+    }
+
+    // if the callback buffer was pushed to renderBuffer, reset the callbackPulled flag
+    if ( callbackPushed ) {
+      callbackPulled = false;
+    }
+
+    // tick stream time
+    RtApi::tickStreamTime();
+  }
+
+Exit:
+  // clean up
+  CoTaskMemFree( captureFormat );
+  CoTaskMemFree( renderFormat );
+
+  free( convBuffer ); // convBuffer is allocated with malloc(), so release with free()
+
+  CoUninitialize();
+
+  // update stream state
+  stream_.state = STREAM_STOPPED;
+}
+
+//******************** End of __WINDOWS_WASAPI__ 
*********************//
+#endif
+
+
+#if defined(__WINDOWS_DS__) // Windows DirectSound API
+
+// Modified by Robin Davies, October 2005
+// - Improvements to DirectX pointer chasing. 
+// - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
+// - Auto-call CoInitialize for DSOUND and ASIO platforms.
+// Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
+// Changed device query structure for RtAudio 4.0.7, January 2010
+
+#include <dsound.h>
+#include <assert.h>
+#include <algorithm>
+
+#if defined(__MINGW32__)
+  // missing from latest mingw winapi
+#define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
+#define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
+#define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
+#define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
+#endif
+
+#define MINIMUM_DEVICE_BUFFER_SIZE 32768
+
+#ifdef _MSC_VER // if Microsoft Visual C++
+#pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
+#endif
+
+static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
+{
+  if ( pointer > bufferSize ) pointer -= bufferSize;
+  if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
+  if ( pointer < earlierPointer ) pointer += bufferSize;
+  return pointer >= earlierPointer && pointer < laterPointer;
+}
+
+// A structure to hold various information related to the DirectSound
+// API implementation.
+struct DsHandle {
+  unsigned int drainCounter; // Tracks callback counts when draining
+  bool internalDrain;        // Indicates if stop is initiated from callback or not.
+  void *id[2];
+  void *buffer[2];
+  bool xrun[2];
+  UINT bufferPointer[2];
+  DWORD dsBufferSize[2];
+  DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
+  HANDLE condition;
+
+  DsHandle()
+    :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
+};
+
+// Declarations for utility functions, callbacks, and structures
+// specific to the DirectSound implementation.
+static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
+                                          LPCTSTR description,
+                                          LPCTSTR module,
+                                          LPVOID lpContext );
+
+static const char* getErrorString( int code );
+
+static unsigned __stdcall callbackHandler( void *ptr );
+
+struct DsDevice {
+  LPGUID id[2];
+  bool validId[2];
+  bool found;
+  std::string name;
+
+  DsDevice()
+  : found(false) { validId[0] = false; validId[1] = false; }
+};
+
+struct DsProbeData {
+  bool isInput;
+  std::vector<DsDevice>* dsDevices;
+};
+
+RtApiDs :: RtApiDs()
+{
+  // Dsound will run both-threaded. If CoInitialize fails, then just
+  // accept whatever the mainline chose for a threading model.
+  coInitialized_ = false;
+  HRESULT hr = CoInitialize( NULL );
+  if ( !FAILED( hr ) ) coInitialized_ = true;
+}
+
+RtApiDs :: ~RtApiDs()
+{
+  if ( coInitialized_ ) CoUninitialize(); // balanced call.
+  if ( stream_.state != STREAM_CLOSED ) closeStream();
+}
+
+// The DirectSound default output is always the first device.
+unsigned int RtApiDs :: getDefaultOutputDevice( void )
+{
+  return 0;
+}
+
+// The DirectSound default input is always the first input device,
+// which is the first capture device enumerated. 
+unsigned int RtApiDs :: getDefaultInputDevice( void )
+{
+  return 0;
+}
+
+unsigned int RtApiDs :: getDeviceCount( void )
+{
+  // Set query flag for previously found devices to false, so that we
+  // can check for any devices that have disappeared.
+  for ( unsigned int i=0; i<dsDevices.size(); i++ )
+    dsDevices[i].found = false;
+
+  // Query DirectSound devices.
+  struct DsProbeData probeInfo;
+  probeInfo.isInput = false;
+  probeInfo.dsDevices = &dsDevices;
+  HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
+  if ( FAILED( result ) ) {
+    errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
+    errorText_ = errorStream_.str();
+    error( RtAudioError::WARNING );
+  }
+
+  // Query DirectSoundCapture devices.
+  probeInfo.isInput = true;
+  result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
+  if ( FAILED( result ) ) {
+    errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
+    errorText_ = errorStream_.str();
+    error( RtAudioError::WARNING );
+  }
+
+  // Clean out any devices that may have disappeared.
+  std::vector< int > indices;
+  for ( unsigned int i=0; i<dsDevices.size(); i++ )
+    if ( dsDevices[i].found == false ) indices.push_back( i );
+  unsigned int nErased = 0;
+  for ( unsigned int i=0; i<indices.size(); i++ )
+    dsDevices.erase( dsDevices.begin() + indices[i] - nErased++ );
+
+  return static_cast<unsigned int>(dsDevices.size());
+}
+
+RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
+{
+  RtAudio::DeviceInfo info;
+  info.probed = false;
+
+  if ( dsDevices.size() == 0 ) {
+    // Force a query of all devices
+    getDeviceCount();
+    if ( dsDevices.size() == 0 ) {
+      errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
+      error( RtAudioError::INVALID_USE );
+      return info;
+    }
+  }
+
+  if ( device >= dsDevices.size() ) {
+    errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
+    error( RtAudioError::INVALID_USE );
+    return info;
+  }
+
+  HRESULT result;
+  if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
+
+  LPDIRECTSOUND output;
+  DSCAPS outCaps;
+  result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
+  if ( FAILED( result ) ) {
+    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
+    errorText_ = errorStream_.str();
+    error( RtAudioError::WARNING );
+    goto probeInput;
+  }
+
+  outCaps.dwSize = sizeof( outCaps );
+  result = output->GetCaps( &outCaps );
+  if ( FAILED( result ) ) {
+    output->Release();
+    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
+    errorText_ = errorStream_.str();
+    error( RtAudioError::WARNING );
+    goto probeInput;
+  }
+
+  // Get output channel information.
+  info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
+
+  // Get sample rate information.
+  info.sampleRates.clear();
+  for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
+    if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
+         SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
+      info.sampleRates.push_back( SAMPLE_RATES[k] );
+  }
+
+  // Get format information.
+  if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
+  if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
+
+  output->Release();
+
+  if ( getDefaultOutputDevice() == device )
+    info.isDefaultOutput = true;
+
+  if ( dsDevices[ device ].validId[1] == false ) {
+    info.name = dsDevices[ device ].name;
+    info.probed = true;
+    return info;
+  }
+
+ probeInput:
+
+  LPDIRECTSOUNDCAPTURE input;
+  result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
+  if ( FAILED( result ) ) {
+    errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
+    errorText_ = errorStream_.str();
@@ -6144,6 +7503,7 @@ void RtApiAlsa :: startStream()
   stream_.state = STREAM_RUNNING;
 
  unlock:
+  apiInfo->runnable = true;
   pthread_cond_signal( &apiInfo->runnable_cv );
 
   MUTEX_UNLOCK( &stream_.mutex );
@@ -6490,7 +7850,7 @@ unsigned int RtApiPulse::getDeviceCount( void )
   return 1;
 }
 
-RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int device )
+RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
 {
   RtAudio::DeviceInfo info;
   info.probed = true;
@@ -6785,28 +8145,35 @@ bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
     if ( format == sf->rtaudio_format ) {
       sf_found = true;
       stream_.userFormat = sf->rtaudio_format;
+      stream_.deviceFormat[mode] = stream_.userFormat;
       ss.format = sf->pa_format;
       break;
     }
   }
-  if ( !sf_found ) {
-    errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample format.";
-    return false;
+  if ( !sf_found ) { // Use internal data format conversion.
+    stream_.userFormat = format;
+    stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
+    ss.format = PA_SAMPLE_FLOAT32LE;
   }
 
-  // Set interleaving parameters.
+  // Set other stream parameters.
   if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
   else stream_.userInterleaved = true;
   stream_.deviceInterleaved[mode] = true;
   stream_.nBuffers = 1;
   stream_.doByteSwap[mode] = false;
-  stream_.doConvertBuffer[mode] = channels > 1 && !stream_.userInterleaved;
-  stream_.deviceFormat[mode] = stream_.userFormat;
   stream_.nUserChannels[mode] = channels;
   stream_.nDeviceChannels[mode] = channels + firstChannel;
   stream_.channelOffset[mode] = 0;
   std::string streamName = "RtAudio";
 
+  // Set flags for buffer conversion.
+  stream_.doConvertBuffer[mode] = false;
+  if ( stream_.userFormat != stream_.deviceFormat[mode] )
+    stream_.doConvertBuffer[mode] = true;
+  if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
+    stream_.doConvertBuffer[mode] = true;
+
   // Allocate necessary internal buffers.
   bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
   stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
diff --git a/RtAudio.h b/RtAudio.h
index f8285c0..ecb9b6a 100644
--- a/RtAudio.h
+++ b/RtAudio.h
@@ -270,6 +270,7 @@ class RtAudio
     LINUX_OSS,      /*!< The Linux Open Sound System API. */
     UNIX_JACK,      /*!< The Jack Low-Latency Audio Server API. */
     MACOSX_CORE,    /*!< Macintosh OS-X Core Audio API. */
+    WINDOWS_WASAPI, /*!< The Microsoft WASAPI API. */
     WINDOWS_ASIO,   /*!< The Steinberg Audio Stream I/O API. */
     WINDOWS_DS,     /*!< The Microsoft Direct Sound API. */
     RTAUDIO_DUMMY   /*!< A compilable but non-functional API. */
@@ -566,7 +567,7 @@ class RtAudio
   };
 
   // Operating system dependent thread functionality.
-#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__)
+#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
 #include <windows.h>
 #include <process.h>
 
@@ -977,6 +978,43 @@ public:
 
 #endif
 
+#if defined(__WINDOWS_WASAPI__)
+
+struct IMMDeviceEnumerator;
+
+class RtApiWasapi : public RtApi
+{
+public:
+  RtApiWasapi();
+  ~RtApiWasapi();
+
+  RtAudio::Api getCurrentApi( void ) { return RtAudio::WINDOWS_WASAPI; }
+  unsigned int getDeviceCount( void );
+  RtAudio::DeviceInfo getDeviceInfo( unsigned int device );
+  unsigned int getDefaultOutputDevice( void );
+  unsigned int getDefaultInputDevice( void );
+  void closeStream( void );
+  void startStream( void );
+  void stopStream( void );
+  void abortStream( void );
+
+private:
+  bool coInitialized_;
+  IMMDeviceEnumerator* deviceEnumerator_;
+
+  bool probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
+                        unsigned int firstChannel, unsigned int sampleRate,
+                        RtAudioFormat format, unsigned int* bufferSize,
+                        RtAudio::StreamOptions* options );
+
+  static DWORD WINAPI runWasapiThread( void* wasapiPtr );
+  static DWORD WINAPI stopWasapiThread( void* wasapiPtr );
+  static DWORD WINAPI abortWasapiThread( void* wasapiPtr );
+  void wasapiThread();
+};
+
+#endif
+
 #if defined(__LINUX_ALSA__)
 
 class RtApiAlsa: public RtApi

diff --git a/tests/duplex.cpp b/tests/duplex.cpp
index 60be901..291d2aa 100644
--- a/tests/duplex.cpp
+++ b/tests/duplex.cpp
@@ -48,8 +48,8 @@ void usage( void ) {
   exit( 0 );
 }
 
-int inout( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
-           double streamTime, RtAudioStreamStatus status, void *data )
+int inout( void *outputBuffer, void *inputBuffer, unsigned int /*nBufferFrames*/,
+           double /*streamTime*/, RtAudioStreamStatus status, void *data )
 {
   // Since the number of input and output channels is equal, we can do
   // a simple buffer copy operation here.
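The comment above refers to duplex.cpp's pass-through body. For readers of the patch, here is a minimal self-contained sketch of that pattern; the function name, the format check, and the assumption that the user-data pointer holds the per-buffer byte count are illustrative, not copied from duplex.cpp:

// Hedged sketch of a duplex pass-through callback in the style of
// tests/duplex.cpp. Assumes equal input/output channel counts and identical
// formats, and that userData points to the byte count of one buffer,
// computed at stream-setup time as nBufferFrames * channels * sample size.
#include <cstring>
#include <iostream>
#include "RtAudio.h"

int inoutSketch( void *outputBuffer, void *inputBuffer, unsigned int /*nBufferFrames*/,
                 double /*streamTime*/, RtAudioStreamStatus status, void *data )
{
  if ( status ) std::cout << "Stream over/underflow detected." << std::endl;

  // Equal channel counts and formats on both ends, so one copy suffices.
  unsigned int *bufferBytes = (unsigned int *) data;
  std::memcpy( outputBuffer, inputBuffer, *bufferBytes );
  return 0;
}

Passing the byte count through the user-data pointer keeps the callback format-agnostic: the copy size is fixed once at setup rather than recomputed per callback.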
diff --git a/tests/playraw.cpp b/tests/playraw.cpp
index 7e7256b..03212d7 100644
--- a/tests/playraw.cpp
+++ b/tests/playraw.cpp
@@ -70,8 +70,8 @@ struct OutputData {
 };
 
 // Interleaved buffers
-int output( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
-            double streamTime, RtAudioStreamStatus status, void *data )
+int output( void *outputBuffer, void * /*inputBuffer*/, unsigned int nBufferFrames,
+            double /*streamTime*/, RtAudioStreamStatus /*status*/, void *data )
 {
   OutputData *oData = (OutputData*) data;
 
diff --git a/tests/playsaw.cpp b/tests/playsaw.cpp
index cd5f1e1..dc36d4c 100644
--- a/tests/playsaw.cpp
+++ b/tests/playsaw.cpp
@@ -112,8 +112,8 @@ int saw( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
 #else // Use non-interleaved buffers
 
-int saw( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
-         double streamTime, RtAudioStreamStatus status, void *data )
+int saw( void *outputBuffer, void * /*inputBuffer*/, unsigned int nBufferFrames,
+         double /*streamTime*/, RtAudioStreamStatus status, void *data )
 {
   unsigned int i, j;
   extern unsigned int channels;
 
diff --git a/tests/record.cpp b/tests/record.cpp
index 4bd9d30..56b59b4 100644
--- a/tests/record.cpp
+++ b/tests/record.cpp
@@ -67,8 +67,8 @@ struct InputData {
 };
 
 // Interleaved buffers
-int input( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
-           double streamTime, RtAudioStreamStatus status, void *data )
+int input( void * /*outputBuffer*/, void *inputBuffer, unsigned int nBufferFrames,
+           double /*streamTime*/, RtAudioStreamStatus /*status*/, void *data )
 {
   InputData *iData = (InputData *) data;
 
diff --git a/tests/testall.cpp b/tests/testall.cpp
index c8db735..ec7107b 100644
--- a/tests/testall.cpp
+++ b/tests/testall.cpp
@@ -32,8 +32,8 @@ void usage( void ) {
 unsigned int channels;
 
 // Interleaved buffers
-int sawi( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
-          double streamTime, RtAudioStreamStatus status, void *data )
+int sawi( void *outputBuffer, void * /*inputBuffer*/, unsigned int nBufferFrames,
+          double /*streamTime*/, RtAudioStreamStatus status, void *data )
 {
   unsigned int i, j;
   extern unsigned int channels;
@@ -55,8 +55,8 @@ int sawi( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
 }
 
 // Non-interleaved buffers
-int sawni( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
-           double streamTime, RtAudioStreamStatus status, void *data )
+int sawni( void *outputBuffer, void * /*inputBuffer*/, unsigned int nBufferFrames,
+           double /*streamTime*/, RtAudioStreamStatus status, void *data )
 {
   unsigned int i, j;
   extern unsigned int channels;
@@ -79,8 +79,8 @@ int sawni( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
   return 0;
 }
 
-int inout( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
-           double streamTime, RtAudioStreamStatus status, void *data )
+int inout( void *outputBuffer, void *inputBuffer, unsigned int /*nBufferFrames*/,
+           double /*streamTime*/, RtAudioStreamStatus status, void *data )
 {
   // Since the number of input and output channels is equal, we can do
   // a simple buffer copy operation here.
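All of the tests/ hunks apply the same idiom: C++ permits a parameter to be left unnamed, and commenting out the name both documents the slot and silences unused-parameter warnings without changing the function type. A hypothetical declaration showing the idiom (the name "callback" is illustrative):

// The commented-out names keep the signature self-documenting while making
// clear the parameters are deliberately ignored. Since parameter names are
// not part of a function's type, the function still matches RtAudio's
// RtAudioCallback typedef exactly.
int callback( void *outputBuffer, void * /*inputBuffer*/, unsigned int nBufferFrames,
              double /*streamTime*/, RtAudioStreamStatus /*status*/, void *userData );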
diff --git a/tests/teststops.cpp b/tests/teststops.cpp
index b74a66e..2cfccb4 100644
--- a/tests/teststops.cpp
+++ b/tests/teststops.cpp
@@ -51,8 +51,8 @@ struct MyData {
 };
 
 // Interleaved buffers
-int pulse( void *outputBuffer, void *inputBuffer, unsigned int nBufferFrames,
-           double streamTime, RtAudioStreamStatus status, void *mydata )
+int pulse( void *outputBuffer, void * /*inputBuffer*/, unsigned int nBufferFrames,
+           double /*streamTime*/, RtAudioStreamStatus status, void *mydata )
 {
   // Write out a pulse signal and ignore the input buffer.
   unsigned int i, j;
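Once the patch is applied, the new backend can be requested explicitly through the WINDOWS_WASAPI enum added to RtAudio::Api. A hedged usage sketch follows; the device choice, sample rate, buffer size, and saw generator are illustrative placeholders, not code from the patch:

// Hedged sketch (not part of the patch): selecting the new WASAPI backend.
#include <iostream>
#include <windows.h>   // Sleep()
#include "RtAudio.h"

static int sawMono( void *outputBuffer, void * /*inputBuffer*/, unsigned int nBufferFrames,
                    double /*streamTime*/, RtAudioStreamStatus /*status*/, void *data )
{
  double *buffer = (double *) outputBuffer;
  double *phase = (double *) data;
  for ( unsigned int i=0; i<nBufferFrames; i++ ) {
    *buffer++ = *phase;                 // single channel, so interleaving is moot
    *phase += 0.01;
    if ( *phase >= 1.0 ) *phase -= 2.0;
  }
  return 0;
}

int main()
{
  RtAudio audio( RtAudio::WINDOWS_WASAPI );  // enum value added by this patch
  if ( audio.getDeviceCount() == 0 ) return 1;

  RtAudio::StreamParameters oParams;
  oParams.deviceId = audio.getDefaultOutputDevice();
  oParams.nChannels = 1;
  unsigned int bufferFrames = 512;           // WASAPI may adjust this
  double phase = 0.0;

  try {
    audio.openStream( &oParams, NULL, RTAUDIO_FLOAT64, 48000, &bufferFrames, &sawMono, &phase );
    audio.startStream();
    Sleep( 2000 );                           // play for ~2 seconds
    audio.stopStream();
    audio.closeStream();
  }
  catch ( RtAudioError& e ) {
    e.printMessage();
    return 1;
  }
  return 0;
}

Because the WASAPI thread converts between user and device formats through the WasapiBuffer ring buffers, the user-side format here need not match what the device negotiates.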