
Allow only the device's native sample rate in WASAPI API

Revert "fix" for Issue #69
Marcus Tomlinson, Stephen Sinclair · 7 years ago
commit 8b543a2850 (tags/5.1.0)

1 changed file with 27 additions and 206 deletions

RtAudio.cpp  (+27 -206)
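With this change, a WASAPI stream only opens at the device's native mix rate, so callers should query that rate first. A minimal caller-side sketch against the RtAudio 5.x public API (device choice, channel count, and buffer size are illustrative; ioCallback is a hypothetical no-op):

#include "RtAudio.h"
#include <cstdlib>

// Hypothetical no-op callback; a real application fills/consumes the buffers.
int ioCallback( void* outputBuffer, void* inputBuffer, unsigned int nFrames,
                double streamTime, RtAudioStreamStatus status, void* userData )
{
  return 0;
}

int main()
{
  RtAudio audio( RtAudio::WINDOWS_WASAPI );

  unsigned int device = audio.getDefaultOutputDevice();
  RtAudio::DeviceInfo info = audio.getDeviceInfo( device );

  RtAudio::StreamParameters params;
  params.deviceId = device;
  params.nChannels = 2;

  unsigned int bufferFrames = 512;

  // After this commit the only rate WASAPI accepts is the device's native
  // one, reported as info.preferredSampleRate (and as the sole entry in
  // info.sampleRates).
  audio.openStream( &params, NULL, RTAUDIO_FLOAT32, info.preferredSampleRate,
                    &bufferFrames, &ioCallback );
  audio.startStream();

  return EXIT_SUCCESS;
}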

@@ -3864,152 +3864,6 @@ private:


//-----------------------------------------------------------------------------


-// In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
-// between HW and the user. The convertBufferWasapi function is used to perform this conversion
-// between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
-// This sample rate converter works best with conversions between one rate and its multiple.
-void convertBufferWasapi( char* outBuffer,
-                          const char* inBuffer,
-                          const unsigned int& channelCount,
-                          const unsigned int& inSampleRate,
-                          const unsigned int& outSampleRate,
-                          const unsigned int& inSampleCount,
-                          unsigned int& outSampleCount,
-                          const RtAudioFormat& format )
-{
-  // calculate the new outSampleCount and relative sampleStep
-  float sampleRatio = ( float ) outSampleRate / inSampleRate;
-  float sampleRatioInv = ( float ) 1 / sampleRatio;
-  float sampleStep = 1.0f / sampleRatio;
-  float inSampleFraction = 0.0f;
-
-  // for cmath functions
-  using namespace std;
-
-  outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );
-
-  // if inSampleRate is a multiple of outSampleRate (or vice versa) there's no need to interpolate
-  if ( floor( sampleRatio ) == sampleRatio || floor( sampleRatioInv ) == sampleRatioInv )
-  {
-    // frame-by-frame, copy each relative input sample into it's corresponding output sample
-    for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
-    {
-      unsigned int inSample = ( unsigned int ) inSampleFraction;
-
-      switch ( format )
-      {
-        case RTAUDIO_SINT8:
-          memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
-          break;
-        case RTAUDIO_SINT16:
-          memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
-          break;
-        case RTAUDIO_SINT24:
-          memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
-          break;
-        case RTAUDIO_SINT32:
-          memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
-          break;
-        case RTAUDIO_FLOAT32:
-          memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
-          break;
-        case RTAUDIO_FLOAT64:
-          memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
-          break;
-      }
-
-      // jump to next in sample
-      inSampleFraction += sampleStep;
-    }
-  }
-  else // else interpolate
-  {
-    // frame-by-frame, copy each relative input sample into it's corresponding output sample
-    for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
-    {
-      unsigned int inSample = ( unsigned int ) inSampleFraction;
-      float inSampleDec = inSampleFraction - inSample;
-      unsigned int frameInSample = inSample * channelCount;
-      unsigned int frameOutSample = outSample * channelCount;
-
-      switch ( format )
-      {
-        case RTAUDIO_SINT8:
-        {
-          for ( unsigned int channel = 0; channel < channelCount; channel++ )
-          {
-            char fromSample = ( ( char* ) inBuffer )[ frameInSample + channel ];
-            char toSample = ( ( char* ) inBuffer )[ frameInSample + channelCount + channel ];
-            char sampleDiff = ( char ) ( ( toSample - fromSample ) * inSampleDec );
-            ( ( char* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
-          }
-          break;
-        }
-        case RTAUDIO_SINT16:
-        {
-          for ( unsigned int channel = 0; channel < channelCount; channel++ )
-          {
-            short fromSample = ( ( short* ) inBuffer )[ frameInSample + channel ];
-            short toSample = ( ( short* ) inBuffer )[ frameInSample + channelCount + channel ];
-            short sampleDiff = ( short ) ( ( toSample - fromSample ) * inSampleDec );
-            ( ( short* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
-          }
-          break;
-        }
-        case RTAUDIO_SINT24:
-        {
-          for ( unsigned int channel = 0; channel < channelCount; channel++ )
-          {
-            int fromSample = ( ( S24* ) inBuffer )[ frameInSample + channel ].asInt();
-            int toSample = ( ( S24* ) inBuffer )[ frameInSample + channelCount + channel ].asInt();
-            int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
-            ( ( S24* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
-          }
-          break;
-        }
-        case RTAUDIO_SINT32:
-        {
-          for ( unsigned int channel = 0; channel < channelCount; channel++ )
-          {
-            int fromSample = ( ( int* ) inBuffer )[ frameInSample + channel ];
-            int toSample = ( ( int* ) inBuffer )[ frameInSample + channelCount + channel ];
-            int sampleDiff = ( int ) ( ( toSample - fromSample ) * inSampleDec );
-            ( ( int* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
-          }
-          break;
-        }
-        case RTAUDIO_FLOAT32:
-        {
-          for ( unsigned int channel = 0; channel < channelCount; channel++ )
-          {
-            float fromSample = ( ( float* ) inBuffer )[ frameInSample + channel ];
-            float toSample = ( ( float* ) inBuffer )[ frameInSample + channelCount + channel ];
-            float sampleDiff = ( toSample - fromSample ) * inSampleDec;
-            ( ( float* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
-          }
-          break;
-        }
-        case RTAUDIO_FLOAT64:
-        {
-          for ( unsigned int channel = 0; channel < channelCount; channel++ )
-          {
-            double fromSample = ( ( double* ) inBuffer )[ frameInSample + channel ];
-            double toSample = ( ( double* ) inBuffer )[ frameInSample + channelCount + channel ];
-            double sampleDiff = ( toSample - fromSample ) * inSampleDec;
-            ( ( double* ) outBuffer )[ frameOutSample + channel ] = fromSample + sampleDiff;
-          }
-          break;
-        }
-      }
-
-      // jump to next in sample
-      inSampleFraction += sampleStep;
-    }
-  }
-}

//-----------------------------------------------------------------------------

// A structure to hold various information related to the WASAPI implementation.
struct WasapiHandle
{
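For context on what was deleted: convertBufferWasapi was a linear-interpolation resampler, stepping through the input at the rate ratio and blending adjacent frames. A self-contained mono float sketch of that technique (illustrative only, not part of RtAudio, and with a guard on the final neighbour read, which the deleted loop appears to perform one frame past the input when upsampling):

#include <cmath>
#include <vector>

// Linear-interpolation resampling: output sample i sits at position
// i * inRate / outRate in the input; blend the two neighbouring samples
// by the fractional part of that position.
std::vector<float> resampleLinear( const std::vector<float>& in,
                                   unsigned int inRate, unsigned int outRate )
{
  float sampleStep = ( float ) inRate / outRate;
  unsigned int outCount =
      ( unsigned int ) std::roundf( in.size() * ( float ) outRate / inRate );

  std::vector<float> out( outCount );
  float pos = 0.0f;
  for ( unsigned int i = 0; i < outCount; i++ ) {
    unsigned int idx = ( unsigned int ) pos;  // left neighbour
    float frac = pos - idx;                   // blend weight
    float from = in[idx];
    float to = ( idx + 1 < in.size() ) ? in[idx + 1] : from;  // guard last frame
    out[i] = from + ( to - from ) * frac;
    pos += sampleStep;
  }
  return out;
}

As the removed comment itself notes, this scheme works best when one rate is a multiple of the other and degrades for arbitrary ratios.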
@@ -4275,15 +4129,12 @@ RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
    info.duplexChannels = 0;
  }


-  // sample rates
-  info.sampleRates.clear();
-
-  // allow support for all sample rates as we have a built-in sample rate converter
-  for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
-    info.sampleRates.push_back( SAMPLE_RATES[i] );
-  }
+  // sample rates (WASAPI only supports the one native sample rate)
   info.preferredSampleRate = deviceFormat->nSamplesPerSec;
+
+  info.sampleRates.clear();
+  info.sampleRates.push_back( deviceFormat->nSamplesPerSec );

  // native format
  info.nativeFormats = 0;


@@ -4559,6 +4410,7 @@ bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigne
  WAVEFORMATEX* deviceFormat = NULL;
  unsigned int bufferBytes;
  stream_.state = STREAM_STOPPED;
+  RtAudio::DeviceInfo deviceInfo;


  // create API Handle if not already created
  if ( !stream_.apiHandle )
@@ -4599,6 +4451,16 @@ bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigne
    goto Exit;
  }


+  deviceInfo = getDeviceInfo( device );
+
+  // validate sample rate
+  if ( sampleRate != deviceInfo.preferredSampleRate )
+  {
+    errorType = RtAudioError::INVALID_USE;
+    errorText_ = "RtApiWasapi::probeDeviceOpen: " + std::to_string( sampleRate ) + "Hz sample rate not supported. This device only supports " + std::to_string( deviceInfo.preferredSampleRate ) + "Hz.";
+    goto Exit;
+  }

  // determine whether index falls within capture or render devices
  if ( device >= renderDeviceCount ) {
    if ( mode != INPUT ) {
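With this validation in place, a mismatched rate fails at open time instead of being silently resampled. Continuing the earlier sketch, caller-side handling might look like the fragment below (assumes <iostream> and the names from that sketch; the 44100/48000 figures are hypothetical, while the message format comes from the errorText_ above):

try {
  // If the device's native rate is, say, 48000 Hz, requesting 44100 Hz
  // now fails in probeDeviceOpen instead of being converted.
  audio.openStream( &params, NULL, RTAUDIO_FLOAT32, 44100,
                    &bufferFrames, &ioCallback );
}
catch ( RtAudioError& e ) {
  // e.g. "RtApiWasapi::probeDeviceOpen: 44100Hz sample rate not supported.
  //       This device only supports 48000Hz."
  std::cerr << e.getMessage() << std::endl;
}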
@@ -4682,7 +4544,7 @@ bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigne
  stream_.nUserChannels[mode] = channels;
  stream_.channelOffset[mode] = firstChannel;
  stream_.userFormat = format;
-  stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
+  stream_.deviceFormat[mode] = deviceInfo.nativeFormats;


  if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
    stream_.userInterleaved = false;
@@ -4782,8 +4644,6 @@ void RtApiWasapi::wasapiThread()


  WAVEFORMATEX* captureFormat = NULL;
  WAVEFORMATEX* renderFormat = NULL;
-  float captureSrRatio = 0.0f;
-  float renderSrRatio = 0.0f;
  WasapiBuffer captureBuffer;
  WasapiBuffer renderBuffer;


@@ -4793,15 +4653,11 @@ void RtApiWasapi::wasapiThread()
  unsigned long captureFlags = 0;
  unsigned int bufferFrameCount = 0;
  unsigned int numFramesPadding = 0;
-  unsigned int convBufferSize = 0;
  bool callbackPushed = false;
  bool callbackPulled = false;
  bool callbackStopped = false;
  int callbackResult = 0;


-  // convBuffer is used to store converted buffers between WASAPI and the user
-  char* convBuffer = NULL;
-  unsigned int convBuffSize = 0;
  unsigned int deviceBuffSize = 0;


  errorText_.clear();
@@ -4824,11 +4680,8 @@ void RtApiWasapi::wasapiThread()
    goto Exit;
  }


-  captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
-
  // initialize capture stream according to desire buffer size
-  float desiredBufferSize = stream_.bufferSize * captureSrRatio;
-  REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
+  REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / captureFormat->nSamplesPerSec );


  if ( !captureClient ) {
    hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
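The desiredBufferPeriod passed to IAudioClient::Initialize is a REFERENCE_TIME, measured in 100-nanosecond units, hence the factor of 10,000,000 (one second). A worked example with illustrative numbers:

// 512 frames at a 48000 Hz native rate:
//   512 / 48000 s  =  10.667 ms  =  ~106667 hundred-nanosecond units
REFERENCE_TIME period = ( REFERENCE_TIME ) ( ( float ) 512 * 10000000 / 48000 );
// period == 106666 (float arithmetic truncated on the cast; ~10.67 ms)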
@@ -4875,7 +4728,7 @@ void RtApiWasapi::wasapiThread()
  }


  // scale outBufferSize according to stream->user sample rate ratio
-  unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
+  unsigned int outBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT];
  inBufferSize *= stream_.nDeviceChannels[INPUT];


  // set captureBuffer size
@@ -4904,11 +4757,8 @@ void RtApiWasapi::wasapiThread()
    goto Exit;
  }


-  renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
-
  // initialize render stream according to desire buffer size
-  float desiredBufferSize = stream_.bufferSize * renderSrRatio;
-  REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
+  REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / renderFormat->nSamplesPerSec );


  if ( !renderClient ) {
    hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
@@ -4955,7 +4805,7 @@ void RtApiWasapi::wasapiThread()
  }


  // scale inBufferSize according to user->stream sample rate ratio
-  unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
+  unsigned int inBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[OUTPUT];
  outBufferSize *= stream_.nDeviceChannels[OUTPUT];


  // set renderBuffer size
@@ -4978,23 +4828,18 @@ void RtApiWasapi::wasapiThread()


  if ( stream_.mode == INPUT ) {
    using namespace std; // for roundf
-    convBuffSize = ( size_t ) roundf( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
    deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
  }
  else if ( stream_.mode == OUTPUT ) {
-    convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
    deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
  }
  else if ( stream_.mode == DUPLEX ) {
-    convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
-                             ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
    deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
                               stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
  }

-  convBuffer = ( char* ) malloc( convBuffSize );
  stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
-  if ( !convBuffer || !stream_.deviceBuffer ) {
+  if ( !stream_.deviceBuffer ) {
    errorType = RtAudioError::MEMORY_ERROR;
    errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
    goto Exit;
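Without the converter there is no convBuffer to size against a rate ratio; only stream_.deviceBuffer remains, sized directly from the user buffer. To make the arithmetic concrete (a hypothetical helper mirroring the sizing logic above; formatBytes returns bytes per sample for an RtAudioFormat):

// Frames * channels * bytes-per-sample, per direction; DUPLEX takes the
// larger of the capture and render requirements.
unsigned int deviceBufferBytes( unsigned int bufferFrames,
                                unsigned int channels,
                                unsigned int bytesPerSample )
{
  // e.g. 512 frames * 2 channels * 4 bytes (RTAUDIO_FLOAT32) = 4096 bytes
  return bufferFrames * channels * bytesPerSample;
}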
@@ -5006,26 +4851,15 @@ void RtApiWasapi::wasapiThread()
  // Callback Input
  // ==============
  // 1. Pull callback buffer from inputBuffer
-  // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
-  //                          Convert callback buffer to user format
+  // 2. If 1. was successful: Convert callback buffer to user format


  if ( captureAudioClient ) {
    // Pull callback buffer from inputBuffer
-    callbackPulled = captureBuffer.pullBuffer( convBuffer,
-                                               ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
+    callbackPulled = captureBuffer.pullBuffer( stream_.deviceBuffer,
+                                               ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT],
                                               stream_.deviceFormat[INPUT] );


    if ( callbackPulled ) {
-      // Convert callback buffer to user sample rate
-      convertBufferWasapi( stream_.deviceBuffer,
-                           convBuffer,
-                           stream_.nDeviceChannels[INPUT],
-                           captureFormat->nSamplesPerSec,
-                           stream_.sampleRate,
-                           ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
-                           convBufferSize,
-                           stream_.deviceFormat[INPUT] );
-
      if ( stream_.doConvertBuffer[INPUT] ) {
        // Convert callback buffer to user format
        convertBuffer( stream_.userBuffer[INPUT],
@@ -5099,8 +4933,7 @@ void RtApiWasapi::wasapiThread()
  // Callback Output
  // ===============
  // 1. Convert callback buffer to stream format
-  // 2. Convert callback buffer to stream sample rate and channel count
-  // 3. Push callback buffer into outputBuffer
+  // 2. Push callback buffer into outputBuffer


  if ( renderAudioClient && callbackPulled ) {
    if ( stream_.doConvertBuffer[OUTPUT] ) {
@@ -5111,19 +4944,9 @@ void RtApiWasapi::wasapiThread()


    }


-    // Convert callback buffer to stream sample rate
-    convertBufferWasapi( convBuffer,
-                         stream_.deviceBuffer,
-                         stream_.nDeviceChannels[OUTPUT],
-                         stream_.sampleRate,
-                         renderFormat->nSamplesPerSec,
-                         stream_.bufferSize,
-                         convBufferSize,
-                         stream_.deviceFormat[OUTPUT] );
-
    // Push callback buffer into outputBuffer
-    callbackPushed = renderBuffer.pushBuffer( convBuffer,
-                                              convBufferSize * stream_.nDeviceChannels[OUTPUT],
+    callbackPushed = renderBuffer.pushBuffer( stream_.deviceBuffer,
+                                              stream_.bufferSize * stream_.nDeviceChannels[OUTPUT],
                                              stream_.deviceFormat[OUTPUT] );
  }
  else {
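Taken together, the callback loop's data path is now single-buffered in both directions: pull device-format audio into stream_.deviceBuffer, convert format/channels for the user, run the callback, convert back, push. A reduced, self-contained model of that flow (fixed stereo float format and hypothetical names throughout; the real loop also handles flags, padding, and stop conditions):

#include <cstring>
#include <vector>

struct StereoFrame { float left, right; };

// User callback in the spirit of RtAudioCallback, minus the bookkeeping.
typedef int ( *Callback )( StereoFrame* out, const StereoFrame* in,
                           unsigned int nFrames );

// One loop iteration: with device and user rates now equal, the device
// buffer is the only staging area between the capture and render rings.
void iterateOnce( const std::vector<StereoFrame>& captureRing,
                  std::vector<StereoFrame>& renderRing,
                  std::vector<StereoFrame>& deviceBuffer,
                  Callback callback )
{
  unsigned int nFrames = ( unsigned int ) deviceBuffer.size();

  // 1. Pull one callback buffer of captured audio (was: pull, then resample).
  std::memcpy( deviceBuffer.data(), captureRing.data(),
               nFrames * sizeof( StereoFrame ) );

  // 2. Run the user callback in place (format conversion elided: this sketch
  //    fixes the format to stereo float on both sides).
  callback( deviceBuffer.data(), deviceBuffer.data(), nFrames );

  // 3. Push the result to the render side (was: resample, then push).
  std::memcpy( renderRing.data(), deviceBuffer.data(),
               nFrames * sizeof( StereoFrame ) );
}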
@@ -5269,8 +5092,6 @@ Exit:
  CoTaskMemFree( captureFormat );
  CoTaskMemFree( renderFormat );


-  free ( convBuffer );

  CoUninitialize();


  // update stream state

