From 7dfc764bf0c4555e8e23be97d74348fb8d80570c Mon Sep 17 00:00:00 2001
From: Julian Storer
Date: Wed, 9 Sep 2009 17:35:23 +0100
Subject: [PATCH] Initial check-in of a new WASAPI audio device wrapper (not
 properly tested yet!), and tidied up all the audio device type creation
 functions to make it easier to cope with all these different device types.

Added a couple of config entries to enable WASAPI and DSound, so a Windows
build can use any combination of device APIs. Also replaced the
string-to-double conversion code with a custom function to avoid localisation
problems with commas and full-stops.
---
 .gitignore                                    |    5 +
 build/win32/vc8/JUCE.vcproj                   | 5750 +++++++++--------
 extras/browser plugins/demo/test.html         |    4 +-
 juce_Config.h                                 |   12 +
 juce_amalgamated.cpp                          | 1284 +++-
 juce_amalgamated.h                            |  377 +-
 .../audio_file_formats/juce_AudioFormat.cpp   |    2 +-
 src/audio/devices/juce_AudioDeviceManager.cpp |   42 +-
 src/core/juce_SystemStats.h                   |    1 +
 src/juce_app_includes.h                       |   18 +-
 src/native/juce_win32_NativeCode.cpp          |    1 +
 src/native/linux/juce_linux_Audio.cpp         |   15 +-
 src/native/mac/juce_mac_CoreAudio.cpp         |    4 +-
 src/native/windows/juce_win32_ASIO.cpp        |    2 +-
 src/native/windows/juce_win32_DirectSound.cpp |   14 +-
 .../windows/juce_win32_NativeIncludes.h       |    8 +
 src/native/windows/juce_win32_SystemStats.cpp |    2 +-
 src/native/windows/juce_win32_WASAPI.cpp      | 1080 ++++
 src/text/juce_CharacterFunctions.cpp          |  152 +-
 19 files changed, 5618 insertions(+), 3155 deletions(-)
 create mode 100644 src/native/windows/juce_win32_WASAPI.cpp

diff --git a/.gitignore b/.gitignore
index d979055aa1..d883de7d46 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,11 @@
 *.ncb
 *.suo
 *.obj
+*.ilk
+*.pch
+*.pdb
+*.dep
+*.idb
 extras/juce demo/build/macosx/build
 extras/juce demo/build/win32_vc8/Debug
 extras/juce demo/build/win32_vc8/Release
diff --git a/build/win32/vc8/JUCE.vcproj b/build/win32/vc8/JUCE.vcproj
index 8256c273c5..f070b89d39 100644
--- a/build/win32/vc8/JUCE.vcproj
+++ b/build/win32/vc8/JUCE.vcproj
@@ -1,2873 +1,2877 @@
 [.vcproj XML hunk body omitted: the project-file content was lost in extraction, only the +/- markers survived]
diff --git a/extras/browser plugins/demo/test.html b/extras/browser plugins/demo/test.html
index e478fb9ec1..b54ad1507c 100644
--- a/extras/browser plugins/demo/test.html
+++ b/extras/browser plugins/demo/test.html
@@ -41,15 +41,13 @@ function sendCallbackObjectToPlugin()
 [HTML hunk body omitted: content lost in extraction]
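
Note on the new config flags (a usage sketch, not part of the patch): the
JUCE_WASAPI, JUCE_DIRECTSOUND and JUCE_ASIO macros added to juce_Config.h
below are each wrapped in #ifndef, so a Windows build can pick any
combination of device APIs by defining them before the defaults are applied,
e.g. project-wide via compiler options or by editing juce_Config.h itself.
A hypothetical override enabling WASAPI and DirectSound but not ASIO (the
defines would need to be visible to the Juce library build itself, not just
to client code) might look like this:

// Hypothetical project-wide overrides, e.g. /D compiler switches or a
// header that is guaranteed to be seen before juce_Config.h:
#define JUCE_WASAPI      1   // enable the new WASAPI device type
#define JUCE_DIRECTSOUND 1   // keep DirectSound as well
#define JUCE_ASIO        0   // build without ASIO support
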
diff --git a/juce_Config.h b/juce_Config.h index ec09a72e4d..689579a6cb 100644 --- a/juce_Config.h +++ b/juce_Config.h @@ -73,6 +73,18 @@ #define JUCE_ASIO 1 #endif +/** Comment out this macro to disable the Windows WASAPI audio device type. +*/ +#ifndef JUCE_WASAPI +// #define JUCE_WASAPI 1 +#endif + +/** Comment out this macro to disable the Windows WASAPI audio device type. +*/ +#ifndef JUCE_DIRECTSOUND + #define JUCE_DIRECTSOUND 1 +#endif + /** Comment out this macro to disable building of ALSA device support on Linux. */ #ifndef JUCE_ALSA diff --git a/juce_amalgamated.cpp b/juce_amalgamated.cpp index 18e74e1a57..11e3cee7c2 100644 --- a/juce_amalgamated.cpp +++ b/juce_amalgamated.cpp @@ -84,6 +84,18 @@ #define JUCE_ASIO 1 #endif +/** Comment out this macro to disable the Windows WASAPI audio device type. +*/ +#ifndef JUCE_WASAPI +// #define JUCE_WASAPI 1 +#endif + +/** Comment out this macro to disable the Windows WASAPI audio device type. +*/ +#ifndef JUCE_DIRECTSOUND + #define JUCE_DIRECTSOUND 1 +#endif + /** Comment out this macro to disable building of ALSA device support on Linux. */ #ifndef JUCE_ALSA @@ -356,6 +368,13 @@ #include #endif +#if JUCE_WASAPI + #include + #include + #include + #include +#endif + #if JUCE_QUICKTIME /* If you've got an include error here, you probably need to install the QuickTime SDK and @@ -9771,15 +9790,160 @@ int64 CharacterFunctions::getInt64Value (const juce_wchar* s) throw() #endif } +static double juce_mulexp10 (const double value, int exponent) throw() +{ + if (exponent == 0) + return value; + + if (value == 0) + return 0; + + const bool negative = (exponent < 0); + if (negative) + exponent = -exponent; + + double result = 1.0, power = 10.0; + for (int bit = 1; exponent != 0; bit <<= 1) + { + if ((exponent & bit) != 0) + { + exponent ^= bit; + result *= power; + if (exponent == 0) + break; + } + power *= power; + } + + return negative ? (value / result) : (value * result); +} + +template +double juce_atof (const CharType* const original) throw() +{ + double result[3] = { 0, 0, 0 }, accumulator[2] = { 0, 0 }; + int exponentAdjustment[2] = { 0, 0 }, exponentAccumulator[2] = { -1, -1 }; + int exponent = 0, decPointIndex = 0, digit = 0; + int lastDigit = 0, numSignificantDigits = 0; + bool isNegative = false, digitsFound = false; + const int maxSignificantDigits = 15 + 2; + + const CharType* s = original; + while (CharacterFunctions::isWhitespace (*s)) + ++s; + + switch (*s) + { + case '-': isNegative = true; // fall-through.. 
+ case '+': ++s; + } + + if (*s == 'n' || *s == 'N' || *s == 'i' || *s == 'I') + return atof (String (original)); // Let the c library deal with NAN and INF + + for (;;) + { + if (CharacterFunctions::isDigit (*s)) + { + lastDigit = digit; + digit = *s++ - '0'; + digitsFound = true; + + if (decPointIndex != 0) + exponentAdjustment[1]++; + + if (numSignificantDigits == 0 && digit == 0) + continue; + + if (++numSignificantDigits > maxSignificantDigits) + { + if (digit > 5) + ++accumulator [decPointIndex]; + else if (digit == 5 && (lastDigit & 1) != 0) + ++accumulator [decPointIndex]; + + if (decPointIndex > 0) + exponentAdjustment[1]--; + else + exponentAdjustment[0]++; + + while (CharacterFunctions::isDigit (*s)) + { + ++s; + if (decPointIndex == 0) + exponentAdjustment[0]++; + } + } + else + { + const double maxAccumulatorValue = (double) ((UINT_MAX - 9) / 10); + if (accumulator [decPointIndex] > maxAccumulatorValue) + { + result [decPointIndex] = juce_mulexp10 (result [decPointIndex], exponentAccumulator [decPointIndex]) + + accumulator [decPointIndex]; + accumulator [decPointIndex] = 0; + exponentAccumulator [decPointIndex] = 0; + } + + accumulator [decPointIndex] = accumulator[decPointIndex] * 10 + digit; + exponentAccumulator [decPointIndex]++; + } + } + else if (decPointIndex == 0 && *s == '.') + { + ++s; + decPointIndex = 1; + + if (numSignificantDigits > maxSignificantDigits) + { + while (CharacterFunctions::isDigit (*s)) + ++s; + break; + } + } + else + { + break; + } + } + + result[0] = juce_mulexp10 (result[0], exponentAccumulator[0]) + accumulator[0]; + + if (decPointIndex != 0) + result[1] = juce_mulexp10 (result[1], exponentAccumulator[1]) + accumulator[1]; + + if ((*s == 'e' || *s == 'E') && digitsFound) + { + bool negativeExponent = false; + + switch (*++s) + { + case '-': negativeExponent = true; // fall-through.. + case '+': ++s; + } + + while (CharacterFunctions::isDigit (*s)) + exponent = (exponent * 10) + (*s++ - '0'); + + if (negativeExponent) + exponent = -exponent; + } + + double r = juce_mulexp10 (result[0], exponent + exponentAdjustment[0]); + if (decPointIndex != 0) + r += juce_mulexp10 (result[1], exponent - exponentAdjustment[1]); + + return isNegative ? 
-r : r; +} + double CharacterFunctions::getDoubleValue (const char* const s) throw() { - return atof (s); + return juce_atof (s); } double CharacterFunctions::getDoubleValue (const juce_wchar* const s) throw() { - wchar_t* endChar; - return wcstod (s, &endChar); + return juce_atof (s); } char CharacterFunctions::toUpperCase (const char character) throw() @@ -22933,30 +23097,36 @@ const OwnedArray & AudioDeviceManager::getAvailableDeviceType return availableDeviceTypes; } -extern AudioIODeviceType* juce_createDefaultAudioIODeviceType(); - -#if JUCE_WIN32 && JUCE_ASIO - extern AudioIODeviceType* juce_createASIOAudioIODeviceType(); -#endif - -#if JUCE_WIN32 && JUCE_WDM_AUDIO - extern AudioIODeviceType* juce_createWDMAudioIODeviceType(); -#endif +AudioIODeviceType* juce_createAudioIODeviceType_CoreAudio(); +AudioIODeviceType* juce_createAudioIODeviceType_WASAPI(); +AudioIODeviceType* juce_createAudioIODeviceType_DirectSound(); +AudioIODeviceType* juce_createAudioIODeviceType_ASIO(); +AudioIODeviceType* juce_createAudioIODeviceType_ALSA(); void AudioDeviceManager::createAudioDeviceTypes (OwnedArray & list) { - AudioIODeviceType* const defaultDeviceType = juce_createDefaultAudioIODeviceType(); + #if JUCE_WIN32 + #if JUCE_WASAPI + if (SystemStats::getOperatingSystemType() >= SystemStats::WinVista) + list.add (juce_createAudioIODeviceType_WASAPI()); + #endif - if (defaultDeviceType != 0) - list.add (defaultDeviceType); + #if JUCE_DIRECTSOUND + list.add (juce_createAudioIODeviceType_DirectSound()); + #endif -#if JUCE_WIN32 && JUCE_ASIO - list.add (juce_createASIOAudioIODeviceType()); -#endif + #if JUCE_ASIO + list.add (juce_createAudioIODeviceType_ASIO()); + #endif + #endif -#if JUCE_WIN32 && JUCE_WDM_AUDIO - list.add (juce_createWDMAudioIODeviceType()); -#endif + #if JUCE_MAC + list.add (juce_createAudioIODeviceType_CoreAudio()); + #endif + + #if JUCE_LINUX && JUCE_ALSA + list.add (juce_createAudioIODeviceType_ALSA()); + #endif } const String AudioDeviceManager::initialise (const int numInputChannelsNeeded, @@ -231058,7 +231228,7 @@ SystemStats::OperatingSystemType SystemStats::getOperatingSystemType() throw() return (info.dwMinorVersion == 0) ? Win2000 : WinXP; case 6: - return WinVista; + return (info.dwMinorVersion == 0) ? WinVista : Windows7; default: jassertfalse // !! not a supported OS! @@ -243866,7 +244036,7 @@ private: const ASIOAudioIODeviceType& operator= (const ASIOAudioIODeviceType&); }; -AudioIODeviceType* juce_createASIOAudioIODeviceType() +AudioIODeviceType* juce_createAudioIODeviceType_ASIO() { return new ASIOAudioIODeviceType(); } @@ -243891,7 +244061,7 @@ AudioIODevice* juce_createASIOAudioIODeviceForGUID (const String& name, /********* Start of inlined file: juce_win32_DirectSound.cpp *********/ // (This file gets included by juce_win32_NativeCode.cpp, rather than being // compiled on its own). 
-#if JUCE_INCLUDED_FILE +#if JUCE_INCLUDED_FILE && JUCE_DIRECTSOUND END_JUCE_NAMESPACE @@ -245340,11 +245510,6 @@ private: const DSoundAudioIODeviceType& operator= (const DSoundAudioIODeviceType&); }; -AudioIODeviceType* juce_createDefaultAudioIODeviceType() -{ - return new DSoundAudioIODeviceType(); -} - const String DSoundAudioIODevice::openDevice (const BitArray& inputChannels, const BitArray& outputChannels, double sampleRate_, @@ -245480,11 +245645,1056 @@ const String DSoundAudioIODevice::openDevice (const BitArray& inputChannels, return error; } +AudioIODeviceType* juce_createAudioIODeviceType_DirectSound() +{ + return new DSoundAudioIODeviceType(); +} + #undef log #endif /********* End of inlined file: juce_win32_DirectSound.cpp *********/ +/********* Start of inlined file: juce_win32_WASAPI.cpp *********/ +// (This file gets included by juce_win32_NativeCode.cpp, rather than being +// compiled on its own). +#if JUCE_INCLUDED_FILE && JUCE_WASAPI + +#if 1 + +const String getAudioErrorDesc (HRESULT hr) +{ + const char* e = 0; + + switch (hr) + { + case E_POINTER: e = "E_POINTER"; break; + case E_INVALIDARG: e = "E_INVALIDARG"; break; + case AUDCLNT_E_NOT_INITIALIZED: e = "AUDCLNT_E_NOT_INITIALIZED"; break; + case AUDCLNT_E_ALREADY_INITIALIZED: e = "AUDCLNT_E_ALREADY_INITIALIZED"; break; + case AUDCLNT_E_WRONG_ENDPOINT_TYPE: e = "AUDCLNT_E_WRONG_ENDPOINT_TYPE"; break; + case AUDCLNT_E_DEVICE_INVALIDATED: e = "AUDCLNT_E_DEVICE_INVALIDATED"; break; + case AUDCLNT_E_NOT_STOPPED: e = "AUDCLNT_E_NOT_STOPPED"; break; + case AUDCLNT_E_BUFFER_TOO_LARGE: e = "AUDCLNT_E_BUFFER_TOO_LARGE"; break; + case AUDCLNT_E_OUT_OF_ORDER: e = "AUDCLNT_E_OUT_OF_ORDER"; break; + case AUDCLNT_E_UNSUPPORTED_FORMAT: e = "AUDCLNT_E_UNSUPPORTED_FORMAT"; break; + case AUDCLNT_E_INVALID_SIZE: e = "AUDCLNT_E_INVALID_SIZE"; break; + case AUDCLNT_E_DEVICE_IN_USE: e = "AUDCLNT_E_DEVICE_IN_USE"; break; + case AUDCLNT_E_BUFFER_OPERATION_PENDING: e = "AUDCLNT_E_BUFFER_OPERATION_PENDING"; break; + case AUDCLNT_E_THREAD_NOT_REGISTERED: e = "AUDCLNT_E_THREAD_NOT_REGISTERED"; break; + case AUDCLNT_E_EXCLUSIVE_MODE_NOT_ALLOWED: e = "AUDCLNT_E_EXCLUSIVE_MODE_NOT_ALLOWED"; break; + case AUDCLNT_E_ENDPOINT_CREATE_FAILED: e = "AUDCLNT_E_ENDPOINT_CREATE_FAILED"; break; + case AUDCLNT_E_SERVICE_NOT_RUNNING: e = "AUDCLNT_E_SERVICE_NOT_RUNNING"; break; + case AUDCLNT_E_EVENTHANDLE_NOT_EXPECTED: e = "AUDCLNT_E_EVENTHANDLE_NOT_EXPECTED"; break; + case AUDCLNT_E_EXCLUSIVE_MODE_ONLY: e = "AUDCLNT_E_EXCLUSIVE_MODE_ONLY"; break; + case AUDCLNT_E_BUFDURATION_PERIOD_NOT_EQUAL: e = "AUDCLNT_E_BUFDURATION_PERIOD_NOT_EQUAL"; break; + case AUDCLNT_E_EVENTHANDLE_NOT_SET: e = "AUDCLNT_E_EVENTHANDLE_NOT_SET"; break; + case AUDCLNT_E_INCORRECT_BUFFER_SIZE: e = "AUDCLNT_E_INCORRECT_BUFFER_SIZE"; break; + case AUDCLNT_E_BUFFER_SIZE_ERROR: e = "AUDCLNT_E_BUFFER_SIZE_ERROR"; break; + case AUDCLNT_S_BUFFER_EMPTY: e = "AUDCLNT_S_BUFFER_EMPTY"; break; + case AUDCLNT_S_THREAD_ALREADY_REGISTERED: e = "AUDCLNT_S_THREAD_ALREADY_REGISTERED"; break; + default: return String::toHexString ((int) hr); + } + + return e; +} + +#define logFailure(hr) { if (FAILED (hr)) { DBG ("WASAPI FAIL! 
" + getAudioErrorDesc (hr)); jassertfalse } } +#define OK(a) wasapi_checkResult(a) + +static bool wasapi_checkResult (HRESULT hr) +{ + logFailure (hr); + return SUCCEEDED (hr); +} + +#else + #define logFailure(hr) {} + #define OK(a) SUCCEEDED(a) +#endif + +static const String wasapi_getDeviceID (IMMDevice* const device) +{ + String s; + WCHAR* deviceId = 0; + + if (OK (device->GetId (&deviceId))) + { + s = String (deviceId); + CoTaskMemFree (deviceId); + } + + return s; +} + +static EDataFlow wasapi_getDataFlow (IMMDevice* const device) +{ + EDataFlow flow = eRender; + ComSmartPtr endPoint; + if (OK (device->QueryInterface (__uuidof (IMMEndpoint), (void**) &endPoint))) + (void) OK (endPoint->GetDataFlow (&flow)); + + return flow; +} + +static int wasapi_refTimeToSamples (const REFERENCE_TIME& t, const double sampleRate) throw() +{ + return roundDoubleToInt (sampleRate * ((double) t) * 0.0000001); +} + +static void wasapi_copyWavFormat (WAVEFORMATEXTENSIBLE& dest, const WAVEFORMATEX* const src) throw() +{ + memcpy (&dest, src, src->wFormatTag == WAVE_FORMAT_EXTENSIBLE ? sizeof (WAVEFORMATEXTENSIBLE) + : sizeof (WAVEFORMATEX)); +} + +class WASAPIDeviceBase +{ +public: + WASAPIDeviceBase (const ComSmartPtr & device_) + : device (device_), + sampleRate (0), + numChannels (0), + actualNumChannels (0), + defaultSampleRate (0), + minBufferSize (0), + defaultBufferSize (0), + latencySamples (0) + { + clientEvent = CreateEvent (0, false, false, _T("JuceWASAPI")); + + ComSmartPtr tempClient (createClient()); + if (tempClient == 0) + return; + + REFERENCE_TIME defaultPeriod, minPeriod; + if (! OK (tempClient->GetDevicePeriod (&defaultPeriod, &minPeriod))) + return; + + WAVEFORMATEX* mixFormat = 0; + if (! OK (tempClient->GetMixFormat (&mixFormat))) + return; + + WAVEFORMATEXTENSIBLE format; + wasapi_copyWavFormat (format, mixFormat); + CoTaskMemFree (mixFormat); + + actualNumChannels = numChannels = format.Format.nChannels; + defaultSampleRate = format.Format.nSamplesPerSec; + minBufferSize = wasapi_refTimeToSamples (minPeriod, defaultSampleRate); + defaultBufferSize = wasapi_refTimeToSamples (defaultPeriod, defaultSampleRate); + + FloatElementComparator comparator; + rates.addSorted (comparator, defaultSampleRate); + + static const double ratesToTest[] = { 44100.0, 48000.0, 88200.0, 96000.0 }; + + for (int i = 0; i < numElementsInArray (ratesToTest); ++i) + { + if (ratesToTest[i] == defaultSampleRate) + continue; + + format.Format.nSamplesPerSec = roundDoubleToInt (ratesToTest[i]); + + if (SUCCEEDED (tempClient->IsFormatSupported (AUDCLNT_SHAREMODE_SHARED, (WAVEFORMATEX*) &format, 0))) + if (! 
rates.contains (ratesToTest[i])) + rates.addSorted (comparator, ratesToTest[i]); + } + } + + ~WASAPIDeviceBase() + { + device = 0; + CloseHandle (clientEvent); + } + + bool isOk() const throw() { return defaultBufferSize > 0 && defaultSampleRate > 0; } + + bool openClient (const double newSampleRate, const BitArray& newChannels) + { + sampleRate = newSampleRate; + channels = newChannels; + channels.setRange (actualNumChannels, channels.getHighestBit() + 1 - actualNumChannels, false); + numChannels = channels.getHighestBit() + 1; + + if (numChannels == 0) + return true; + + client = createClient(); + + if (client != 0 + && (tryInitialisingWithFormat (true, 4) || tryInitialisingWithFormat (false, 4) + || tryInitialisingWithFormat (false, 3) || tryInitialisingWithFormat (false, 2))) + { + channelMaps.clear(); + for (int i = 0; i <= channels.getHighestBit(); ++i) + if (channels[i]) + channelMaps.add (i); + + REFERENCE_TIME latency; + if (OK (client->GetStreamLatency (&latency))) + latencySamples = wasapi_refTimeToSamples (latency, sampleRate); + + (void) OK (client->GetBufferSize (&actualBufferSize)); + + return OK (client->SetEventHandle (clientEvent)); + } + + return false; + } + + void closeClient() + { + if (client != 0) + client->Stop(); + + client = 0; + ResetEvent (clientEvent); + } + + ComSmartPtr device; + ComSmartPtr client; + double sampleRate, defaultSampleRate; + int numChannels, actualNumChannels; + int minBufferSize, defaultBufferSize, latencySamples; + Array rates; + HANDLE clientEvent; + BitArray channels; + AudioDataConverters::DataFormat dataFormat; + Array channelMaps; + UINT32 actualBufferSize; + int bytesPerSample; + +private: + const ComSmartPtr createClient() + { + ComSmartPtr client; + + if (device != 0) + { + HRESULT hr = device->Activate (__uuidof (IAudioClient), CLSCTX_INPROC_SERVER, 0, (void**) &client); + logFailure (hr); + } + + return client; + } + + bool tryInitialisingWithFormat (const bool useFloat, const int bytesPerSampleToTry) + { + WAVEFORMATEXTENSIBLE format; + zerostruct (format); + + if (numChannels <= 2 && bytesPerSampleToTry <= 2) + { + format.Format.wFormatTag = WAVE_FORMAT_PCM; + } + else + { + format.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE; + format.Format.cbSize = sizeof (WAVEFORMATEXTENSIBLE) - sizeof (WAVEFORMATEX); + } + + format.Format.nSamplesPerSec = roundDoubleToInt (sampleRate); + format.Format.nChannels = (WORD) numChannels; + format.Format.wBitsPerSample = (WORD) (8 * bytesPerSampleToTry); + format.Format.nAvgBytesPerSec = (DWORD) (format.Format.nSamplesPerSec * numChannels * bytesPerSampleToTry); + format.Format.nBlockAlign = (WORD) (numChannels * bytesPerSampleToTry); + format.SubFormat = useFloat ? 
KSDATAFORMAT_SUBTYPE_IEEE_FLOAT : KSDATAFORMAT_SUBTYPE_PCM; + format.Samples.wValidBitsPerSample = format.Format.wBitsPerSample; + + switch (numChannels) + { + case 1: format.dwChannelMask = SPEAKER_FRONT_CENTER; break; + case 2: format.dwChannelMask = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT; break; + case 4: format.dwChannelMask = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT; break; + case 6: format.dwChannelMask = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT; break; + case 8: format.dwChannelMask = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT | SPEAKER_FRONT_LEFT_OF_CENTER | SPEAKER_FRONT_RIGHT_OF_CENTER; break; + default: break; + } + + WAVEFORMATEXTENSIBLE* nearestFormat = 0; + + HRESULT hr = client->IsFormatSupported (AUDCLNT_SHAREMODE_SHARED, (WAVEFORMATEX*) &format, (WAVEFORMATEX**) &nearestFormat); + logFailure (hr); + + if (hr == S_FALSE && format.Format.nSamplesPerSec == nearestFormat->Format.nSamplesPerSec) + { + wasapi_copyWavFormat (format, (WAVEFORMATEX*) nearestFormat); + hr = S_OK; + } + + CoTaskMemFree (nearestFormat); + + GUID session; + if (hr == S_OK + && OK (client->Initialize (AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_EVENTCALLBACK, + 0, 0, (WAVEFORMATEX*) &format, &session))) + { + actualNumChannels = format.Format.nChannels; + const bool isFloat = format.Format.wFormatTag == WAVE_FORMAT_EXTENSIBLE && format.SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT; + bytesPerSample = format.Format.wBitsPerSample / 8; + dataFormat = isFloat ? AudioDataConverters::float32LE + : (bytesPerSample == 4 ? AudioDataConverters::int32LE + : ((bytesPerSample == 3 ? AudioDataConverters::int24LE + : AudioDataConverters::int16LE))); + return true; + } + + return false; + } +}; + +class WASAPIInputDevice : public WASAPIDeviceBase +{ +public: + WASAPIInputDevice (const ComSmartPtr & device_) + : WASAPIDeviceBase (device_), + reservoir (1, 1) + { + } + + ~WASAPIInputDevice() + { + close(); + } + + bool open (const double newSampleRate, const BitArray& newChannels) + { + reservoirSize = 0; + reservoirCapacity = 16384; + reservoir.setSize (actualNumChannels * reservoirCapacity * sizeof (float)); + return openClient (newSampleRate, newChannels) + && (numChannels == 0 || OK (client->GetService (__uuidof (IAudioCaptureClient), (void**) &captureClient))); + } + + void close() + { + closeClient(); + captureClient = 0; + reservoir.setSize (0); + } + + void copyBuffers (float** destBuffers, int numDestBuffers, int bufferSize, Thread& thread) + { + if (numChannels <= 0) + return; + + int offset = 0; + + while (bufferSize > 0) + { + if (reservoirSize > 0) // There's stuff in the reservoir, so use that... 
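+            // The reservoir holds samples left over from an earlier
+            // captureClient->GetBuffer() call that returned more data than
+            // the callback buffer could take; drain those first before
+            // asking WASAPI for a new packet.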
+ { + const int samplesToDo = jmin (bufferSize, (int) reservoirSize); + + for (int i = 0; i < numDestBuffers; ++i) + { + float* const dest = destBuffers[i] + offset; + const int srcChan = channelMaps.getUnchecked(i); + + switch (dataFormat) + { + case AudioDataConverters::float32LE: + AudioDataConverters::convertFloat32LEToFloat (((uint8*) reservoir.getData()) + 4 * srcChan, dest, samplesToDo, 4 * actualNumChannels); + break; + + case AudioDataConverters::int32LE: + AudioDataConverters::convertInt32LEToFloat (((uint8*) reservoir.getData()) + 4 * srcChan, dest, samplesToDo, 4 * actualNumChannels); + break; + + case AudioDataConverters::int24LE: + AudioDataConverters::convertInt24LEToFloat (((uint8*) reservoir.getData()) + 3 * srcChan, dest, samplesToDo, 3 * actualNumChannels); + break; + + case AudioDataConverters::int16LE: + AudioDataConverters::convertInt16LEToFloat (((uint8*) reservoir.getData()) + 2 * srcChan, dest, samplesToDo, 2 * actualNumChannels); + break; + + default: jassertfalse; break; + } + } + + bufferSize -= samplesToDo; + offset += samplesToDo; + reservoirSize -= samplesToDo; + } + else + { + UINT32 packetLength = 0; + if (! OK (captureClient->GetNextPacketSize (&packetLength))) + break; + + if (packetLength == 0) + { + if (thread.threadShouldExit()) + break; + + Thread::sleep (1); + continue; + } + + uint8* inputData = 0; + UINT32 numSamplesAvailable; + DWORD flags; + + if (OK (captureClient->GetBuffer (&inputData, &numSamplesAvailable, &flags, 0, 0))) + { + const int samplesToDo = jmin (bufferSize, (int) numSamplesAvailable); + + for (int i = 0; i < numDestBuffers; ++i) + { + float* const dest = destBuffers[i] + offset; + const int srcChan = channelMaps.getUnchecked(i); + + switch (dataFormat) + { + case AudioDataConverters::float32LE: + AudioDataConverters::convertFloat32LEToFloat (inputData + 4 * srcChan, dest, samplesToDo, 4 * actualNumChannels); + break; + + case AudioDataConverters::int32LE: + AudioDataConverters::convertInt32LEToFloat (inputData + 4 * srcChan, dest, samplesToDo, 4 * actualNumChannels); + break; + + case AudioDataConverters::int24LE: + AudioDataConverters::convertInt24LEToFloat (inputData + 3 * srcChan, dest, samplesToDo, 3 * actualNumChannels); + break; + + case AudioDataConverters::int16LE: + AudioDataConverters::convertInt16LEToFloat (inputData + 2 * srcChan, dest, samplesToDo, 2 * actualNumChannels); + break; + + default: jassertfalse; break; + } + } + + bufferSize -= samplesToDo; + offset += samplesToDo; + + if (samplesToDo < numSamplesAvailable) + { + reservoirSize = jmin (numSamplesAvailable - samplesToDo, reservoirCapacity); + memcpy ((uint8*) reservoir.getData(), inputData + bytesPerSample * actualNumChannels * samplesToDo, + bytesPerSample * actualNumChannels * reservoirSize); + } + + captureClient->ReleaseBuffer (numSamplesAvailable); + } + } + } + } + + ComSmartPtr captureClient; + MemoryBlock reservoir; + int reservoirSize, reservoirCapacity; +}; + +class WASAPIOutputDevice : public WASAPIDeviceBase +{ +public: + WASAPIOutputDevice (const ComSmartPtr & device_) + : WASAPIDeviceBase (device_) + { + } + + ~WASAPIOutputDevice() + { + close(); + } + + bool open (const double newSampleRate, const BitArray& newChannels) + { + return openClient (newSampleRate, newChannels) + && (numChannels == 0 || OK (client->GetService (__uuidof (IAudioRenderClient), (void**) &renderClient))); + } + + void close() + { + closeClient(); + renderClient = 0; + } + + void copyBuffers (const float** const srcBuffers, const int numSrcBuffers, int bufferSize, Thread& 
thread) + { + if (numChannels <= 0) + return; + + int offset = 0; + + while (bufferSize > 0) + { + UINT32 padding = 0; + if (! OK (client->GetCurrentPadding (&padding))) + return; + + const int samplesToDo = jmin ((int) (actualBufferSize - padding), bufferSize); + + if (samplesToDo <= 0) + { + if (thread.threadShouldExit()) + break; + + Thread::sleep (0); + continue; + } + + uint8* outputData = 0; + if (OK (renderClient->GetBuffer (samplesToDo, &outputData))) + { + for (int i = 0; i < numSrcBuffers; ++i) + { + const float* const source = srcBuffers[i] + offset; + const int destChan = channelMaps.getUnchecked(i); + + switch (dataFormat) + { + case AudioDataConverters::float32LE: + AudioDataConverters::convertFloatToFloat32LE (source, outputData + 4 * destChan, samplesToDo, 4 * actualNumChannels); + break; + + case AudioDataConverters::int32LE: + AudioDataConverters::convertFloatToInt32LE (source, outputData + 4 * destChan, samplesToDo, 4 * actualNumChannels); + break; + + case AudioDataConverters::int24LE: + AudioDataConverters::convertFloatToInt24LE (source, outputData + 3 * destChan, samplesToDo, 3 * actualNumChannels); + break; + + case AudioDataConverters::int16LE: + AudioDataConverters::convertFloatToInt16LE (source, outputData + 2 * destChan, samplesToDo, 2 * actualNumChannels); + break; + + default: jassertfalse; break; + } + } + + renderClient->ReleaseBuffer (samplesToDo, 0); + + offset += samplesToDo; + bufferSize -= samplesToDo; + } + } + } + + ComSmartPtr renderClient; +}; + +class WASAPIAudioIODevice : public AudioIODevice, + public Thread +{ +public: + WASAPIAudioIODevice (const String& deviceName, + const int outputDeviceIndex_, const String& outputDeviceId_, + const int inputDeviceIndex_, const String& inputDeviceId_) + : AudioIODevice (deviceName, "Windows Audio"), + Thread ("Juce WASAPI"), + isOpen_ (false), + isStarted (false), + outputDevice (0), + outputDeviceIndex (outputDeviceIndex_), + outputDeviceId (outputDeviceId_), + inputDevice (0), + inputDeviceIndex (inputDeviceIndex_), + inputDeviceId (inputDeviceId_), + currentBufferSizeSamples (0), + currentSampleRate (0), + callback (0) + { + } + + ~WASAPIAudioIODevice() + { + close(); + + deleteAndZero (inputDevice); + deleteAndZero (outputDevice); + } + + bool initialise() + { + double defaultSampleRateIn = 0, defaultSampleRateOut = 0; + int minBufferSizeIn = 0, defaultBufferSizeIn = 0, minBufferSizeOut = 0, defaultBufferSizeOut = 0; + latencyIn = latencyOut = 0; + Array ratesIn, ratesOut; + + if (createDevices()) + { + jassert (inputDevice != 0 || outputDevice != 0); + + if (inputDevice != 0 && outputDevice != 0) + { + defaultSampleRate = jmin (inputDevice->defaultSampleRate, outputDevice->defaultSampleRate); + minBufferSize = jmin (inputDevice->minBufferSize, outputDevice->minBufferSize); + defaultBufferSize = jmax (inputDevice->defaultBufferSize, outputDevice->defaultBufferSize); + sampleRates = inputDevice->rates; + sampleRates.removeValuesNotIn (outputDevice->rates); + jassert (sampleRates.size() > 0); // in and out devices don't share any common sample rates! + } + else + { + WASAPIDeviceBase* const d = inputDevice != 0 ? 
(WASAPIDeviceBase*) inputDevice : (WASAPIDeviceBase*) outputDevice; + defaultSampleRate = d->defaultSampleRate; + minBufferSize = d->minBufferSize; + defaultBufferSize = d->defaultBufferSize; + sampleRates = d->rates; + } + + IntegerElementComparator comparator; + bufferSizes.addSorted (comparator, defaultBufferSize); + if (minBufferSize != defaultBufferSize) + bufferSizes.addSorted (comparator, minBufferSize); + + int n = 64; + for (int i = 0; i < 40; ++i) + { + if (n >= minBufferSize && ! bufferSizes.contains (n)) + bufferSizes.addSorted (comparator, n); + + n += (n < 512) ? 32 + : ((n < 1024) ? 64 + : ((n < 2048) ? 128 : 256)); + } + + return true; + } + + return false; + } + + const StringArray getOutputChannelNames() + { + StringArray outChannels; + + if (outputDevice != 0) + for (int i = 1; i <= outputDevice->actualNumChannels; ++i) + outChannels.add ("Output channel " + String (i)); + + return outChannels; + } + + const StringArray getInputChannelNames() + { + StringArray inChannels; + + if (inputDevice != 0) + for (int i = 1; i <= inputDevice->actualNumChannels; ++i) + inChannels.add ("Input channel " + String (i)); + + return inChannels; + } + + int getNumSampleRates() { return sampleRates.size(); } + double getSampleRate (int index) { return sampleRates [index]; } + int getNumBufferSizesAvailable() { return bufferSizes.size(); } + int getBufferSizeSamples (int index) { return bufferSizes [index]; } + int getDefaultBufferSize() { return defaultBufferSize; } + + int getCurrentBufferSizeSamples() { return currentBufferSizeSamples; } + double getCurrentSampleRate() { return currentSampleRate; } + int getCurrentBitDepth() { return 32; } + int getOutputLatencyInSamples() { return latencyOut; } + int getInputLatencyInSamples() { return latencyIn; } + const BitArray getActiveOutputChannels() const { return outputDevice != 0 ? outputDevice->channels : BitArray(); } + const BitArray getActiveInputChannels() const { return inputDevice != 0 ? inputDevice->channels : BitArray(); } + const String getLastError() { return lastError; } + + const String open (const BitArray& inputChannels, const BitArray& outputChannels, + double sampleRate, int bufferSizeSamples) + { + close(); + lastError = String::empty; + + currentBufferSizeSamples = bufferSizeSamples <= 0 ? defaultBufferSize : jmax (bufferSizeSamples, minBufferSize); + currentSampleRate = sampleRate > 0 ? sampleRate : defaultSampleRate; + + if (inputDevice != 0 && ! inputDevice->open (currentSampleRate, inputChannels)) + { + lastError = "Couldn't open the input device!"; + return lastError; + } + + if (outputDevice != 0 && ! 
outputDevice->open (currentSampleRate, outputChannels)) + { + close(); + lastError = "Couldn't open the output device!"; + return lastError; + } + + if (inputDevice != 0 && inputDevice->client != 0) + { + HRESULT hr = inputDevice->client->Start(); + logFailure (hr); //xxx handle this + } + + if (outputDevice != 0 && outputDevice->client != 0) + { + HRESULT hr = outputDevice->client->Start(); + logFailure (hr); //xxx handle this + } + + startThread (8); + + isOpen_ = true; + return lastError; + } + + void close() + { + stop(); + + if (inputDevice != 0) + SetEvent (inputDevice->clientEvent); + + if (outputDevice != 0) + SetEvent (outputDevice->clientEvent); + + stopThread (5000); + + if (inputDevice != 0) + inputDevice->close(); + + if (outputDevice != 0) + outputDevice->close(); + + isOpen_ = false; + } + + bool isOpen() { return isOpen_ && isThreadRunning(); } + bool isPlaying() { return isStarted && isOpen_ && isThreadRunning(); } + + void start (AudioIODeviceCallback* call) + { + if (isOpen_ && call != 0 && ! isStarted) + { + if (! isThreadRunning()) + { + // something's gone wrong and the thread's stopped.. + isOpen_ = false; + return; + } + + call->audioDeviceAboutToStart (this); + + const ScopedLock sl (startStopLock); + callback = call; + isStarted = true; + } + } + + void stop() + { + if (isStarted) + { + AudioIODeviceCallback* const callbackLocal = callback; + + { + const ScopedLock sl (startStopLock); + isStarted = false; + } + + if (callbackLocal != 0) + callbackLocal->audioDeviceStopped(); + } + } + + void run() + { + const int bufferSize = currentBufferSizeSamples; + + HANDLE events[2]; + int numEvents = 0; + if (inputDevice != 0) + events [numEvents++] = inputDevice->clientEvent; + if (outputDevice != 0) + events [numEvents++] = outputDevice->clientEvent; + + const int numInputBuffers = getActiveInputChannels().countNumberOfSetBits(); + const int numOutputBuffers = getActiveOutputChannels().countNumberOfSetBits(); + + AudioSampleBuffer ins (jmax (1, numInputBuffers), bufferSize + 32); + AudioSampleBuffer outs (jmax (1, numOutputBuffers), bufferSize + 32); + float** const inputBuffers = ins.getArrayOfChannels(); + float** const outputBuffers = outs.getArrayOfChannels(); + ins.clear(); + + while (! threadShouldExit()) + { + const DWORD result = WaitForMultipleObjects (numEvents, events, true, 1000); + + if (result == WAIT_TIMEOUT) + continue; + + if (threadShouldExit()) + break; + + if (inputDevice != 0) + inputDevice->copyBuffers (inputBuffers, numInputBuffers, bufferSize, *this); + + // Make the callback.. + { + const ScopedLock sl (startStopLock); + + if (isStarted) + { + JUCE_TRY + { + callback->audioDeviceIOCallback ((const float**) inputBuffers, + numInputBuffers, + outputBuffers, + numOutputBuffers, + bufferSize); + } + JUCE_CATCH_EXCEPTION + } + else + { + outs.clear(); + } + } + + if (outputDevice != 0) + outputDevice->copyBuffers ((const float**) outputBuffers, numOutputBuffers, bufferSize, *this); + } + } + + juce_UseDebuggingNewOperator + + int outputDeviceIndex, inputDeviceIndex; + String outputDeviceId, inputDeviceId; + String lastError; + +private: + // Device stats... + WASAPIInputDevice* inputDevice; + WASAPIOutputDevice* outputDevice; + double defaultSampleRate; + int minBufferSize, defaultBufferSize; + int latencyIn, latencyOut; + Array sampleRates; + Array bufferSizes; + + // Active state... 
+ bool isOpen_, isStarted; + int currentBufferSizeSamples; + double currentSampleRate; + + AudioIODeviceCallback* callback; + CriticalSection startStopLock; + + bool createDevices() + { + ComSmartPtr enumerator; + if (! OK (enumerator.CoCreateInstance (__uuidof (MMDeviceEnumerator), CLSCTX_INPROC_SERVER))) + return false; + + ComSmartPtr deviceCollection; + if (! OK (enumerator->EnumAudioEndpoints (eAll, DEVICE_STATE_ACTIVE, &deviceCollection))) + return false; + + UINT32 numDevices = 0; + if (! OK (deviceCollection->GetCount (&numDevices))) + return false; + + for (UINT32 i = 0; i < numDevices; ++i) + { + ComSmartPtr device; + if (! OK (deviceCollection->Item (i, &device))) + continue; + + const String deviceId (wasapi_getDeviceID (device)); + if (deviceId.isEmpty()) + continue; + + const EDataFlow flow = wasapi_getDataFlow (device); + + if (deviceId == inputDeviceId && flow == eCapture) + inputDevice = new WASAPIInputDevice (device); + else if (deviceId == outputDeviceId && flow == eRender) + outputDevice = new WASAPIOutputDevice (device); + } + + return (outputDeviceId.isEmpty() || (outputDevice != 0 && outputDevice->isOk())) + && (inputDeviceId.isEmpty() || (inputDevice != 0 && inputDevice->isOk())); + } + + WASAPIAudioIODevice (const WASAPIAudioIODevice&); + const WASAPIAudioIODevice& operator= (const WASAPIAudioIODevice&); +}; + +class WASAPIAudioIODeviceType : public AudioIODeviceType +{ +public: + WASAPIAudioIODeviceType() + : AudioIODeviceType (T("Windows Audio")), + hasScanned (false) + { + } + + ~WASAPIAudioIODeviceType() + { + } + + void scanForDevices() + { + hasScanned = true; + + outputDeviceNames.clear(); + inputDeviceNames.clear(); + outputDeviceIds.clear(); + inputDeviceIds.clear(); + + ComSmartPtr enumerator; + if (! OK (enumerator.CoCreateInstance (__uuidof (MMDeviceEnumerator), CLSCTX_INPROC_SERVER))) + return; + + const String defaultRenderer = getDefaultEndpoint (enumerator, false); + const String defaultCapture = getDefaultEndpoint (enumerator, true); + + ComSmartPtr deviceCollection; + UINT32 numDevices = 0; + + if (! (OK (enumerator->EnumAudioEndpoints (eAll, DEVICE_STATE_ACTIVE, &deviceCollection)) + && OK (deviceCollection->GetCount (&numDevices)))) + return; + + for (UINT32 i = 0; i < numDevices; ++i) + { + ComSmartPtr device; + if (! OK (deviceCollection->Item (i, &device))) + continue; + + const String deviceId (wasapi_getDeviceID (device)); + + DWORD state = 0; + if (! OK (device->GetState (&state))) + continue; + + if (state != DEVICE_STATE_ACTIVE) + continue; + + String name; + + { + ComSmartPtr properties; + if (! OK (device->OpenPropertyStore (STGM_READ, &properties))) + continue; + + PROPVARIANT value; + PropVariantInit (&value); + if (OK (properties->GetValue (PKEY_Device_FriendlyName, &value))) + name = value.pwszVal; + + PropVariantClear (&value); + } + + const EDataFlow flow = wasapi_getDataFlow (device); + + if (flow == eRender) + { + const int index = (deviceId == defaultRenderer) ? 0 : -1; + outputDeviceIds.insert (index, deviceId); + outputDeviceNames.insert (index, name); + } + else if (flow == eCapture) + { + const int index = (deviceId == defaultCapture) ? 0 : -1; + inputDeviceIds.insert (index, deviceId); + inputDeviceNames.insert (index, name); + } + } + } + + const StringArray getDeviceNames (const bool wantInputNames) const + { + jassert (hasScanned); // need to call scanForDevices() before doing this + + return wantInputNames ? 
inputDeviceNames + : outputDeviceNames; + } + + int getDefaultDeviceIndex (const bool /*forInput*/) const + { + jassert (hasScanned); // need to call scanForDevices() before doing this + return 0; + } + + int getIndexOfDevice (AudioIODevice* device, const bool asInput) const + { + jassert (hasScanned); // need to call scanForDevices() before doing this + + WASAPIAudioIODevice* const d = dynamic_cast (device); + return (d == 0) ? -1 : (asInput ? d->inputDeviceIndex : d->outputDeviceIndex); + } + + bool hasSeparateInputsAndOutputs() const { return true; } + + AudioIODevice* createDevice (const String& outputDeviceName, + const String& inputDeviceName) + { + jassert (hasScanned); // need to call scanForDevices() before doing this + + WASAPIAudioIODevice* d = 0; + + const int outputIndex = outputDeviceNames.indexOf (outputDeviceName); + const int inputIndex = inputDeviceNames.indexOf (inputDeviceName); + + if (outputIndex >= 0 || inputIndex >= 0) + { + d = new WASAPIAudioIODevice (outputDeviceName.isNotEmpty() ? outputDeviceName + : inputDeviceName, + outputIndex, outputDeviceIds [outputIndex], + inputIndex, inputDeviceIds [inputIndex]); + + if (! d->initialise()) + deleteAndZero (d); + } + + return d; + } + + juce_UseDebuggingNewOperator + + StringArray outputDeviceNames, outputDeviceIds; + StringArray inputDeviceNames, inputDeviceIds; + +private: + bool hasScanned; + + static const String getDefaultEndpoint (IMMDeviceEnumerator* const enumerator, const bool forCapture) + { + String s; + IMMDevice* dev = 0; + if (OK (enumerator->GetDefaultAudioEndpoint (forCapture ? eCapture : eRender, + eMultimedia, &dev))) + { + WCHAR* deviceId = 0; + if (OK (dev->GetId (&deviceId))) + { + s = String (deviceId); + CoTaskMemFree (deviceId); + } + + dev->Release(); + } + + return s; + } + + WASAPIAudioIODeviceType (const WASAPIAudioIODeviceType&); + const WASAPIAudioIODeviceType& operator= (const WASAPIAudioIODeviceType&); +}; + +AudioIODeviceType* juce_createAudioIODeviceType_WASAPI() +{ + return new WASAPIAudioIODeviceType(); +} + +#undef logFailure +#undef OK + +#endif +/********* End of inlined file: juce_win32_WASAPI.cpp *********/ + /********* Start of inlined file: juce_win32_CameraDevice.cpp *********/ // (This file gets included by juce_win32_NativeCode.cpp, rather than being // compiled on its own). @@ -252537,9 +253747,7 @@ const int KeyPress::rewindKey = (0xffeeff03) | extendedKeyModifier; /********* Start of inlined file: juce_linux_Audio.cpp *********/ // (This file gets included by juce_linux_NativeCode.cpp, rather than being // compiled on its own). -#ifdef JUCE_INCLUDED_FILE - -#if JUCE_ALSA +#ifdef JUCE_INCLUDED_FILE && JUCE_ALSA static const int maxNumChans = 64; @@ -253487,19 +254695,11 @@ private: const ALSAAudioIODeviceType& operator= (const ALSAAudioIODeviceType&); }; -AudioIODeviceType* juce_createDefaultAudioIODeviceType() +AudioIODeviceType* juce_createAudioIODeviceType_ALSA() { return new ALSAAudioIODeviceType(); } -#else // if ALSA is turned off.. 
- -AudioIODeviceType* juce_createDefaultAudioIODeviceType() -{ - return 0; -} - -#endif #endif /********* End of inlined file: juce_linux_Audio.cpp *********/ @@ -262513,7 +263713,7 @@ private: const CoreAudioIODeviceType& operator= (const CoreAudioIODeviceType&); }; -AudioIODeviceType* juce_createDefaultAudioIODeviceType() +AudioIODeviceType* juce_createAudioIODeviceType_CoreAudio() { return new CoreAudioIODeviceType(); } diff --git a/juce_amalgamated.h b/juce_amalgamated.h index 0e5c87d1ce..f2608e87a5 100644 --- a/juce_amalgamated.h +++ b/juce_amalgamated.h @@ -118,6 +118,18 @@ #define JUCE_ASIO 1 #endif +/** Comment out this macro to disable the Windows WASAPI audio device type. +*/ +#ifndef JUCE_WASAPI +// #define JUCE_WASAPI 1 +#endif + +/** Comment out this macro to disable the Windows WASAPI audio device type. +*/ +#ifndef JUCE_DIRECTSOUND + #define JUCE_DIRECTSOUND 1 +#endif + /** Comment out this macro to disable building of ALSA device support on Linux. */ #ifndef JUCE_ALSA @@ -8206,6 +8218,7 @@ public: Win2000 = 0x4105, WinXP = 0x4106, WinVista = 0x4107, + Windows7 = 0x4108, Windows = 0x4000, /**< To test whether any version of Windows is running, you can use the expression ((getOperatingSystemType() & Windows) != 0). */ @@ -37167,72 +37180,6 @@ private: #endif #ifndef __JUCE_AUDIOFORMATWRITER_JUCEHEADER__ -#endif -#ifndef __JUCE_AUDIOSUBSECTIONREADER_JUCEHEADER__ - -/********* Start of inlined file: juce_AudioSubsectionReader.h *********/ -#ifndef __JUCE_AUDIOSUBSECTIONREADER_JUCEHEADER__ -#define __JUCE_AUDIOSUBSECTIONREADER_JUCEHEADER__ - -/** - This class is used to wrap an AudioFormatReader and only read from a - subsection of the file. - - So if you have a reader which can read a 1000 sample file, you could wrap it - in one of these to only access, e.g. samples 100 to 200, and any samples - outside that will come back as 0. Accessing sample 0 from this reader will - actually read the first sample from the other's subsection, which might - be at a non-zero position. - - @see AudioFormatReader -*/ -class JUCE_API AudioSubsectionReader : public AudioFormatReader -{ -public: - - /** Creates a AudioSubsectionReader for a given data source. - - @param sourceReader the source reader from which we'll be taking data - @param subsectionStartSample the sample within the source reader which will be - mapped onto sample 0 for this reader. - @param subsectionLength the number of samples from the source that will - make up the subsection. If this reader is asked for - any samples beyond this region, it will return zero. - @param deleteSourceWhenDeleted if true, the sourceReader object will be deleted when - this object is deleted. - */ - AudioSubsectionReader (AudioFormatReader* const sourceReader, - const int64 subsectionStartSample, - const int64 subsectionLength, - const bool deleteSourceWhenDeleted); - - /** Destructor. 
*/ - ~AudioSubsectionReader(); - - bool readSamples (int** destSamples, int numDestChannels, int startOffsetInDestBuffer, - int64 startSampleInFile, int numSamples); - - void readMaxLevels (int64 startSample, - int64 numSamples, - float& lowestLeft, - float& highestLeft, - float& lowestRight, - float& highestRight); - - juce_UseDebuggingNewOperator - -private: - AudioFormatReader* const source; - int64 startSample, length; - const bool deleteSourceWhenDeleted; - - AudioSubsectionReader (const AudioSubsectionReader&); - const AudioSubsectionReader& operator= (const AudioSubsectionReader&); -}; - -#endif // __JUCE_AUDIOSUBSECTIONREADER_JUCEHEADER__ -/********* End of inlined file: juce_AudioSubsectionReader.h *********/ - #endif #ifndef __JUCE_AUDIOTHUMBNAIL_JUCEHEADER__ @@ -37387,6 +37334,72 @@ private: #endif // __JUCE_AUDIOTHUMBNAIL_JUCEHEADER__ /********* End of inlined file: juce_AudioThumbnail.h *********/ +#endif +#ifndef __JUCE_AUDIOSUBSECTIONREADER_JUCEHEADER__ + +/********* Start of inlined file: juce_AudioSubsectionReader.h *********/ +#ifndef __JUCE_AUDIOSUBSECTIONREADER_JUCEHEADER__ +#define __JUCE_AUDIOSUBSECTIONREADER_JUCEHEADER__ + +/** + This class is used to wrap an AudioFormatReader and only read from a + subsection of the file. + + So if you have a reader which can read a 1000 sample file, you could wrap it + in one of these to only access, e.g. samples 100 to 200, and any samples + outside that will come back as 0. Accessing sample 0 from this reader will + actually read the first sample from the other's subsection, which might + be at a non-zero position. + + @see AudioFormatReader +*/ +class JUCE_API AudioSubsectionReader : public AudioFormatReader +{ +public: + + /** Creates a AudioSubsectionReader for a given data source. + + @param sourceReader the source reader from which we'll be taking data + @param subsectionStartSample the sample within the source reader which will be + mapped onto sample 0 for this reader. + @param subsectionLength the number of samples from the source that will + make up the subsection. If this reader is asked for + any samples beyond this region, it will return zero. + @param deleteSourceWhenDeleted if true, the sourceReader object will be deleted when + this object is deleted. + */ + AudioSubsectionReader (AudioFormatReader* const sourceReader, + const int64 subsectionStartSample, + const int64 subsectionLength, + const bool deleteSourceWhenDeleted); + + /** Destructor. 
*/ + ~AudioSubsectionReader(); + + bool readSamples (int** destSamples, int numDestChannels, int startOffsetInDestBuffer, + int64 startSampleInFile, int numSamples); + + void readMaxLevels (int64 startSample, + int64 numSamples, + float& lowestLeft, + float& highestLeft, + float& lowestRight, + float& highestRight); + + juce_UseDebuggingNewOperator + +private: + AudioFormatReader* const source; + int64 startSample, length; + const bool deleteSourceWhenDeleted; + + AudioSubsectionReader (const AudioSubsectionReader&); + const AudioSubsectionReader& operator= (const AudioSubsectionReader&); +}; + +#endif // __JUCE_AUDIOSUBSECTIONREADER_JUCEHEADER__ +/********* End of inlined file: juce_AudioSubsectionReader.h *********/ + #endif #ifndef __JUCE_AUDIOTHUMBNAILCACHE_JUCEHEADER__ @@ -37501,114 +37514,6 @@ public: #endif // __JUCE_FLACAUDIOFORMAT_JUCEHEADER__ /********* End of inlined file: juce_FlacAudioFormat.h *********/ -#endif -#ifndef __JUCE_OGGVORBISAUDIOFORMAT_JUCEHEADER__ - -/********* Start of inlined file: juce_OggVorbisAudioFormat.h *********/ -#ifndef __JUCE_OGGVORBISAUDIOFORMAT_JUCEHEADER__ -#define __JUCE_OGGVORBISAUDIOFORMAT_JUCEHEADER__ - -#if JUCE_USE_OGGVORBIS || defined (DOXYGEN) - -/** - Reads and writes the Ogg-Vorbis audio format. - - To compile this, you'll need to set the JUCE_USE_OGGVORBIS flag in juce_Config.h, - and make sure your include search path and library search path are set up to find - the Vorbis and Ogg header files and static libraries. - - @see AudioFormat, -*/ -class JUCE_API OggVorbisAudioFormat : public AudioFormat -{ -public: - - OggVorbisAudioFormat(); - ~OggVorbisAudioFormat(); - - const Array getPossibleSampleRates(); - const Array getPossibleBitDepths(); - bool canDoStereo(); - bool canDoMono(); - bool isCompressed(); - const StringArray getQualityOptions(); - - /** Tries to estimate the quality level of an ogg file based on its size. - - If it can't read the file for some reason, this will just return 1 (medium quality), - otherwise it will return the approximate quality setting that would have been used - to create the file. - - @see getQualityOptions - */ - int estimateOggFileQuality (const File& source); - - AudioFormatReader* createReaderFor (InputStream* sourceStream, - const bool deleteStreamIfOpeningFails); - - AudioFormatWriter* createWriterFor (OutputStream* streamToWriteTo, - double sampleRateToUse, - unsigned int numberOfChannels, - int bitsPerSample, - const StringPairArray& metadataValues, - int qualityOptionIndex); - - juce_UseDebuggingNewOperator -}; - -#endif -#endif // __JUCE_OGGVORBISAUDIOFORMAT_JUCEHEADER__ -/********* End of inlined file: juce_OggVorbisAudioFormat.h *********/ - -#endif -#ifndef __JUCE_QUICKTIMEAUDIOFORMAT_JUCEHEADER__ - -/********* Start of inlined file: juce_QuickTimeAudioFormat.h *********/ -#ifndef __JUCE_QUICKTIMEAUDIOFORMAT_JUCEHEADER__ -#define __JUCE_QUICKTIMEAUDIOFORMAT_JUCEHEADER__ - -#if JUCE_QUICKTIME - -/** - Uses QuickTime to read the audio track a movie or media file. - - As well as QuickTime movies, this should also manage to open other audio - files that quicktime can understand, like mp3, m4a, etc. - - @see AudioFormat -*/ -class JUCE_API QuickTimeAudioFormat : public AudioFormat -{ -public: - - /** Creates a format object. */ - QuickTimeAudioFormat(); - - /** Destructor. 
*/ - ~QuickTimeAudioFormat(); - - const Array getPossibleSampleRates(); - const Array getPossibleBitDepths(); - bool canDoStereo(); - bool canDoMono(); - - AudioFormatReader* createReaderFor (InputStream* sourceStream, - const bool deleteStreamIfOpeningFails); - - AudioFormatWriter* createWriterFor (OutputStream* streamToWriteTo, - double sampleRateToUse, - unsigned int numberOfChannels, - int bitsPerSample, - const StringPairArray& metadataValues, - int qualityOptionIndex); - - juce_UseDebuggingNewOperator -}; - -#endif -#endif // __JUCE_QUICKTIMEAUDIOFORMAT_JUCEHEADER__ -/********* End of inlined file: juce_QuickTimeAudioFormat.h *********/ - #endif #ifndef __JUCE_WAVAUDIOFORMAT_JUCEHEADER__ @@ -37731,6 +37636,114 @@ public: #endif // __JUCE_WAVAUDIOFORMAT_JUCEHEADER__ /********* End of inlined file: juce_WavAudioFormat.h *********/ +#endif +#ifndef __JUCE_OGGVORBISAUDIOFORMAT_JUCEHEADER__ + +/********* Start of inlined file: juce_OggVorbisAudioFormat.h *********/ +#ifndef __JUCE_OGGVORBISAUDIOFORMAT_JUCEHEADER__ +#define __JUCE_OGGVORBISAUDIOFORMAT_JUCEHEADER__ + +#if JUCE_USE_OGGVORBIS || defined (DOXYGEN) + +/** + Reads and writes the Ogg-Vorbis audio format. + + To compile this, you'll need to set the JUCE_USE_OGGVORBIS flag in juce_Config.h, + and make sure your include search path and library search path are set up to find + the Vorbis and Ogg header files and static libraries. + + @see AudioFormat, +*/ +class JUCE_API OggVorbisAudioFormat : public AudioFormat +{ +public: + + OggVorbisAudioFormat(); + ~OggVorbisAudioFormat(); + + const Array getPossibleSampleRates(); + const Array getPossibleBitDepths(); + bool canDoStereo(); + bool canDoMono(); + bool isCompressed(); + const StringArray getQualityOptions(); + + /** Tries to estimate the quality level of an ogg file based on its size. + + If it can't read the file for some reason, this will just return 1 (medium quality), + otherwise it will return the approximate quality setting that would have been used + to create the file. + + @see getQualityOptions + */ + int estimateOggFileQuality (const File& source); + + AudioFormatReader* createReaderFor (InputStream* sourceStream, + const bool deleteStreamIfOpeningFails); + + AudioFormatWriter* createWriterFor (OutputStream* streamToWriteTo, + double sampleRateToUse, + unsigned int numberOfChannels, + int bitsPerSample, + const StringPairArray& metadataValues, + int qualityOptionIndex); + + juce_UseDebuggingNewOperator +}; + +#endif +#endif // __JUCE_OGGVORBISAUDIOFORMAT_JUCEHEADER__ +/********* End of inlined file: juce_OggVorbisAudioFormat.h *********/ + +#endif +#ifndef __JUCE_QUICKTIMEAUDIOFORMAT_JUCEHEADER__ + +/********* Start of inlined file: juce_QuickTimeAudioFormat.h *********/ +#ifndef __JUCE_QUICKTIMEAUDIOFORMAT_JUCEHEADER__ +#define __JUCE_QUICKTIMEAUDIOFORMAT_JUCEHEADER__ + +#if JUCE_QUICKTIME + +/** + Uses QuickTime to read the audio track a movie or media file. + + As well as QuickTime movies, this should also manage to open other audio + files that quicktime can understand, like mp3, m4a, etc. + + @see AudioFormat +*/ +class JUCE_API QuickTimeAudioFormat : public AudioFormat +{ +public: + + /** Creates a format object. */ + QuickTimeAudioFormat(); + + /** Destructor. 
*/ + ~QuickTimeAudioFormat(); + + const Array getPossibleSampleRates(); + const Array getPossibleBitDepths(); + bool canDoStereo(); + bool canDoMono(); + + AudioFormatReader* createReaderFor (InputStream* sourceStream, + const bool deleteStreamIfOpeningFails); + + AudioFormatWriter* createWriterFor (OutputStream* streamToWriteTo, + double sampleRateToUse, + unsigned int numberOfChannels, + int bitsPerSample, + const StringPairArray& metadataValues, + int qualityOptionIndex); + + juce_UseDebuggingNewOperator +}; + +#endif +#endif // __JUCE_QUICKTIMEAUDIOFORMAT_JUCEHEADER__ +/********* End of inlined file: juce_QuickTimeAudioFormat.h *********/ + #endif #ifndef __JUCE_ACTIONBROADCASTER_JUCEHEADER__ @@ -41118,7 +41131,11 @@ private: /********* End of inlined file: juce_ImageFileFormat.h *********/ #endif -#ifndef __JUCE_DRAWABLE_JUCEHEADER__ +#ifndef __JUCE_DRAWABLECOMPOSITE_JUCEHEADER__ + +/********* Start of inlined file: juce_DrawableComposite.h *********/ +#ifndef __JUCE_DRAWABLECOMPOSITE_JUCEHEADER__ +#define __JUCE_DRAWABLECOMPOSITE_JUCEHEADER__ /********* Start of inlined file: juce_Drawable.h *********/ #ifndef __JUCE_DRAWABLE_JUCEHEADER__ @@ -41274,13 +41291,6 @@ private: #endif // __JUCE_DRAWABLE_JUCEHEADER__ /********* End of inlined file: juce_Drawable.h *********/ -#endif -#ifndef __JUCE_DRAWABLECOMPOSITE_JUCEHEADER__ - -/********* Start of inlined file: juce_DrawableComposite.h *********/ -#ifndef __JUCE_DRAWABLECOMPOSITE_JUCEHEADER__ -#define __JUCE_DRAWABLECOMPOSITE_JUCEHEADER__ - /** A drawable object which acts as a container for a set of other Drawables. @@ -41399,6 +41409,9 @@ private: #endif // __JUCE_DRAWABLECOMPOSITE_JUCEHEADER__ /********* End of inlined file: juce_DrawableComposite.h *********/ +#endif +#ifndef __JUCE_DRAWABLE_JUCEHEADER__ + #endif #ifndef __JUCE_DRAWABLEIMAGE_JUCEHEADER__ diff --git a/src/audio/audio_file_formats/juce_AudioFormat.cpp b/src/audio/audio_file_formats/juce_AudioFormat.cpp index c112dc6c5a..4ce8f7df8a 100644 --- a/src/audio/audio_file_formats/juce_AudioFormat.cpp +++ b/src/audio/audio_file_formats/juce_AudioFormat.cpp @@ -204,7 +204,7 @@ void AudioFormatReader::readMaxLevels (int64 startSampleInFile, numSamples -= numToDo; startSampleInFile += numToDo; - + for (int j = numChannels; --j >= 0;) { int bufMax = INT_MIN; diff --git a/src/audio/devices/juce_AudioDeviceManager.cpp b/src/audio/devices/juce_AudioDeviceManager.cpp index 115b17a71a..f6ec54099e 100644 --- a/src/audio/devices/juce_AudioDeviceManager.cpp +++ b/src/audio/devices/juce_AudioDeviceManager.cpp @@ -107,30 +107,36 @@ const OwnedArray & AudioDeviceManager::getAvailableDeviceType } //============================================================================== -extern AudioIODeviceType* juce_createDefaultAudioIODeviceType(); - -#if JUCE_WIN32 && JUCE_ASIO - extern AudioIODeviceType* juce_createASIOAudioIODeviceType(); -#endif - -#if JUCE_WIN32 && JUCE_WDM_AUDIO - extern AudioIODeviceType* juce_createWDMAudioIODeviceType(); -#endif +AudioIODeviceType* juce_createAudioIODeviceType_CoreAudio(); +AudioIODeviceType* juce_createAudioIODeviceType_WASAPI(); +AudioIODeviceType* juce_createAudioIODeviceType_DirectSound(); +AudioIODeviceType* juce_createAudioIODeviceType_ASIO(); +AudioIODeviceType* juce_createAudioIODeviceType_ALSA(); void AudioDeviceManager::createAudioDeviceTypes (OwnedArray & list) { - AudioIODeviceType* const defaultDeviceType = juce_createDefaultAudioIODeviceType(); + #if JUCE_WIN32 + #if JUCE_WASAPI + if (SystemStats::getOperatingSystemType() >= 
SystemStats::WinVista) + list.add (juce_createAudioIODeviceType_WASAPI()); + #endif + + #if JUCE_DIRECTSOUND + list.add (juce_createAudioIODeviceType_DirectSound()); + #endif - if (defaultDeviceType != 0) - list.add (defaultDeviceType); + #if JUCE_ASIO + list.add (juce_createAudioIODeviceType_ASIO()); + #endif + #endif -#if JUCE_WIN32 && JUCE_ASIO - list.add (juce_createASIOAudioIODeviceType()); -#endif + #if JUCE_MAC + list.add (juce_createAudioIODeviceType_CoreAudio()); + #endif -#if JUCE_WIN32 && JUCE_WDM_AUDIO - list.add (juce_createWDMAudioIODeviceType()); -#endif + #if JUCE_LINUX && JUCE_ALSA + list.add (juce_createAudioIODeviceType_ALSA()); + #endif } //============================================================================== diff --git a/src/core/juce_SystemStats.h b/src/core/juce_SystemStats.h index b5b45603f5..456a7952af 100644 --- a/src/core/juce_SystemStats.h +++ b/src/core/juce_SystemStats.h @@ -60,6 +60,7 @@ public: Win2000 = 0x4105, WinXP = 0x4106, WinVista = 0x4107, + Windows7 = 0x4108, Windows = 0x4000, /**< To test whether any version of Windows is running, you can use the expression ((getOperatingSystemType() & Windows) != 0). */ diff --git a/src/juce_app_includes.h b/src/juce_app_includes.h index 216f09a4c8..3ee829d5c7 100644 --- a/src/juce_app_includes.h +++ b/src/juce_app_includes.h @@ -203,27 +203,27 @@ #ifndef __JUCE_AUDIOFORMATWRITER_JUCEHEADER__ #include "audio/audio_file_formats/juce_AudioFormatWriter.h" #endif -#ifndef __JUCE_AUDIOSUBSECTIONREADER_JUCEHEADER__ - #include "audio/audio_file_formats/juce_AudioSubsectionReader.h" -#endif #ifndef __JUCE_AUDIOTHUMBNAIL_JUCEHEADER__ #include "audio/audio_file_formats/juce_AudioThumbnail.h" #endif +#ifndef __JUCE_AUDIOSUBSECTIONREADER_JUCEHEADER__ + #include "audio/audio_file_formats/juce_AudioSubsectionReader.h" +#endif #ifndef __JUCE_AUDIOTHUMBNAILCACHE_JUCEHEADER__ #include "audio/audio_file_formats/juce_AudioThumbnailCache.h" #endif #ifndef __JUCE_FLACAUDIOFORMAT_JUCEHEADER__ #include "audio/audio_file_formats/juce_FlacAudioFormat.h" #endif +#ifndef __JUCE_WAVAUDIOFORMAT_JUCEHEADER__ + #include "audio/audio_file_formats/juce_WavAudioFormat.h" +#endif #ifndef __JUCE_OGGVORBISAUDIOFORMAT_JUCEHEADER__ #include "audio/audio_file_formats/juce_OggVorbisAudioFormat.h" #endif #ifndef __JUCE_QUICKTIMEAUDIOFORMAT_JUCEHEADER__ #include "audio/audio_file_formats/juce_QuickTimeAudioFormat.h" #endif -#ifndef __JUCE_WAVAUDIOFORMAT_JUCEHEADER__ - #include "audio/audio_file_formats/juce_WavAudioFormat.h" -#endif #ifndef __JUCE_ACTIONBROADCASTER_JUCEHEADER__ #include "events/juce_ActionBroadcaster.h" #endif @@ -371,12 +371,12 @@ #ifndef __JUCE_IMAGEFILEFORMAT_JUCEHEADER__ #include "gui/graphics/imaging/juce_ImageFileFormat.h" #endif -#ifndef __JUCE_DRAWABLE_JUCEHEADER__ - #include "gui/graphics/drawables/juce_Drawable.h" -#endif #ifndef __JUCE_DRAWABLECOMPOSITE_JUCEHEADER__ #include "gui/graphics/drawables/juce_DrawableComposite.h" #endif +#ifndef __JUCE_DRAWABLE_JUCEHEADER__ + #include "gui/graphics/drawables/juce_Drawable.h" +#endif #ifndef __JUCE_DRAWABLEIMAGE_JUCEHEADER__ #include "gui/graphics/drawables/juce_DrawableImage.h" #endif diff --git a/src/native/juce_win32_NativeCode.cpp b/src/native/juce_win32_NativeCode.cpp index 9fe2f86521..6c8df850a7 100644 --- a/src/native/juce_win32_NativeCode.cpp +++ b/src/native/juce_win32_NativeCode.cpp @@ -105,6 +105,7 @@ BEGIN_JUCE_NAMESPACE #include "windows/juce_win32_Midi.cpp" #include "windows/juce_win32_ASIO.cpp" #include "windows/juce_win32_DirectSound.cpp" + #include 
"windows/juce_win32_WASAPI.cpp" #include "windows/juce_win32_CameraDevice.cpp" #endif diff --git a/src/native/linux/juce_linux_Audio.cpp b/src/native/linux/juce_linux_Audio.cpp index 7b85e85226..fdb0a3ed8d 100644 --- a/src/native/linux/juce_linux_Audio.cpp +++ b/src/native/linux/juce_linux_Audio.cpp @@ -25,9 +25,7 @@ // (This file gets included by juce_linux_NativeCode.cpp, rather than being // compiled on its own). -#ifdef JUCE_INCLUDED_FILE - -#if JUCE_ALSA +#ifdef JUCE_INCLUDED_FILE && JUCE_ALSA //============================================================================== static const int maxNumChans = 64; @@ -992,18 +990,9 @@ private: }; //============================================================================== -AudioIODeviceType* juce_createDefaultAudioIODeviceType() +AudioIODeviceType* juce_createAudioIODeviceType_ALSA() { return new ALSAAudioIODeviceType(); } -//============================================================================== -#else // if ALSA is turned off.. - -AudioIODeviceType* juce_createDefaultAudioIODeviceType() -{ - return 0; -} - -#endif #endif diff --git a/src/native/mac/juce_mac_CoreAudio.cpp b/src/native/mac/juce_mac_CoreAudio.cpp index c585787459..8602c90dcc 100644 --- a/src/native/mac/juce_mac_CoreAudio.cpp +++ b/src/native/mac/juce_mac_CoreAudio.cpp @@ -169,7 +169,7 @@ public: zerostruct (channelName); UInt32 nameSize = sizeof (channelName); - if (AudioDeviceGetProperty (deviceID, chanNum + 1, input, kAudioDevicePropertyChannelName, + if (AudioDeviceGetProperty (deviceID, chanNum + 1, input, kAudioDevicePropertyChannelName, &nameSize, &channelName) == noErr) name = String::fromUTF8 (channelName, nameSize); } @@ -1310,7 +1310,7 @@ private: }; //============================================================================== -AudioIODeviceType* juce_createDefaultAudioIODeviceType() +AudioIODeviceType* juce_createAudioIODeviceType_CoreAudio() { return new CoreAudioIODeviceType(); } diff --git a/src/native/windows/juce_win32_ASIO.cpp b/src/native/windows/juce_win32_ASIO.cpp index b1f69fbb15..5b5f07cfb1 100644 --- a/src/native/windows/juce_win32_ASIO.cpp +++ b/src/native/windows/juce_win32_ASIO.cpp @@ -1949,7 +1949,7 @@ private: const ASIOAudioIODeviceType& operator= (const ASIOAudioIODeviceType&); }; -AudioIODeviceType* juce_createASIOAudioIODeviceType() +AudioIODeviceType* juce_createAudioIODeviceType_ASIO() { return new ASIOAudioIODeviceType(); } diff --git a/src/native/windows/juce_win32_DirectSound.cpp b/src/native/windows/juce_win32_DirectSound.cpp index c237393689..2a3494e27d 100644 --- a/src/native/windows/juce_win32_DirectSound.cpp +++ b/src/native/windows/juce_win32_DirectSound.cpp @@ -25,7 +25,7 @@ // (This file gets included by juce_win32_NativeCode.cpp, rather than being // compiled on its own). 
-#if JUCE_INCLUDED_FILE +#if JUCE_INCLUDED_FILE && JUCE_DIRECTSOUND //============================================================================== END_JUCE_NAMESPACE @@ -1491,12 +1491,6 @@ private: const DSoundAudioIODeviceType& operator= (const DSoundAudioIODeviceType&); }; -//============================================================================== -AudioIODeviceType* juce_createDefaultAudioIODeviceType() -{ - return new DSoundAudioIODeviceType(); -} - //============================================================================== const String DSoundAudioIODevice::openDevice (const BitArray& inputChannels, const BitArray& outputChannels, @@ -1633,6 +1627,12 @@ const String DSoundAudioIODevice::openDevice (const BitArray& inputChannels, return error; } +//============================================================================== +AudioIODeviceType* juce_createAudioIODeviceType_DirectSound() +{ + return new DSoundAudioIODeviceType(); +} + #undef log diff --git a/src/native/windows/juce_win32_NativeIncludes.h b/src/native/windows/juce_win32_NativeIncludes.h index 96e8c8e56b..4c43ee53d8 100644 --- a/src/native/windows/juce_win32_NativeIncludes.h +++ b/src/native/windows/juce_win32_NativeIncludes.h @@ -133,6 +133,14 @@ #include #endif +//============================================================================== +#if JUCE_WASAPI + #include + #include + #include + #include +#endif + //============================================================================== #if JUCE_QUICKTIME diff --git a/src/native/windows/juce_win32_SystemStats.cpp b/src/native/windows/juce_win32_SystemStats.cpp index eb4eebf89e..10b4927635 100644 --- a/src/native/windows/juce_win32_SystemStats.cpp +++ b/src/native/windows/juce_win32_SystemStats.cpp @@ -258,7 +258,7 @@ SystemStats::OperatingSystemType SystemStats::getOperatingSystemType() throw() return (info.dwMinorVersion == 0) ? Win2000 : WinXP; case 6: - return WinVista; + return (info.dwMinorVersion == 0) ? WinVista : Windows7; default: jassertfalse // !! not a supported OS! diff --git a/src/native/windows/juce_win32_WASAPI.cpp b/src/native/windows/juce_win32_WASAPI.cpp new file mode 100644 index 0000000000..ca3bc202cc --- /dev/null +++ b/src/native/windows/juce_win32_WASAPI.cpp @@ -0,0 +1,1080 @@ +/* + ============================================================================== + + This file is part of the JUCE library - "Jules' Utility Class Extensions" + Copyright 2004-9 by Raw Material Software Ltd. + + ------------------------------------------------------------------------------ + + JUCE can be redistributed and/or modified under the terms of the GNU General + Public License (Version 2), as published by the Free Software Foundation. + A copy of the license is included in the JUCE distribution, or can be found + online at www.gnu.org/licenses. + + JUCE is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR + A PARTICULAR PURPOSE. See the GNU General Public License for more details. + + ------------------------------------------------------------------------------ + + To release a closed-source product which uses JUCE, commercial licenses are + available: visit www.rawmaterialsoftware.com/juce for more information. + + ============================================================================== +*/ + +// (This file gets included by juce_win32_NativeCode.cpp, rather than being +// compiled on its own). 
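A quick note on units before the implementation that follows: WASAPI reports device periods and stream latencies as REFERENCE_TIME, which counts 100-nanosecond ticks, so converting to samples is just a scale by the sample rate. A minimal worked sketch of the same computation that wasapi_refTimeToSamples performs below (the function name here is invented for illustration):

// samples = sampleRate * t * 1e-7, where t is a count of 100ns ticks.
// e.g. a 10ms period is 100000 ticks: 44100.0 * 100000 * 0.0000001 = 441 samples.
static int exampleRefTimeToSamples (const REFERENCE_TIME t, const double sampleRate)
{
    return roundDoubleToInt (sampleRate * ((double) t) * 0.0000001);
}
// exampleRefTimeToSamples (100000, 44100.0) -> 441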
+#if JUCE_INCLUDED_FILE && JUCE_WASAPI + +//============================================================================== +#if 1 + +const String getAudioErrorDesc (HRESULT hr) +{ + const char* e = 0; + + switch (hr) + { + case E_POINTER: e = "E_POINTER"; break; + case E_INVALIDARG: e = "E_INVALIDARG"; break; + case AUDCLNT_E_NOT_INITIALIZED: e = "AUDCLNT_E_NOT_INITIALIZED"; break; + case AUDCLNT_E_ALREADY_INITIALIZED: e = "AUDCLNT_E_ALREADY_INITIALIZED"; break; + case AUDCLNT_E_WRONG_ENDPOINT_TYPE: e = "AUDCLNT_E_WRONG_ENDPOINT_TYPE"; break; + case AUDCLNT_E_DEVICE_INVALIDATED: e = "AUDCLNT_E_DEVICE_INVALIDATED"; break; + case AUDCLNT_E_NOT_STOPPED: e = "AUDCLNT_E_NOT_STOPPED"; break; + case AUDCLNT_E_BUFFER_TOO_LARGE: e = "AUDCLNT_E_BUFFER_TOO_LARGE"; break; + case AUDCLNT_E_OUT_OF_ORDER: e = "AUDCLNT_E_OUT_OF_ORDER"; break; + case AUDCLNT_E_UNSUPPORTED_FORMAT: e = "AUDCLNT_E_UNSUPPORTED_FORMAT"; break; + case AUDCLNT_E_INVALID_SIZE: e = "AUDCLNT_E_INVALID_SIZE"; break; + case AUDCLNT_E_DEVICE_IN_USE: e = "AUDCLNT_E_DEVICE_IN_USE"; break; + case AUDCLNT_E_BUFFER_OPERATION_PENDING: e = "AUDCLNT_E_BUFFER_OPERATION_PENDING"; break; + case AUDCLNT_E_THREAD_NOT_REGISTERED: e = "AUDCLNT_E_THREAD_NOT_REGISTERED"; break; + case AUDCLNT_E_EXCLUSIVE_MODE_NOT_ALLOWED: e = "AUDCLNT_E_EXCLUSIVE_MODE_NOT_ALLOWED"; break; + case AUDCLNT_E_ENDPOINT_CREATE_FAILED: e = "AUDCLNT_E_ENDPOINT_CREATE_FAILED"; break; + case AUDCLNT_E_SERVICE_NOT_RUNNING: e = "AUDCLNT_E_SERVICE_NOT_RUNNING"; break; + case AUDCLNT_E_EVENTHANDLE_NOT_EXPECTED: e = "AUDCLNT_E_EVENTHANDLE_NOT_EXPECTED"; break; + case AUDCLNT_E_EXCLUSIVE_MODE_ONLY: e = "AUDCLNT_E_EXCLUSIVE_MODE_ONLY"; break; + case AUDCLNT_E_BUFDURATION_PERIOD_NOT_EQUAL: e = "AUDCLNT_E_BUFDURATION_PERIOD_NOT_EQUAL"; break; + case AUDCLNT_E_EVENTHANDLE_NOT_SET: e = "AUDCLNT_E_EVENTHANDLE_NOT_SET"; break; + case AUDCLNT_E_INCORRECT_BUFFER_SIZE: e = "AUDCLNT_E_INCORRECT_BUFFER_SIZE"; break; + case AUDCLNT_E_BUFFER_SIZE_ERROR: e = "AUDCLNT_E_BUFFER_SIZE_ERROR"; break; + case AUDCLNT_S_BUFFER_EMPTY: e = "AUDCLNT_S_BUFFER_EMPTY"; break; + case AUDCLNT_S_THREAD_ALREADY_REGISTERED: e = "AUDCLNT_S_THREAD_ALREADY_REGISTERED"; break; + default: return String::toHexString ((int) hr); + } + + return e; +} + +#define logFailure(hr) { if (FAILED (hr)) { DBG ("WASAPI FAIL! " + getAudioErrorDesc (hr)); jassertfalse } } +#define OK(a) wasapi_checkResult(a) + +static bool wasapi_checkResult (HRESULT hr) +{ + logFailure (hr); + return SUCCEEDED (hr); +} + +#else + #define logFailure(hr) {} + #define OK(a) SUCCEEDED(a) +#endif + +//============================================================================== +static const String wasapi_getDeviceID (IMMDevice* const device) +{ + String s; + WCHAR* deviceId = 0; + + if (OK (device->GetId (&deviceId))) + { + s = String (deviceId); + CoTaskMemFree (deviceId); + } + + return s; +} + +static EDataFlow wasapi_getDataFlow (IMMDevice* const device) +{ + EDataFlow flow = eRender; + ComSmartPtr endPoint; + if (OK (device->QueryInterface (__uuidof (IMMEndpoint), (void**) &endPoint))) + (void) OK (endPoint->GetDataFlow (&flow)); + + return flow; +} + +static int wasapi_refTimeToSamples (const REFERENCE_TIME& t, const double sampleRate) throw() +{ + return roundDoubleToInt (sampleRate * ((double) t) * 0.0000001); +} + +static void wasapi_copyWavFormat (WAVEFORMATEXTENSIBLE& dest, const WAVEFORMATEX* const src) throw() +{ + memcpy (&dest, src, src->wFormatTag == WAVE_FORMAT_EXTENSIBLE ? 
sizeof (WAVEFORMATEXTENSIBLE) + : sizeof (WAVEFORMATEX)); +} + +//============================================================================== +class WASAPIDeviceBase +{ +public: + WASAPIDeviceBase (const ComSmartPtr & device_) + : device (device_), + sampleRate (0), + numChannels (0), + actualNumChannels (0), + defaultSampleRate (0), + minBufferSize (0), + defaultBufferSize (0), + latencySamples (0) + { + clientEvent = CreateEvent (0, false, false, _T("JuceWASAPI")); + + ComSmartPtr tempClient (createClient()); + if (tempClient == 0) + return; + + REFERENCE_TIME defaultPeriod, minPeriod; + if (! OK (tempClient->GetDevicePeriod (&defaultPeriod, &minPeriod))) + return; + + WAVEFORMATEX* mixFormat = 0; + if (! OK (tempClient->GetMixFormat (&mixFormat))) + return; + + WAVEFORMATEXTENSIBLE format; + wasapi_copyWavFormat (format, mixFormat); + CoTaskMemFree (mixFormat); + + actualNumChannels = numChannels = format.Format.nChannels; + defaultSampleRate = format.Format.nSamplesPerSec; + minBufferSize = wasapi_refTimeToSamples (minPeriod, defaultSampleRate); + defaultBufferSize = wasapi_refTimeToSamples (defaultPeriod, defaultSampleRate); + + FloatElementComparator comparator; + rates.addSorted (comparator, defaultSampleRate); + + static const double ratesToTest[] = { 44100.0, 48000.0, 88200.0, 96000.0 }; + + for (int i = 0; i < numElementsInArray (ratesToTest); ++i) + { + if (ratesToTest[i] == defaultSampleRate) + continue; + + format.Format.nSamplesPerSec = roundDoubleToInt (ratesToTest[i]); + + if (SUCCEEDED (tempClient->IsFormatSupported (AUDCLNT_SHAREMODE_SHARED, (WAVEFORMATEX*) &format, 0))) + if (! rates.contains (ratesToTest[i])) + rates.addSorted (comparator, ratesToTest[i]); + } + } + + ~WASAPIDeviceBase() + { + device = 0; + CloseHandle (clientEvent); + } + + bool isOk() const throw() { return defaultBufferSize > 0 && defaultSampleRate > 0; } + + bool openClient (const double newSampleRate, const BitArray& newChannels) + { + sampleRate = newSampleRate; + channels = newChannels; + channels.setRange (actualNumChannels, channels.getHighestBit() + 1 - actualNumChannels, false); + numChannels = channels.getHighestBit() + 1; + + if (numChannels == 0) + return true; + + client = createClient(); + + if (client != 0 + && (tryInitialisingWithFormat (true, 4) || tryInitialisingWithFormat (false, 4) + || tryInitialisingWithFormat (false, 3) || tryInitialisingWithFormat (false, 2))) + { + channelMaps.clear(); + for (int i = 0; i <= channels.getHighestBit(); ++i) + if (channels[i]) + channelMaps.add (i); + + REFERENCE_TIME latency; + if (OK (client->GetStreamLatency (&latency))) + latencySamples = wasapi_refTimeToSamples (latency, sampleRate); + + (void) OK (client->GetBufferSize (&actualBufferSize)); + + return OK (client->SetEventHandle (clientEvent)); + } + + return false; + } + + void closeClient() + { + if (client != 0) + client->Stop(); + + client = 0; + ResetEvent (clientEvent); + } + + ComSmartPtr device; + ComSmartPtr client; + double sampleRate, defaultSampleRate; + int numChannels, actualNumChannels; + int minBufferSize, defaultBufferSize, latencySamples; + Array rates; + HANDLE clientEvent; + BitArray channels; + AudioDataConverters::DataFormat dataFormat; + Array channelMaps; + UINT32 actualBufferSize; + int bytesPerSample; + +private: + const ComSmartPtr createClient() + { + ComSmartPtr client; + + if (device != 0) + { + HRESULT hr = device->Activate (__uuidof (IAudioClient), CLSCTX_INPROC_SERVER, 0, (void**) &client); + logFailure (hr); + } + + return client; + } + + bool 
tryInitialisingWithFormat (const bool useFloat, const int bytesPerSampleToTry) + { + WAVEFORMATEXTENSIBLE format; + zerostruct (format); + + if (numChannels <= 2 && bytesPerSampleToTry <= 2) + { + format.Format.wFormatTag = WAVE_FORMAT_PCM; + } + else + { + format.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE; + format.Format.cbSize = sizeof (WAVEFORMATEXTENSIBLE) - sizeof (WAVEFORMATEX); + } + + format.Format.nSamplesPerSec = roundDoubleToInt (sampleRate); + format.Format.nChannels = (WORD) numChannels; + format.Format.wBitsPerSample = (WORD) (8 * bytesPerSampleToTry); + format.Format.nAvgBytesPerSec = (DWORD) (format.Format.nSamplesPerSec * numChannels * bytesPerSampleToTry); + format.Format.nBlockAlign = (WORD) (numChannels * bytesPerSampleToTry); + format.SubFormat = useFloat ? KSDATAFORMAT_SUBTYPE_IEEE_FLOAT : KSDATAFORMAT_SUBTYPE_PCM; + format.Samples.wValidBitsPerSample = format.Format.wBitsPerSample; + + switch (numChannels) + { + case 1: format.dwChannelMask = SPEAKER_FRONT_CENTER; break; + case 2: format.dwChannelMask = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT; break; + case 4: format.dwChannelMask = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT; break; + case 6: format.dwChannelMask = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT; break; + case 8: format.dwChannelMask = SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_FRONT_CENTER | SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT | SPEAKER_FRONT_LEFT_OF_CENTER | SPEAKER_FRONT_RIGHT_OF_CENTER; break; + default: break; + } + + WAVEFORMATEXTENSIBLE* nearestFormat = 0; + + HRESULT hr = client->IsFormatSupported (AUDCLNT_SHAREMODE_SHARED, (WAVEFORMATEX*) &format, (WAVEFORMATEX**) &nearestFormat); + logFailure (hr); + + if (hr == S_FALSE && format.Format.nSamplesPerSec == nearestFormat->Format.nSamplesPerSec) + { + wasapi_copyWavFormat (format, (WAVEFORMATEX*) nearestFormat); + hr = S_OK; + } + + CoTaskMemFree (nearestFormat); + + GUID session; + if (hr == S_OK + && OK (client->Initialize (AUDCLNT_SHAREMODE_SHARED, AUDCLNT_STREAMFLAGS_EVENTCALLBACK, + 0, 0, (WAVEFORMATEX*) &format, &session))) + { + actualNumChannels = format.Format.nChannels; + const bool isFloat = format.Format.wFormatTag == WAVE_FORMAT_EXTENSIBLE && format.SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT; + bytesPerSample = format.Format.wBitsPerSample / 8; + dataFormat = isFloat ? AudioDataConverters::float32LE + : (bytesPerSample == 4 ? AudioDataConverters::int32LE + : ((bytesPerSample == 3 ? 
AudioDataConverters::int24LE + : AudioDataConverters::int16LE))); + return true; + } + + return false; + } +}; + +//============================================================================== +class WASAPIInputDevice : public WASAPIDeviceBase +{ +public: + WASAPIInputDevice (const ComSmartPtr & device_) + : WASAPIDeviceBase (device_), + reservoir (1, 1) + { + } + + ~WASAPIInputDevice() + { + close(); + } + + bool open (const double newSampleRate, const BitArray& newChannels) + { + reservoirSize = 0; + reservoirCapacity = 16384; + reservoir.setSize (actualNumChannels * reservoirCapacity * sizeof (float)); + return openClient (newSampleRate, newChannels) + && (numChannels == 0 || OK (client->GetService (__uuidof (IAudioCaptureClient), (void**) &captureClient))); + } + + void close() + { + closeClient(); + captureClient = 0; + reservoir.setSize (0); + } + + void copyBuffers (float** destBuffers, int numDestBuffers, int bufferSize, Thread& thread) + { + if (numChannels <= 0) + return; + + int offset = 0; + + while (bufferSize > 0) + { + if (reservoirSize > 0) // There's stuff in the reservoir, so use that... + { + const int samplesToDo = jmin (bufferSize, (int) reservoirSize); + + for (int i = 0; i < numDestBuffers; ++i) + { + float* const dest = destBuffers[i] + offset; + const int srcChan = channelMaps.getUnchecked(i); + + switch (dataFormat) + { + case AudioDataConverters::float32LE: + AudioDataConverters::convertFloat32LEToFloat (((uint8*) reservoir.getData()) + 4 * srcChan, dest, samplesToDo, 4 * actualNumChannels); + break; + + case AudioDataConverters::int32LE: + AudioDataConverters::convertInt32LEToFloat (((uint8*) reservoir.getData()) + 4 * srcChan, dest, samplesToDo, 4 * actualNumChannels); + break; + + case AudioDataConverters::int24LE: + AudioDataConverters::convertInt24LEToFloat (((uint8*) reservoir.getData()) + 3 * srcChan, dest, samplesToDo, 3 * actualNumChannels); + break; + + case AudioDataConverters::int16LE: + AudioDataConverters::convertInt16LEToFloat (((uint8*) reservoir.getData()) + 2 * srcChan, dest, samplesToDo, 2 * actualNumChannels); + break; + + default: jassertfalse; break; + } + } + + bufferSize -= samplesToDo; + offset += samplesToDo; + reservoirSize -= samplesToDo; + } + else + { + UINT32 packetLength = 0; + if (! 
OK (captureClient->GetNextPacketSize (&packetLength))) + break; + + if (packetLength == 0) + { + if (thread.threadShouldExit()) + break; + + Thread::sleep (1); + continue; + } + + uint8* inputData = 0; + UINT32 numSamplesAvailable; + DWORD flags; + + if (OK (captureClient->GetBuffer (&inputData, &numSamplesAvailable, &flags, 0, 0))) + { + const int samplesToDo = jmin (bufferSize, (int) numSamplesAvailable); + + for (int i = 0; i < numDestBuffers; ++i) + { + float* const dest = destBuffers[i] + offset; + const int srcChan = channelMaps.getUnchecked(i); + + switch (dataFormat) + { + case AudioDataConverters::float32LE: + AudioDataConverters::convertFloat32LEToFloat (inputData + 4 * srcChan, dest, samplesToDo, 4 * actualNumChannels); + break; + + case AudioDataConverters::int32LE: + AudioDataConverters::convertInt32LEToFloat (inputData + 4 * srcChan, dest, samplesToDo, 4 * actualNumChannels); + break; + + case AudioDataConverters::int24LE: + AudioDataConverters::convertInt24LEToFloat (inputData + 3 * srcChan, dest, samplesToDo, 3 * actualNumChannels); + break; + + case AudioDataConverters::int16LE: + AudioDataConverters::convertInt16LEToFloat (inputData + 2 * srcChan, dest, samplesToDo, 2 * actualNumChannels); + break; + + default: jassertfalse; break; + } + } + + bufferSize -= samplesToDo; + offset += samplesToDo; + + if (samplesToDo < numSamplesAvailable) + { + reservoirSize = jmin (numSamplesAvailable - samplesToDo, reservoirCapacity); + memcpy ((uint8*) reservoir.getData(), inputData + bytesPerSample * actualNumChannels * samplesToDo, + bytesPerSample * actualNumChannels * reservoirSize); + } + + captureClient->ReleaseBuffer (numSamplesAvailable); + } + } + } + } + + ComSmartPtr captureClient; + MemoryBlock reservoir; + int reservoirSize, reservoirCapacity; +}; + +//============================================================================== +class WASAPIOutputDevice : public WASAPIDeviceBase +{ +public: + WASAPIOutputDevice (const ComSmartPtr & device_) + : WASAPIDeviceBase (device_) + { + } + + ~WASAPIOutputDevice() + { + close(); + } + + bool open (const double newSampleRate, const BitArray& newChannels) + { + return openClient (newSampleRate, newChannels) + && (numChannels == 0 || OK (client->GetService (__uuidof (IAudioRenderClient), (void**) &renderClient))); + } + + void close() + { + closeClient(); + renderClient = 0; + } + + void copyBuffers (const float** const srcBuffers, const int numSrcBuffers, int bufferSize, Thread& thread) + { + if (numChannels <= 0) + return; + + int offset = 0; + + while (bufferSize > 0) + { + UINT32 padding = 0; + if (! 
OK (client->GetCurrentPadding (&padding))) + return; + + const int samplesToDo = jmin ((int) (actualBufferSize - padding), bufferSize); + + if (samplesToDo <= 0) + { + if (thread.threadShouldExit()) + break; + + Thread::sleep (0); + continue; + } + + uint8* outputData = 0; + if (OK (renderClient->GetBuffer (samplesToDo, &outputData))) + { + for (int i = 0; i < numSrcBuffers; ++i) + { + const float* const source = srcBuffers[i] + offset; + const int destChan = channelMaps.getUnchecked(i); + + switch (dataFormat) + { + case AudioDataConverters::float32LE: + AudioDataConverters::convertFloatToFloat32LE (source, outputData + 4 * destChan, samplesToDo, 4 * actualNumChannels); + break; + + case AudioDataConverters::int32LE: + AudioDataConverters::convertFloatToInt32LE (source, outputData + 4 * destChan, samplesToDo, 4 * actualNumChannels); + break; + + case AudioDataConverters::int24LE: + AudioDataConverters::convertFloatToInt24LE (source, outputData + 3 * destChan, samplesToDo, 3 * actualNumChannels); + break; + + case AudioDataConverters::int16LE: + AudioDataConverters::convertFloatToInt16LE (source, outputData + 2 * destChan, samplesToDo, 2 * actualNumChannels); + break; + + default: jassertfalse; break; + } + } + + renderClient->ReleaseBuffer (samplesToDo, 0); + + offset += samplesToDo; + bufferSize -= samplesToDo; + } + } + } + + ComSmartPtr renderClient; +}; + +//============================================================================== +class WASAPIAudioIODevice : public AudioIODevice, + public Thread +{ +public: + WASAPIAudioIODevice (const String& deviceName, + const int outputDeviceIndex_, const String& outputDeviceId_, + const int inputDeviceIndex_, const String& inputDeviceId_) + : AudioIODevice (deviceName, "Windows Audio"), + Thread ("Juce WASAPI"), + isOpen_ (false), + isStarted (false), + outputDevice (0), + outputDeviceIndex (outputDeviceIndex_), + outputDeviceId (outputDeviceId_), + inputDevice (0), + inputDeviceIndex (inputDeviceIndex_), + inputDeviceId (inputDeviceId_), + currentBufferSizeSamples (0), + currentSampleRate (0), + callback (0) + { + } + + ~WASAPIAudioIODevice() + { + close(); + + deleteAndZero (inputDevice); + deleteAndZero (outputDevice); + } + + bool initialise() + { + double defaultSampleRateIn = 0, defaultSampleRateOut = 0; + int minBufferSizeIn = 0, defaultBufferSizeIn = 0, minBufferSizeOut = 0, defaultBufferSizeOut = 0; + latencyIn = latencyOut = 0; + Array ratesIn, ratesOut; + + if (createDevices()) + { + jassert (inputDevice != 0 || outputDevice != 0); + + if (inputDevice != 0 && outputDevice != 0) + { + defaultSampleRate = jmin (inputDevice->defaultSampleRate, outputDevice->defaultSampleRate); + minBufferSize = jmin (inputDevice->minBufferSize, outputDevice->minBufferSize); + defaultBufferSize = jmax (inputDevice->defaultBufferSize, outputDevice->defaultBufferSize); + sampleRates = inputDevice->rates; + sampleRates.removeValuesNotIn (outputDevice->rates); + jassert (sampleRates.size() > 0); // in and out devices don't share any common sample rates! + } + else + { + WASAPIDeviceBase* const d = inputDevice != 0 ? 
(WASAPIDeviceBase*) inputDevice : (WASAPIDeviceBase*) outputDevice; + defaultSampleRate = d->defaultSampleRate; + minBufferSize = d->minBufferSize; + defaultBufferSize = d->defaultBufferSize; + sampleRates = d->rates; + } + + IntegerElementComparator comparator; + bufferSizes.addSorted (comparator, defaultBufferSize); + if (minBufferSize != defaultBufferSize) + bufferSizes.addSorted (comparator, minBufferSize); + + int n = 64; + for (int i = 0; i < 40; ++i) + { + if (n >= minBufferSize && ! bufferSizes.contains (n)) + bufferSizes.addSorted (comparator, n); + + n += (n < 512) ? 32 + : ((n < 1024) ? 64 + : ((n < 2048) ? 128 : 256)); + } + + return true; + } + + return false; + } + + const StringArray getOutputChannelNames() + { + StringArray outChannels; + + if (outputDevice != 0) + for (int i = 1; i <= outputDevice->actualNumChannels; ++i) + outChannels.add ("Output channel " + String (i)); + + return outChannels; + } + + const StringArray getInputChannelNames() + { + StringArray inChannels; + + if (inputDevice != 0) + for (int i = 1; i <= inputDevice->actualNumChannels; ++i) + inChannels.add ("Input channel " + String (i)); + + return inChannels; + } + + int getNumSampleRates() { return sampleRates.size(); } + double getSampleRate (int index) { return sampleRates [index]; } + int getNumBufferSizesAvailable() { return bufferSizes.size(); } + int getBufferSizeSamples (int index) { return bufferSizes [index]; } + int getDefaultBufferSize() { return defaultBufferSize; } + + int getCurrentBufferSizeSamples() { return currentBufferSizeSamples; } + double getCurrentSampleRate() { return currentSampleRate; } + int getCurrentBitDepth() { return 32; } + int getOutputLatencyInSamples() { return latencyOut; } + int getInputLatencyInSamples() { return latencyIn; } + const BitArray getActiveOutputChannels() const { return outputDevice != 0 ? outputDevice->channels : BitArray(); } + const BitArray getActiveInputChannels() const { return inputDevice != 0 ? inputDevice->channels : BitArray(); } + const String getLastError() { return lastError; } + + + const String open (const BitArray& inputChannels, const BitArray& outputChannels, + double sampleRate, int bufferSizeSamples) + { + close(); + lastError = String::empty; + + currentBufferSizeSamples = bufferSizeSamples <= 0 ? defaultBufferSize : jmax (bufferSizeSamples, minBufferSize); + currentSampleRate = sampleRate > 0 ? sampleRate : defaultSampleRate; + + if (inputDevice != 0 && ! inputDevice->open (currentSampleRate, inputChannels)) + { + lastError = "Couldn't open the input device!"; + return lastError; + } + + if (outputDevice != 0 && ! 
outputDevice->open (currentSampleRate, outputChannels)) + { + close(); + lastError = "Couldn't open the output device!"; + return lastError; + } + + if (inputDevice != 0 && inputDevice->client != 0) + { + HRESULT hr = inputDevice->client->Start(); + logFailure (hr); //xxx handle this + } + + if (outputDevice != 0 && outputDevice->client != 0) + { + HRESULT hr = outputDevice->client->Start(); + logFailure (hr); //xxx handle this + } + + startThread (8); + + isOpen_ = true; + return lastError; + } + + void close() + { + stop(); + + if (inputDevice != 0) + SetEvent (inputDevice->clientEvent); + + if (outputDevice != 0) + SetEvent (outputDevice->clientEvent); + + stopThread (5000); + + if (inputDevice != 0) + inputDevice->close(); + + if (outputDevice != 0) + outputDevice->close(); + + isOpen_ = false; + } + + bool isOpen() { return isOpen_ && isThreadRunning(); } + bool isPlaying() { return isStarted && isOpen_ && isThreadRunning(); } + + void start (AudioIODeviceCallback* call) + { + if (isOpen_ && call != 0 && ! isStarted) + { + if (! isThreadRunning()) + { + // something's gone wrong and the thread's stopped.. + isOpen_ = false; + return; + } + + call->audioDeviceAboutToStart (this); + + const ScopedLock sl (startStopLock); + callback = call; + isStarted = true; + } + } + + void stop() + { + if (isStarted) + { + AudioIODeviceCallback* const callbackLocal = callback; + + { + const ScopedLock sl (startStopLock); + isStarted = false; + } + + if (callbackLocal != 0) + callbackLocal->audioDeviceStopped(); + } + } + + void run() + { + const int bufferSize = currentBufferSizeSamples; + + HANDLE events[2]; + int numEvents = 0; + if (inputDevice != 0) + events [numEvents++] = inputDevice->clientEvent; + if (outputDevice != 0) + events [numEvents++] = outputDevice->clientEvent; + + const int numInputBuffers = getActiveInputChannels().countNumberOfSetBits(); + const int numOutputBuffers = getActiveOutputChannels().countNumberOfSetBits(); + + AudioSampleBuffer ins (jmax (1, numInputBuffers), bufferSize + 32); + AudioSampleBuffer outs (jmax (1, numOutputBuffers), bufferSize + 32); + float** const inputBuffers = ins.getArrayOfChannels(); + float** const outputBuffers = outs.getArrayOfChannels(); + ins.clear(); + + while (! threadShouldExit()) + { + const DWORD result = WaitForMultipleObjects (numEvents, events, true, 1000); + + if (result == WAIT_TIMEOUT) + continue; + + if (threadShouldExit()) + break; + + if (inputDevice != 0) + inputDevice->copyBuffers (inputBuffers, numInputBuffers, bufferSize, *this); + + // Make the callback.. + { + const ScopedLock sl (startStopLock); + + if (isStarted) + { + JUCE_TRY + { + callback->audioDeviceIOCallback ((const float**) inputBuffers, + numInputBuffers, + outputBuffers, + numOutputBuffers, + bufferSize); + } + JUCE_CATCH_EXCEPTION + } + else + { + outs.clear(); + } + } + + if (outputDevice != 0) + outputDevice->copyBuffers ((const float**) outputBuffers, numOutputBuffers, bufferSize, *this); + } + } + + //============================================================================== + juce_UseDebuggingNewOperator + + //============================================================================== + int outputDeviceIndex, inputDeviceIndex; + String outputDeviceId, inputDeviceId; + String lastError; + +private: + // Device stats... + WASAPIInputDevice* inputDevice; + WASAPIOutputDevice* outputDevice; + double defaultSampleRate; + int minBufferSize, defaultBufferSize; + int latencyIn, latencyOut; + Array sampleRates; + Array bufferSizes; + + // Active state... 
+ bool isOpen_, isStarted; + int currentBufferSizeSamples; + double currentSampleRate; + + AudioIODeviceCallback* callback; + CriticalSection startStopLock; + + //============================================================================== + bool createDevices() + { + ComSmartPtr enumerator; + if (! OK (enumerator.CoCreateInstance (__uuidof (MMDeviceEnumerator), CLSCTX_INPROC_SERVER))) + return false; + + ComSmartPtr deviceCollection; + if (! OK (enumerator->EnumAudioEndpoints (eAll, DEVICE_STATE_ACTIVE, &deviceCollection))) + return false; + + UINT32 numDevices = 0; + if (! OK (deviceCollection->GetCount (&numDevices))) + return false; + + for (UINT32 i = 0; i < numDevices; ++i) + { + ComSmartPtr device; + if (! OK (deviceCollection->Item (i, &device))) + continue; + + const String deviceId (wasapi_getDeviceID (device)); + if (deviceId.isEmpty()) + continue; + + const EDataFlow flow = wasapi_getDataFlow (device); + + if (deviceId == inputDeviceId && flow == eCapture) + inputDevice = new WASAPIInputDevice (device); + else if (deviceId == outputDeviceId && flow == eRender) + outputDevice = new WASAPIOutputDevice (device); + } + + return (outputDeviceId.isEmpty() || (outputDevice != 0 && outputDevice->isOk())) + && (inputDeviceId.isEmpty() || (inputDevice != 0 && inputDevice->isOk())); + } + + //============================================================================== + WASAPIAudioIODevice (const WASAPIAudioIODevice&); + const WASAPIAudioIODevice& operator= (const WASAPIAudioIODevice&); +}; + + +//============================================================================== +class WASAPIAudioIODeviceType : public AudioIODeviceType +{ +public: + WASAPIAudioIODeviceType() + : AudioIODeviceType (T("Windows Audio")), + hasScanned (false) + { + } + + ~WASAPIAudioIODeviceType() + { + } + + //============================================================================== + void scanForDevices() + { + hasScanned = true; + + outputDeviceNames.clear(); + inputDeviceNames.clear(); + outputDeviceIds.clear(); + inputDeviceIds.clear(); + + ComSmartPtr enumerator; + if (! OK (enumerator.CoCreateInstance (__uuidof (MMDeviceEnumerator), CLSCTX_INPROC_SERVER))) + return; + + const String defaultRenderer = getDefaultEndpoint (enumerator, false); + const String defaultCapture = getDefaultEndpoint (enumerator, true); + + ComSmartPtr deviceCollection; + UINT32 numDevices = 0; + + if (! (OK (enumerator->EnumAudioEndpoints (eAll, DEVICE_STATE_ACTIVE, &deviceCollection)) + && OK (deviceCollection->GetCount (&numDevices)))) + return; + + for (UINT32 i = 0; i < numDevices; ++i) + { + ComSmartPtr device; + if (! OK (deviceCollection->Item (i, &device))) + continue; + + const String deviceId (wasapi_getDeviceID (device)); + + DWORD state = 0; + if (! OK (device->GetState (&state))) + continue; + + if (state != DEVICE_STATE_ACTIVE) + continue; + + String name; + + { + ComSmartPtr properties; + if (! OK (device->OpenPropertyStore (STGM_READ, &properties))) + continue; + + PROPVARIANT value; + PropVariantInit (&value); + if (OK (properties->GetValue (PKEY_Device_FriendlyName, &value))) + name = value.pwszVal; + + PropVariantClear (&value); + } + + const EDataFlow flow = wasapi_getDataFlow (device); + + if (flow == eRender) + { + const int index = (deviceId == defaultRenderer) ? 0 : -1; + outputDeviceIds.insert (index, deviceId); + outputDeviceNames.insert (index, name); + } + else if (flow == eCapture) + { + const int index = (deviceId == defaultCapture) ? 
0 : -1; + inputDeviceIds.insert (index, deviceId); + inputDeviceNames.insert (index, name); + } + } + } + + const StringArray getDeviceNames (const bool wantInputNames) const + { + jassert (hasScanned); // need to call scanForDevices() before doing this + + return wantInputNames ? inputDeviceNames + : outputDeviceNames; + } + + int getDefaultDeviceIndex (const bool /*forInput*/) const + { + jassert (hasScanned); // need to call scanForDevices() before doing this + return 0; + } + + int getIndexOfDevice (AudioIODevice* device, const bool asInput) const + { + jassert (hasScanned); // need to call scanForDevices() before doing this + + WASAPIAudioIODevice* const d = dynamic_cast (device); + return (d == 0) ? -1 : (asInput ? d->inputDeviceIndex : d->outputDeviceIndex); + } + + bool hasSeparateInputsAndOutputs() const { return true; } + + AudioIODevice* createDevice (const String& outputDeviceName, + const String& inputDeviceName) + { + jassert (hasScanned); // need to call scanForDevices() before doing this + + WASAPIAudioIODevice* d = 0; + + const int outputIndex = outputDeviceNames.indexOf (outputDeviceName); + const int inputIndex = inputDeviceNames.indexOf (inputDeviceName); + + if (outputIndex >= 0 || inputIndex >= 0) + { + d = new WASAPIAudioIODevice (outputDeviceName.isNotEmpty() ? outputDeviceName + : inputDeviceName, + outputIndex, outputDeviceIds [outputIndex], + inputIndex, inputDeviceIds [inputIndex]); + + if (! d->initialise()) + deleteAndZero (d); + } + + return d; + } + + //============================================================================== + juce_UseDebuggingNewOperator + + StringArray outputDeviceNames, outputDeviceIds; + StringArray inputDeviceNames, inputDeviceIds; + +private: + bool hasScanned; + + //============================================================================== + static const String getDefaultEndpoint (IMMDeviceEnumerator* const enumerator, const bool forCapture) + { + String s; + IMMDevice* dev = 0; + if (OK (enumerator->GetDefaultAudioEndpoint (forCapture ? 
eCapture : eRender, + eMultimedia, &dev))) + { + WCHAR* deviceId = 0; + if (OK (dev->GetId (&deviceId))) + { + s = String (deviceId); + CoTaskMemFree (deviceId); + } + + dev->Release(); + } + + return s; + } + + //============================================================================== + WASAPIAudioIODeviceType (const WASAPIAudioIODeviceType&); + const WASAPIAudioIODeviceType& operator= (const WASAPIAudioIODeviceType&); +}; + +//============================================================================== +AudioIODeviceType* juce_createAudioIODeviceType_WASAPI() +{ + return new WASAPIAudioIODeviceType(); +} + +#undef logFailure +#undef OK + +#endif diff --git a/src/text/juce_CharacterFunctions.cpp b/src/text/juce_CharacterFunctions.cpp index c2d2478b85..b9f02d098d 100644 --- a/src/text/juce_CharacterFunctions.cpp +++ b/src/text/juce_CharacterFunctions.cpp @@ -431,15 +431,161 @@ int64 CharacterFunctions::getInt64Value (const juce_wchar* s) throw() #endif } +//============================================================================== +static double juce_mulexp10 (const double value, int exponent) throw() +{ + if (exponent == 0) + return value; + + if (value == 0) + return 0; + + const bool negative = (exponent < 0); + if (negative) + exponent = -exponent; + + double result = 1.0, power = 10.0; + for (int bit = 1; exponent != 0; bit <<= 1) + { + if ((exponent & bit) != 0) + { + exponent ^= bit; + result *= power; + if (exponent == 0) + break; + } + power *= power; + } + + return negative ? (value / result) : (value * result); +} + +template +double juce_atof (const CharType* const original) throw() +{ + double result[3] = { 0, 0, 0 }, accumulator[2] = { 0, 0 }; + int exponentAdjustment[2] = { 0, 0 }, exponentAccumulator[2] = { -1, -1 }; + int exponent = 0, decPointIndex = 0, digit = 0; + int lastDigit = 0, numSignificantDigits = 0; + bool isNegative = false, digitsFound = false; + const int maxSignificantDigits = 15 + 2; + + const CharType* s = original; + while (CharacterFunctions::isWhitespace (*s)) + ++s; + + switch (*s) + { + case '-': isNegative = true; // fall-through.. 
+ case '+': ++s; + } + + if (*s == 'n' || *s == 'N' || *s == 'i' || *s == 'I') + return atof (String (original)); // Let the c library deal with NAN and INF + + for (;;) + { + if (CharacterFunctions::isDigit (*s)) + { + lastDigit = digit; + digit = *s++ - '0'; + digitsFound = true; + + if (decPointIndex != 0) + exponentAdjustment[1]++; + + if (numSignificantDigits == 0 && digit == 0) + continue; + + if (++numSignificantDigits > maxSignificantDigits) + { + if (digit > 5) + ++accumulator [decPointIndex]; + else if (digit == 5 && (lastDigit & 1) != 0) + ++accumulator [decPointIndex]; + + if (decPointIndex > 0) + exponentAdjustment[1]--; + else + exponentAdjustment[0]++; + + while (CharacterFunctions::isDigit (*s)) + { + ++s; + if (decPointIndex == 0) + exponentAdjustment[0]++; + } + } + else + { + const double maxAccumulatorValue = (double) ((UINT_MAX - 9) / 10); + if (accumulator [decPointIndex] > maxAccumulatorValue) + { + result [decPointIndex] = juce_mulexp10 (result [decPointIndex], exponentAccumulator [decPointIndex]) + + accumulator [decPointIndex]; + accumulator [decPointIndex] = 0; + exponentAccumulator [decPointIndex] = 0; + } + + accumulator [decPointIndex] = accumulator[decPointIndex] * 10 + digit; + exponentAccumulator [decPointIndex]++; + } + } + else if (decPointIndex == 0 && *s == '.') + { + ++s; + decPointIndex = 1; + + if (numSignificantDigits > maxSignificantDigits) + { + while (CharacterFunctions::isDigit (*s)) + ++s; + break; + } + } + else + { + break; + } + } + + result[0] = juce_mulexp10 (result[0], exponentAccumulator[0]) + accumulator[0]; + + if (decPointIndex != 0) + result[1] = juce_mulexp10 (result[1], exponentAccumulator[1]) + accumulator[1]; + + if ((*s == 'e' || *s == 'E') && digitsFound) + { + bool negativeExponent = false; + + switch (*++s) + { + case '-': negativeExponent = true; // fall-through.. + case '+': ++s; + } + + while (CharacterFunctions::isDigit (*s)) + exponent = (exponent * 10) + (*s++ - '0'); + + if (negativeExponent) + exponent = -exponent; + } + + double r = juce_mulexp10 (result[0], exponent + exponentAdjustment[0]); + if (decPointIndex != 0) + r += juce_mulexp10 (result[1], exponent - exponentAdjustment[1]); + + return isNegative ? -r : r; +} + double CharacterFunctions::getDoubleValue (const char* const s) throw() { - return atof (s); + return juce_atof (s); } double CharacterFunctions::getDoubleValue (const juce_wchar* const s) throw() { - wchar_t* endChar; - return wcstod (s, &endChar); + return juce_atof (s); } //==============================================================================
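As a rough illustration of what the new parser provides (a sketch only, not part of the patch; the helper name and locale string are invented, and locale names vary by platform), the conversion no longer depends on the C runtime's decimal-separator setting the way atof() and wcstod() do:

// Sketch: with a comma-as-decimal-separator locale active, atof ("1.75") would stop
// at the '.' and return 1.0, whereas the custom parser always treats '.' as the decimal
// point and also handles an exponent suffix.
#include <clocale>
#include <cmath>

static void exampleLocaleIndependentParsing()
{
    setlocale (LC_NUMERIC, "de_DE");   // locale name is platform-dependent

    const double a = CharacterFunctions::getDoubleValue ("1.75");     // expected: 1.75
    const double b = CharacterFunctions::getDoubleValue ("-3.5e2");   // expected: -350.0

    jassert (std::fabs (a - 1.75) < 1e-12 && std::fabs (b + 350.0) < 1e-12);
}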