@@ -31,6 +31,8 @@
 #include "juce_audio_basics.h"
+#include <juce_core/containers/juce_Optional.h>
 #if JUCE_MINGW && ! defined (alloca)
  #define alloca __builtin_alloca
 #endif
@@ -120,4 +120,4 @@
 #include "sources/juce_ReverbAudioSource.h"
 #include "sources/juce_ToneGeneratorAudioSource.h"
 #include "synthesisers/juce_Synthesiser.h"
-#include "audio_play_head/juce_AudioPlayHead.h"
+#include "audio_play_head/juce_AudioPlayHead.h"
@@ -0,0 +1,80 @@
+/*
+  ==============================================================================
+   This file is part of the JUCE library.
+   Copyright (c) 2022 - Raw Material Software Limited
+
+   JUCE is an open source library subject to commercial or open-source
+   licensing.
+
+   The code included in this file is provided under the terms of the ISC license
+   http://www.isc.org/downloads/software-support-policy/isc-license. Permission
+   To use, copy, modify, and/or distribute this software for any purpose with or
+   without fee is hereby granted provided that the above copyright notice and
+   this permission notice appear in all copies.
+
+   JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER
+   EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE
+   DISCLAIMED.
+  ==============================================================================
+*/
+
+// This file will be included directly by macOS/iOS-specific .cpps
+#pragma once
+
+#if ! DOXYGEN
+
+#include <mach/mach_time.h>
+
+namespace juce
+{
+
+struct CoreAudioTimeConversions
+{
+public:
+    CoreAudioTimeConversions()
+    {
+        mach_timebase_info_data_t info{};
+        mach_timebase_info (&info);
+        numerator   = info.numer;
+        denominator = info.denom;
+    }
+
+    uint64_t hostTimeToNanos (uint64_t hostTime) const
+    {
+        return multiplyByRatio (hostTime, numerator, denominator);
+    }
+
+    uint64_t nanosToHostTime (uint64_t nanos) const
+    {
+        return multiplyByRatio (nanos, denominator, numerator);
+    }
+
+private:
+    // Adapted from CAHostTimeBase.h in the Core Audio Utility Classes
+    static uint64_t multiplyByRatio (uint64_t toMultiply, uint64_t numerator, uint64_t denominator)
+    {
+       #if defined (__SIZEOF_INT128__)
+        unsigned __int128
+       #else
+        long double
+       #endif
+            result = toMultiply;
+
+        if (numerator != denominator)
+        {
+            result *= numerator;
+            result /= denominator;
+        }
+
+        return (uint64_t) result;
+    }
+
+    uint64_t numerator = 0, denominator = 0;
+};
+
+} // namespace juce
+
+#endif
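For orientation, here is a rough standalone sketch (not part of the patch) of how this helper can be used: it converts the machine's current mach host time into nanoseconds, which is essentially what the CoreAudio device code below does with the timestamps it receives. The function name is hypothetical.

    // Hypothetical usage example: express the current mach host time in nanoseconds.
    #include <mach/mach_time.h>
    #include <cstdint>

    static uint64_t currentHostTimeNs()
    {
        static const juce::CoreAudioTimeConversions conversions;  // timebase ratio is queried once in the constructor
        return conversions.hostTimeToNanos (mach_absolute_time());
    }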
@@ -69,9 +69,14 @@ public:
     CallbackHandler (AudioDeviceManager& adm) noexcept  : owner (adm) {}

private:
-    void audioDeviceIOCallback (const float** ins, int numIns, float** outs, int numOuts, int numSamples) override
+    void audioDeviceIOCallbackWithContext (const float** ins,
+                                           int numIns,
+                                           float** outs,
+                                           int numOuts,
+                                           int numSamples,
+                                           const AudioIODeviceCallbackContext& context) override
     {
-        owner.audioDeviceIOCallbackInt (ins, numIns, outs, numOuts, numSamples);
+        owner.audioDeviceIOCallbackInt (ins, numIns, outs, numOuts, numSamples, context);
     }

     void audioDeviceAboutToStart (AudioIODevice* device) override
@@ -900,7 +905,8 @@ void AudioDeviceManager::audioDeviceIOCallbackInt (const float** inputChannelDat
                                                    int numInputChannels,
                                                    float** outputChannelData,
                                                    int numOutputChannels,
-                                                   int numSamples)
+                                                   int numSamples,
+                                                   const AudioIODeviceCallbackContext& context)
 {
     const ScopedLock sl (audioCallbackLock);
@@ -912,15 +918,23 @@ void AudioDeviceManager::audioDeviceIOCallbackInt (const float** inputChannelDat
         tempBuffer.setSize (jmax (1, numOutputChannels), jmax (1, numSamples), false, false, true);

-        callbacks.getUnchecked(0)->audioDeviceIOCallback (inputChannelData, numInputChannels,
-                                                          outputChannelData, numOutputChannels, numSamples);
+        callbacks.getUnchecked(0)->audioDeviceIOCallbackWithContext (inputChannelData,
+                                                                     numInputChannels,
+                                                                     outputChannelData,
+                                                                     numOutputChannels,
+                                                                     numSamples,
+                                                                     context);

         auto** tempChans = tempBuffer.getArrayOfWritePointers();

         for (int i = callbacks.size(); --i > 0;)
         {
-            callbacks.getUnchecked(i)->audioDeviceIOCallback (inputChannelData, numInputChannels,
-                                                              tempChans, numOutputChannels, numSamples);
+            callbacks.getUnchecked(i)->audioDeviceIOCallbackWithContext (inputChannelData,
+                                                                         numInputChannels,
+                                                                         tempChans,
+                                                                         numOutputChannels,
+                                                                         numSamples,
+                                                                         context);

             for (int chan = 0; chan < numOutputChannels; ++chan)
             {
@@ -526,8 +526,12 @@ private:
     class CallbackHandler;
     std::unique_ptr<CallbackHandler> callbackHandler;

-    void audioDeviceIOCallbackInt (const float** inputChannelData, int totalNumInputChannels,
-                                   float** outputChannelData, int totalNumOutputChannels, int numSamples);
+    void audioDeviceIOCallbackInt (const float** inputChannelData,
+                                   int totalNumInputChannels,
+                                   float** outputChannelData,
+                                   int totalNumOutputChannels,
+                                   int numSamples,
+                                   const AudioIODeviceCallbackContext& context);
     void audioDeviceAboutToStartInt (AudioIODevice*);
     void audioDeviceStoppedInt();
     void audioDeviceErrorInt (const String&);
@@ -25,6 +25,14 @@ namespace juce
 class AudioIODevice;

+/** Additional information that may be passed to the AudioIODeviceCallback. */
+struct AudioIODeviceCallbackContext
+{
+    /** If the host provides this information, this field will be set to point to
+        an integer holding the current value; otherwise, this will be nullptr.
+    */
+    const uint64_t* hostTimeNs = nullptr;
+};
+
 //==============================================================================
 /**
@@ -87,7 +95,26 @@ public:
                                        int numInputChannels,
                                        float** outputChannelData,
                                        int numOutputChannels,
-                                       int numSamples) = 0;
+                                       int numSamples)
+    {
+        ignoreUnused (inputChannelData, numInputChannels, outputChannelData, numOutputChannels, numSamples);
+    }
+
+    /** The same as audioDeviceIOCallback(), but with an additional context argument.
+        The default implementation of this function will call audioDeviceIOCallback(),
+        but you can override this function if you need to make use of the context information.
+    */
+    virtual void audioDeviceIOCallbackWithContext (const float** inputChannelData,
+                                                   int numInputChannels,
+                                                   float** outputChannelData,
+                                                   int numOutputChannels,
+                                                   int numSamples,
+                                                   const AudioIODeviceCallbackContext& context)
+    {
+        audioDeviceIOCallback (inputChannelData, numInputChannels, outputChannelData, numOutputChannels, numSamples);
+        ignoreUnused (context);
+    }

     /** Called to indicate that the device is about to start calling back.
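As a rough illustration of the new API (this example is not part of the patch), a client callback can opt into the extra information by overriding the context-aware method; only some backends supply a host time, so the pointer has to be checked and its value copied straight away:

    struct TimingAwareCallback : public juce::AudioIODeviceCallback
    {
        void audioDeviceIOCallbackWithContext (const float** inputChannelData, int numInputChannels,
                                               float** outputChannelData, int numOutputChannels,
                                               int numSamples,
                                               const juce::AudioIODeviceCallbackContext& context) override
        {
            // Only some backends (e.g. CoreAudio/iOS in this patch) fill in hostTimeNs.
            if (context.hostTimeNs != nullptr)
                lastHostTimeNs = *context.hostTimeNs;   // copy immediately; the pointee is only valid during this callback

            juce::ignoreUnused (inputChannelData, numInputChannels);

            // Produce silence as a placeholder for real rendering.
            for (int ch = 0; ch < numOutputChannels; ++ch)
                juce::FloatVectorOperations::clear (outputChannelData[ch], numSamples);
        }

        void audioDeviceAboutToStart (juce::AudioIODevice*) override {}
        void audioDeviceStopped() override {}

        uint64_t lastHostTimeNs = 0;
    };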
@@ -357,9 +357,11 @@ public:
         if (callback != nullptr)
         {
-            callback->audioDeviceIOCallback (inputChannelBuffer.getArrayOfReadPointers(), numClientInputChannels,
-                                             outputChannelBuffer.getArrayOfWritePointers(), numClientOutputChannels,
-                                             actualBufferSize);
+            callback->audioDeviceIOCallbackWithContext (inputChannelBuffer.getArrayOfReadPointers(),
+                                                        numClientInputChannels,
+                                                        outputChannelBuffer.getArrayOfWritePointers(),
+                                                        numClientOutputChannels,
+                                                        actualBufferSize, {});
         }
         else
         {
@@ -428,8 +428,12 @@ private:
         {
             if (auto* cb = callback.exchange (nullptr))
             {
-                cb->audioDeviceIOCallback (inputChannelData, numInputChannels,
-                                           outputChannelData, numOutputChannels, numFrames);
+                cb->audioDeviceIOCallbackWithContext (inputChannelData,
+                                                      numInputChannels,
+                                                      outputChannelData,
+                                                      numOutputChannels,
+                                                      numFrames,
+                                                      {});
                 callback.set (cb);
             }
             else
@@ -605,7 +605,7 @@ public:
         {
             if (auto* cb = callback.exchange (nullptr))
             {
-                cb->audioDeviceIOCallback (inputChannelData, inputChannels, outputChannelData, outputChannels, bufferSize);
+                cb->audioDeviceIOCallbackWithContext (inputChannelData, inputChannels, outputChannelData, outputChannels, bufferSize, {});
                 callback.set (cb);
             }
             else
@@ -20,6 +20,8 @@
   ==============================================================================
 */

+#include <juce_audio_basics/native/juce_mac_CoreAudioTimeConversions.h>
+
 namespace juce
 {
@@ -900,9 +902,14 @@ struct iOSAudioIODevice::Pimpl : public AudioPlayHead,
                 zeromem (inputData[c], channelDataSize);
         }

-        callback->audioDeviceIOCallback ((const float**) inputData, channelData.inputs ->numActiveChannels,
-                                         outputData, channelData.outputs->numActiveChannels,
-                                         (int) numFrames);
+        const auto nanos = time != nullptr ? timeConversions.hostTimeToNanos (time->mHostTime) : 0;
+
+        callback->audioDeviceIOCallbackWithContext ((const float**) inputData,
+                                                    channelData.inputs ->numActiveChannels,
+                                                    outputData,
+                                                    channelData.outputs->numActiveChannels,
+                                                    (int) numFrames,
+                                                    { (time != nullptr && (time->mFlags & kAudioTimeStampHostTimeValid) != 0) ? &nanos : nullptr });

         for (int c = 0; c < channelData.outputs->numActiveChannels; ++c)
         {
@@ -1329,6 +1336,8 @@ struct iOSAudioIODevice::Pimpl : public AudioPlayHead,
         AudioBuffer<float> audioData { 0, 0 };
     };

+    CoreAudioTimeConversions timeConversions;
+
     IOChannelData channelData;

     BigInteger requestedInputChannels, requestedOutputChannels;
@@ -711,11 +711,12 @@ public:
         if (callback != nullptr)
         {
-            callback->audioDeviceIOCallback (inputChannelDataForCallback.getRawDataPointer(),
-                                             inputChannelDataForCallback.size(),
-                                             outputChannelDataForCallback.getRawDataPointer(),
-                                             outputChannelDataForCallback.size(),
-                                             bufferSize);
+            callback->audioDeviceIOCallbackWithContext (inputChannelDataForCallback.getRawDataPointer(),
+                                                        inputChannelDataForCallback.size(),
+                                                        outputChannelDataForCallback.getRawDataPointer(),
+                                                        outputChannelDataForCallback.size(),
+                                                        bufferSize,
+                                                        {});
         }
         else
         {
@@ -433,9 +433,12 @@ private:
                 channelOutBuffer[ch] = &context.analogOut[(Frames) (ch - analogChannelStart) * context.audioFrames];
         }

-        callback->audioDeviceIOCallback (channelInBuffer.getData(), actualNumberOfInputs,
-                                         channelOutBuffer.getData(), actualNumberOfOutputs,
-                                         (int) context.audioFrames);
+        callback->audioDeviceIOCallbackWithContext (channelInBuffer.getData(),
+                                                    actualNumberOfInputs,
+                                                    channelOutBuffer.getData(),
+                                                    actualNumberOfOutputs,
+                                                    (int) context.audioFrames,
+                                                    {});
     }
 }
@@ -462,8 +462,12 @@ private:
     if (callback != nullptr)
     {
         if ((numActiveInChans + numActiveOutChans) > 0)
-            callback->audioDeviceIOCallback (const_cast<const float**> (inChans.getData()), numActiveInChans,
-                                             outChans, numActiveOutChans, numSamples);
+            callback->audioDeviceIOCallbackWithContext (const_cast<const float**> (inChans.getData()),
+                                                        numActiveInChans,
+                                                        outChans,
+                                                        numActiveOutChans,
+                                                        numSamples,
+                                                        {});
     }
     else
     {
@@ -20,6 +20,8 @@
   ==============================================================================
 */

+#include <juce_audio_basics/native/juce_mac_CoreAudioTimeConversions.h>
+
 namespace juce
 {
@@ -746,7 +748,8 @@ public:
     double getSampleRate() const    { return sampleRate; }
     int getBufferSize() const       { return bufferSize; }

-    void audioCallback (const AudioBufferList* inInputData,
+    void audioCallback (const AudioTimeStamp* timeStamp,
+                        const AudioBufferList* inInputData,
                         AudioBufferList* outOutputData)
     {
         const ScopedLock sl (callbackLock);
@@ -778,11 +781,14 @@ public:
                 }
             }

-            callback->audioDeviceIOCallback (const_cast<const float**> (tempInputBuffers.get()),
-                                             numInputChans,
-                                             tempOutputBuffers,
-                                             numOutputChans,
-                                             bufferSize);
+            const auto nanos = timeStamp != nullptr ? timeConversions.hostTimeToNanos (timeStamp->mHostTime) : 0;
+
+            callback->audioDeviceIOCallbackWithContext (const_cast<const float**> (tempInputBuffers.get()),
+                                                        numInputChans,
+                                                        tempOutputBuffers,
+                                                        numOutputChans,
+                                                        bufferSize,
+                                                        { timeStamp != nullptr ? &nanos : nullptr });

             for (int i = numOutputChans; --i >= 0;)
             {
@@ -838,6 +844,7 @@ public:
     AudioDeviceIOProcID audioProcID = {};

private:
+    CoreAudioTimeConversions timeConversions;
     AudioIODeviceCallback* callback = nullptr;
     CriticalSection callbackLock;
     AudioDeviceID deviceID;
@@ -876,14 +883,14 @@ private:
     }

     static OSStatus audioIOProc (AudioDeviceID /*inDevice*/,
-                                 const AudioTimeStamp* /*inNow*/,
+                                 const AudioTimeStamp* inNow,
                                  const AudioBufferList* inInputData,
                                  const AudioTimeStamp* /*inInputTime*/,
                                  AudioBufferList* outOutputData,
                                  const AudioTimeStamp* /*inOutputTime*/,
                                  void* device)
     {
-        static_cast<CoreAudioInternal*> (device)->audioCallback (inInputData, outOutputData);
+        static_cast<CoreAudioInternal*> (device)->audioCallback (inNow, inInputData, outOutputData);
         return noErr;
     }
@@ -1624,8 +1631,12 @@ private:
         const ScopedLock sl (callbackLock);

         if (callback != nullptr)
-            callback->audioDeviceIOCallback ((const float**) inputChans.getRawDataPointer(), numInputChans,
-                                             outputChans.getRawDataPointer(), numOutputChans, numSamples);
+            callback->audioDeviceIOCallbackWithContext ((const float**) inputChans.getRawDataPointer(),
+                                                        numInputChans,
+                                                        outputChans.getRawDataPointer(),
+                                                        numOutputChans,
+                                                        numSamples,
+                                                        {}); // Can't predict when the next output callback will happen
         else
             didCallback = false;
     }
@@ -1920,9 +1931,12 @@ private:
             outputFifo.finishedWrite (size1 + size2);
         }

-        void audioDeviceIOCallback (const float** inputChannelData, int numInputChannels,
-                                    float** outputChannelData, int numOutputChannels,
-                                    int numSamples) override
+        void audioDeviceIOCallbackWithContext (const float** inputChannelData,
+                                               int numInputChannels,
+                                               float** outputChannelData,
+                                               int numOutputChannels,
+                                               int numSamples,
+                                               const AudioIODeviceCallbackContext&) override
         {
             if (numInputChannels > 0)
             {
@@ -1326,8 +1326,12 @@ private:
                     inputFormat[i].convertToFloat (infos[i].buffers[bufferIndex], inBuffers[i], samps);
             }

-            currentCallback->audioDeviceIOCallback (const_cast<const float**> (inBuffers.getData()), numActiveInputChans,
-                                                    outBuffers, numActiveOutputChans, samps);
+            currentCallback->audioDeviceIOCallbackWithContext (const_cast<const float**> (inBuffers.getData()),
+                                                               numActiveInputChans,
+                                                               outBuffers,
+                                                               numActiveOutputChans,
+                                                               samps,
+                                                               {});

             for (int i = 0; i < numActiveOutputChans; ++i)
             {
@@ -1016,9 +1016,12 @@ public:
             if (isStarted)
             {
-                callback->audioDeviceIOCallback (inputBuffers.getArrayOfReadPointers(), inputBuffers.getNumChannels(),
-                                                 outputBuffers.getArrayOfWritePointers(), outputBuffers.getNumChannels(),
-                                                 bufferSizeSamples);
+                callback->audioDeviceIOCallbackWithContext (inputBuffers.getArrayOfReadPointers(),
+                                                            inputBuffers.getNumChannels(),
+                                                            outputBuffers.getArrayOfWritePointers(),
+                                                            outputBuffers.getNumChannels(),
+                                                            bufferSizeSamples,
+                                                            {});
             }
             else
             {
@@ -1515,8 +1515,12 @@ public:
             const ScopedTryLock sl (startStopLock);

             if (sl.isLocked() && isStarted)
-                callback->audioDeviceIOCallback (const_cast<const float**> (inputBuffers), numInputBuffers,
-                                                 outputBuffers, numOutputBuffers, bufferSize);
+                callback->audioDeviceIOCallbackWithContext (const_cast<const float**> (inputBuffers),
+                                                            numInputBuffers,
+                                                            outputBuffers,
+                                                            numOutputBuffers,
+                                                            bufferSize,
+                                                            {});
             else
                 outs.clear();
         }
@@ -77,6 +77,7 @@ JUCE_END_IGNORE_WARNINGS_GCC_LIKE
 #include "../utility/juce_CarbonVisibility.h"

 #include <juce_audio_basics/native/juce_mac_CoreAudioLayouts.h>
+#include <juce_audio_basics/native/juce_mac_CoreAudioTimeConversions.h>
 #include <juce_audio_processors/format_types/juce_LegacyAudioParameter.cpp>
 #include <juce_audio_processors/format_types/juce_AU_Shared.h>
@@ -1287,6 +1288,22 @@ public:
     {
         lastTimeStamp = inTimeStamp;

+        jassert (! juceFilter->getHostTimeNs());
+
+        if ((inTimeStamp.mFlags & kAudioTimeStampHostTimeValid) != 0)
+        {
+            const auto timestamp = timeConversions.hostTimeToNanos (inTimeStamp.mHostTime);
+            juceFilter->setHostTimeNanos (&timestamp);
+        }
+
+        struct AtEndOfScope
+        {
+            ~AtEndOfScope() { proc.setHostTimeNanos (nullptr); }
+            AudioProcessor& proc;
+        };
+
+        const AtEndOfScope scope { *juceFilter };
+
         // prepare buffers
         {
             pullInputAudio (ioActionFlags, inTimeStamp, nFrames);
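The AtEndOfScope guard above (and its twins in the AUv3, VST2, VST3 and AudioProcessorPlayer changes further down) is simply an RAII reset. Purely for illustration, a hypothetical named helper expressing the same idea might look like this; it is not part of the patch:

    // Illustrative equivalent of the guard used above: set the host time for the
    // duration of one render call, then clear it when the scope unwinds.
    class ScopedHostTimeNanos
    {
    public:
        ScopedHostTimeNanos (juce::AudioProcessor& p, const uint64_t* timeNs) : proc (p)
        {
            proc.setHostTimeNanos (timeNs);
        }

        ~ScopedHostTimeNanos()  { proc.setHostTimeNanos (nullptr); }

    private:
        juce::AudioProcessor& proc;
    };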
@@ -1816,6 +1833,7 @@ private:
     // According to the docs, this is the maximum size of a MIDIPacketList.
     static constexpr UInt32 packetListBytes = 65536;

+    CoreAudioTimeConversions timeConversions;
     AudioUnitEvent auEvent;
     mutable Array<AUPreset> presetsArray;
     CriticalSection incomingMidiLock;
@@ -51,6 +51,7 @@
 #include <juce_graphics/native/juce_mac_CoreGraphicsHelpers.h>
 #include <juce_audio_basics/native/juce_mac_CoreAudioLayouts.h>
+#include <juce_audio_basics/native/juce_mac_CoreAudioTimeConversions.h>
 #include <juce_audio_processors/format_types/juce_LegacyAudioParameter.cpp>
 #include <juce_audio_processors/format_types/juce_AU_Shared.h>
@@ -1521,6 +1522,23 @@ private:
         const auto numProcessorBusesOut = AudioUnitHelpers::getBusCount (processor, false);

+        if (timestamp != nullptr)
+        {
+            if ((timestamp->mFlags & kAudioTimeStampHostTimeValid) != 0)
+            {
+                const auto convertedTime = timeConversions.hostTimeToNanos (timestamp->mHostTime);
+                getAudioProcessor().setHostTimeNanos (&convertedTime);
+            }
+        }
+
+        struct AtEndOfScope
+        {
+            ~AtEndOfScope() { proc.setHostTimeNanos (nullptr); }
+            AudioProcessor& proc;
+        };
+
+        const AtEndOfScope scope { getAudioProcessor() };
+
         if (lastTimeStamp.mSampleTime != timestamp->mSampleTime)
         {
             // process params and incoming midi (only once for a given timestamp)
@@ -1764,6 +1782,7 @@ private:
     int totalInChannels, totalOutChannels;

+    CoreAudioTimeConversions timeConversions;
     std::unique_ptr<AUAudioUnitBusArray, NSObjectDeleter> inputBusses, outputBusses;
     ObjCBlock<AUImplementorValueObserver> paramObserver;
@@ -441,11 +441,12 @@ private:
         inner.audioDeviceAboutToStart (device);
     }

-    void audioDeviceIOCallback (const float** inputChannelData,
-                                int numInputChannels,
-                                float** outputChannelData,
-                                int numOutputChannels,
-                                int numSamples) override
+    void audioDeviceIOCallbackWithContext (const float** inputChannelData,
+                                           int numInputChannels,
+                                           float** outputChannelData,
+                                           int numOutputChannels,
+                                           int numSamples,
+                                           const AudioIODeviceCallbackContext& context) override
     {
         jassertquiet ((int) storedInputChannels.size()  == numInputChannels);
         jassertquiet ((int) storedOutputChannels.size() == numOutputChannels);
@@ -459,11 +460,12 @@ private:
             initChannelPointers (inputChannelData,  storedInputChannels,  position);
             initChannelPointers (outputChannelData, storedOutputChannels, position);

-            inner.audioDeviceIOCallback (storedInputChannels.data(),
-                                         (int) storedInputChannels.size(),
-                                         storedOutputChannels.data(),
-                                         (int) storedOutputChannels.size(),
-                                         blockLength);
+            inner.audioDeviceIOCallbackWithContext (storedInputChannels.data(),
+                                                    (int) storedInputChannels.size(),
+                                                    storedOutputChannels.data(),
+                                                    (int) storedOutputChannels.size(),
+                                                    blockLength,
+                                                    context);

             position += blockLength;
         }
@@ -591,11 +593,12 @@ private:
     };

     //==============================================================================
-    void audioDeviceIOCallback (const float** inputChannelData,
-                                int numInputChannels,
-                                float** outputChannelData,
-                                int numOutputChannels,
-                                int numSamples) override
+    void audioDeviceIOCallbackWithContext (const float** inputChannelData,
+                                           int numInputChannels,
+                                           float** outputChannelData,
+                                           int numOutputChannels,
+                                           int numSamples,
+                                           const AudioIODeviceCallbackContext& context) override
     {
         if (muteInput)
         {
@@ -603,8 +606,12 @@ private:
             inputChannelData = emptyBuffer.getArrayOfReadPointers();
         }

-        player.audioDeviceIOCallback (inputChannelData, numInputChannels,
-                                      outputChannelData, numOutputChannels, numSamples);
+        player.audioDeviceIOCallbackWithContext (inputChannelData,
+                                                 numInputChannels,
+                                                 outputChannelData,
+                                                 numOutputChannels,
+                                                 numSamples,
+                                                 context);
     }

     void audioDeviceAboutToStart (AudioIODevice* device) override
@@ -18,6 +18,7 @@
 #include <juce_core/system/juce_CompilerWarnings.h>
 #include <juce_core/system/juce_TargetPlatform.h>
+#include <juce_core/containers/juce_Optional.h>
 #include "../utility/juce_CheckSettingMacros.h"

 #if JucePlugin_Build_VST
@@ -354,7 +355,7 @@ public:
         jassert (isProcessing);

         // (tragically, some hosts actually need this, although it's stupid to have
-        // to do it here..)
+        // to do it here.)
         if (! isProcessing)
             resume();
@@ -389,6 +390,16 @@ public:
         }
         else
         {
+            updateCallbackContextInfo();
+
+            struct AtEndOfScope
+            {
+                ~AtEndOfScope() { proc.setHostTimeNanos (nullptr); }
+                AudioProcessor& proc;
+            };
+
+            const AtEndOfScope scope { *processor };
+
             int i;
             for (i = 0; i < numOut; ++i)
             {
@@ -589,25 +600,28 @@ public:
         }
     }

-    //==============================================================================
-    bool getCurrentPosition (AudioPlayHead::CurrentPositionInfo& info) override
+    void updateCallbackContextInfo()
     {
         const Vst2::VstTimeInfo* ti = nullptr;

         if (hostCallback != nullptr)
         {
             int32 flags = Vst2::kVstPpqPosValid | Vst2::kVstTempoValid
-                        | Vst2::kVstBarsValid    | Vst2::kVstCyclePosValid
-                        | Vst2::kVstTimeSigValid | Vst2::kVstSmpteValid
-                        | Vst2::kVstClockValid;
+                        | Vst2::kVstBarsValid    | Vst2::kVstCyclePosValid
+                        | Vst2::kVstTimeSigValid | Vst2::kVstSmpteValid
+                        | Vst2::kVstClockValid   | Vst2::kVstNanosValid;

             auto result = hostCallback (&vstEffect, Vst2::audioMasterGetTime, 0, flags, nullptr, 0);
             ti = reinterpret_cast<Vst2::VstTimeInfo*> (result);
         }

         if (ti == nullptr || ti->sampleRate <= 0)
-            return false;
+        {
+            currentPosition.reset();
+            return;
+        }

+        auto& info = currentPosition.emplace();
         info.bpm = (ti->flags & Vst2::kVstTempoValid) != 0 ? ti->tempo : 0.0;

         if ((ti->flags & Vst2::kVstTimeSigValid) != 0)
@@ -675,6 +689,20 @@ public:
             info.ppqLoopEnd = 0;
         }

+        if ((ti->flags & Vst2::kVstNanosValid) != 0)
+        {
+            const auto nanos = (uint64_t) ti->nanoSeconds;
+            processor->setHostTimeNanos (&nanos);
+        }
+    }
+
+    //==============================================================================
+    bool getCurrentPosition (AudioPlayHead::CurrentPositionInfo& info) override
+    {
+        if (! currentPosition.hasValue())
+            return false;
+
+        info = *currentPosition;
         return true;
     }
@@ -2068,6 +2096,7 @@ private:
     Vst2::ERect editorRect;
     MidiBuffer midiEvents;
     VSTMidiEventList outgoingEvents;
+    Optional<CurrentPositionInfo> currentPosition;

     LegacyAudioParametersWrapper juceParameters;
@@ -3170,6 +3170,12 @@ public:
             {
                 processContext = *data.processContext;

+                if ((processContext.state & Vst::ProcessContext::kSystemTimeValid) != 0)
+                {
+                    const auto timestamp = (uint64_t) processContext.systemTime;
+                    getPluginInstance().setHostTimeNanos (&timestamp);
+                }
+
                 if (juceVST3EditController != nullptr)
                     juceVST3EditController->vst3IsPlaying = (processContext.state & Vst::ProcessContext::kPlaying) != 0;
             }
@@ -3181,6 +3187,14 @@ public:
                     juceVST3EditController->vst3IsPlaying = false;
             }

+            struct AtEndOfScope
+            {
+                ~AtEndOfScope() { proc.setHostTimeNanos (nullptr); }
+                AudioProcessor& proc;
+            };
+
+            const AtEndOfScope scope { getPluginInstance() };
+
             midiBuffer.clear();

             if (data.inputParameterChanges != nullptr)
@@ -34,6 +34,7 @@ JUCE_BEGIN_IGNORE_WARNINGS_GCC_LIKE ("-Wdeprecated-declarations")
 #include <CoreAudioKit/AUViewController.h>

+#include <juce_audio_basics/native/juce_mac_CoreAudioTimeConversions.h>
 #include <juce_audio_basics/native/juce_mac_CoreAudioLayouts.h>
 #include <juce_audio_basics/midi/juce_MidiDataConcatenator.h>
 #include "juce_AU_Shared.h"
@@ -332,39 +333,25 @@ namespace AudioUnitFormatHelpers
   #endif

    template <typename Value>
-   struct BasicOptional
-   {
-       BasicOptional() = default;
-
-       explicit constexpr BasicOptional (Value&& v) : value (std::move (v)), isValid (true) {}
-       explicit constexpr BasicOptional (const Value& v) : value (v), isValid (true) {}
-
-       explicit constexpr operator bool() const noexcept { return isValid; }
-
-       Value value;
-       bool isValid { false };
-   };
-
-   template <typename Value>
-   static BasicOptional<Value> tryGetProperty (AudioUnit inUnit,
-                                               AudioUnitPropertyID inID,
-                                               AudioUnitScope inScope,
-                                               AudioUnitElement inElement)
+   static Optional<Value> tryGetProperty (AudioUnit inUnit,
+                                          AudioUnitPropertyID inID,
+                                          AudioUnitScope inScope,
+                                          AudioUnitElement inElement)
    {
        Value data;
        auto size = (UInt32) sizeof (Value);

        if (AudioUnitGetProperty (inUnit, inID, inScope, inElement, &data, &size) == noErr)
-           return BasicOptional<Value> (data);
+           return data;

-       return BasicOptional<Value>();
+       return {};
    }

    static UInt32 getElementCount (AudioUnit comp, AudioUnitScope scope) noexcept
    {
        const auto count = tryGetProperty<UInt32> (comp, kAudioUnitProperty_ElementCount, scope, 0);
-       jassert (count.isValid);
-       return count.value;
+       jassert (count);
+       return *count;
    }

    /* The plugin may expect its channels in a particular order, reported to the host
@@ -390,8 +377,8 @@ namespace AudioUnitFormatHelpers
        if (const auto layout = tryGetProperty<AudioChannelLayout> (comp, kAudioUnitProperty_AudioChannelLayout, scope, busIndex))
        {
-           const auto juceChannelOrder = CoreAudioLayouts::fromCoreAudio (layout.value);
-           const auto auChannelOrder   = CoreAudioLayouts::getCoreAudioLayoutChannels (layout.value);
+           const auto juceChannelOrder = CoreAudioLayouts::fromCoreAudio (*layout);
+           const auto auChannelOrder   = CoreAudioLayouts::getCoreAudioLayoutChannels (*layout);

            for (auto juceChannelIndex = 0; juceChannelIndex < juceChannelOrder.size(); ++juceChannelIndex)
                busMap.push_back ((size_t) auChannelOrder.indexOf (juceChannelOrder.getTypeOfChannel (juceChannelIndex)));
@@ -1090,7 +1077,7 @@ public:
        zerostruct (timeStamp);
        timeStamp.mSampleTime = 0;
-       timeStamp.mHostTime = GetCurrentHostTime (0, newSampleRate, isAUv3);
+       timeStamp.mHostTime = mach_absolute_time();
        timeStamp.mFlags = kAudioTimeStampSampleTimeValid | kAudioTimeStampHostTimeValid;

        wasPlaying = false;
@@ -1148,6 +1135,17 @@ public:
    void processAudio (AudioBuffer<float>& buffer, MidiBuffer& midiMessages, bool processBlockBypassedCalled)
    {
+       if (const auto* hostTimeNs = getHostTimeNs())
+       {
+           timeStamp.mHostTime = *hostTimeNs;
+           timeStamp.mFlags |= kAudioTimeStampHostTimeValid;
+       }
+       else
+       {
+           timeStamp.mHostTime = 0;
+           timeStamp.mFlags &= ~kAudioTimeStampHostTimeValid;
+       }
+
        // If these are hit, we might allocate in the process block!
        jassert (buffer.getNumChannels() <= preparedChannels);
        jassert (buffer.getNumSamples()  <= preparedSamples);
@@ -1156,7 +1154,7 @@ public:
        // to the following bus.
        inputBuffer.makeCopyOf (buffer, true);

-       auto numSamples = buffer.getNumSamples();
+       const auto numSamples = buffer.getNumSamples();

        if (auSupportsBypass)
        {
@@ -1170,8 +1168,6 @@ public:
        if (prepared)
        {
-           timeStamp.mHostTime = GetCurrentHostTime (numSamples, getSampleRate(), isAUv3);
-
            const auto numOutputBuses = getBusCount (false);

            for (int i = 0; i < numOutputBuses; ++i)
@@ -1615,6 +1611,8 @@ private:
    friend class AudioUnitPluginWindowCocoa;
    friend class AudioUnitPluginFormat;

+   CoreAudioTimeConversions timeConversions;
+
    AudioComponentDescription componentDesc;
    AudioComponent auComponent;
    String pluginName, manufacturer, version;
@@ -2164,29 +2162,6 @@ private:
    }

    //==============================================================================
-   static UInt64 GetCurrentHostTime (int numSamples, double sampleRate, bool isAUv3) noexcept
-   {
-      #if ! JUCE_IOS
-       if (! isAUv3)
-           return AudioGetCurrentHostTime();
-      #else
-       ignoreUnused (isAUv3);
-      #endif
-
-       UInt64 currentTime = mach_absolute_time();
-       static mach_timebase_info_data_t sTimebaseInfo = { 0, 0 };
-
-       if (sTimebaseInfo.denom == 0)
-           mach_timebase_info (&sTimebaseInfo);
-
-       auto bufferNanos = static_cast<double> (numSamples) * 1.0e9 / sampleRate;
-       auto bufferTicks = static_cast<UInt64> (std::ceil (bufferNanos * (static_cast<double> (sTimebaseInfo.denom)
-                                                                         / static_cast<double> (sTimebaseInfo.numer))));
-       currentTime += bufferTicks;
-
-       return currentTime;
-   }
-
    bool isBusCountWritable (bool isInput) const noexcept
    {
        UInt32 countSize;
@@ -20,6 +20,8 @@
 #ifndef DOXYGEN

+#include <juce_core/containers/juce_Optional.h>
+
 namespace juce
 {
@@ -1033,10 +1035,8 @@ public:
            if (eventList.getEvent (i, e) != Steinberg::kResultOk)
                continue;

-           const auto message = toMidiMessage (e);
-
-           if (message.isValid)
-               result.addEvent (message.item, e.sampleOffset);
+           if (const auto message = toMidiMessage (e))
+               result.addEvent (*message, e.sampleOffset);
        }
    }
@@ -1109,15 +1109,12 @@ private:
                }
            }

-           auto maybeEvent = createVstEvent (msg, metadata.data, kind);
-
-           if (! maybeEvent.isValid)
-               continue;
-
-           auto& e = maybeEvent.item;
-           e.busIndex = 0;
-           e.sampleOffset = metadata.samplePosition;
-           result.addEvent (e);
+           if (auto maybeEvent = createVstEvent (msg, metadata.data, kind))
+           {
+               maybeEvent->busIndex = 0;
+               maybeEvent->sampleOffset = metadata.samplePosition;
+               result.addEvent (*maybeEvent);
+           }
        }
    }
@@ -1234,19 +1231,9 @@ private:
                                               msg.getQuarterFrameValue());
    }

-   template <typename Item>
-   struct BasicOptional final
-   {
-       BasicOptional() noexcept = default;
-       BasicOptional (const Item& i) noexcept : item { i }, isValid { true } {}
-
-       Item item;
-       bool isValid{};
-   };
-
-   static BasicOptional<Steinberg::Vst::Event> createVstEvent (const MidiMessage& msg,
-                                                               const uint8* midiEventData,
-                                                               EventConversionKind kind) noexcept
+   static Optional<Steinberg::Vst::Event> createVstEvent (const MidiMessage& msg,
+                                                          const uint8* midiEventData,
+                                                          EventConversionKind kind) noexcept
    {
        if (msg.isNoteOn())
            return createNoteOnEvent (msg);
@@ -1290,7 +1277,7 @@ private:
        return {};
    }

-   static BasicOptional<MidiMessage> toMidiMessage (const Steinberg::Vst::LegacyMIDICCOutEvent& e)
+   static Optional<MidiMessage> toMidiMessage (const Steinberg::Vst::LegacyMIDICCOutEvent& e)
    {
        if (e.controlNumber <= 127)
            return MidiMessage::controllerEvent (createSafeChannel (int16 (e.channel)),
@@ -1327,7 +1314,7 @@ private:
        }
    }

-   static BasicOptional<MidiMessage> toMidiMessage (const Steinberg::Vst::Event& e)
+   static Optional<MidiMessage> toMidiMessage (const Steinberg::Vst::Event& e)
    {
        switch (e.type)
        {
@@ -240,7 +240,10 @@ static void setStateForAllBusesOfType (Vst::IComponent* component,
 }

 //==============================================================================
-static void toProcessContext (Vst::ProcessContext& context, AudioPlayHead* playHead, double sampleRate)
+static void toProcessContext (Vst::ProcessContext& context,
+                              AudioPlayHead* playHead,
+                              double sampleRate,
+                              const uint64_t* hostTimeNs)
 {
     jassert (sampleRate > 0.0); //Must always be valid, as stated by the VST3 SDK
@@ -295,6 +298,13 @@ static void toProcessContext (Vst::ProcessContext& context, AudioPlayHead* playH
     if (context.timeSigNumerator > 0 && context.timeSigDenominator > 0)
         context.state |= ProcessContext::kTimeSigValid;

+    if (hostTimeNs != nullptr)
+    {
+        context.systemTime = (int64_t) *hostTimeNs;
+        jassert (context.systemTime >= 0);
+        context.state |= ProcessContext::kSystemTimeValid;
+    }
 }

 //==============================================================================
@@ -3345,7 +3355,7 @@ private:
     void updateTimingInformation (Vst::ProcessData& destination, double processSampleRate)
     {
-        toProcessContext (timingInfo, getPlayHead(), processSampleRate);
+        toProcessContext (timingInfo, getPlayHead(), processSampleRate, getHostTimeNs());
         destination.processContext = &timingInfo;
     }
@@ -2367,7 +2367,6 @@ private:
            if (currentPlayHead->getCurrentPosition (position))
            {
                vstHostTime.samplePos        = (double) position.timeInSamples;
                vstHostTime.tempo            = position.bpm;
                vstHostTime.timeSigNumerator = position.timeSigNumerator;
@@ -2375,9 +2374,19 @@ private:
                vstHostTime.ppqPos      = position.ppqPosition;
                vstHostTime.barStartPos = position.ppqPositionOfLastBarStart;
                vstHostTime.flags |= Vst2::kVstTempoValid
-                                  | Vst2::kVstTimeSigValid
-                                  | Vst2::kVstPpqPosValid
-                                  | Vst2::kVstBarsValid;
+                                      | Vst2::kVstTimeSigValid
+                                      | Vst2::kVstPpqPosValid
+                                      | Vst2::kVstBarsValid;
+
+               if (const auto* hostTimeNs = getHostTimeNs())
+               {
+                   vstHostTime.nanoSeconds = (double) *hostTimeNs;
+                   vstHostTime.flags |= Vst2::kVstNanosValid;
+               }
+               else
+               {
+                   vstHostTime.flags &= ~Vst2::kVstNanosValid;
+               }

                int32 newTransportFlags = 0;
                if (position.isPlaying)     newTransportFlags |= Vst2::kVstTransportPlaying;
@@ -2389,28 +2398,22 @@ private:
                else
                    vstHostTime.flags &= ~Vst2::kVstTransportChanged;

-               struct OptionalFrameRate
-               {
-                   bool valid;
-                   Vst2::VstInt32 rate;
-               };
-
-               const auto optionalFrameRate = [&fr = position.frameRate]() -> OptionalFrameRate
+               const auto optionalFrameRate = [&fr = position.frameRate]() -> Optional<Vst2::VstInt32>
                {
                    switch (fr.getBaseRate())
                    {
-                       case 24: return { true, fr.isPullDown() ? Vst2::kVstSmpte239fps : Vst2::kVstSmpte24fps };
-                       case 25: return { true, fr.isPullDown() ? Vst2::kVstSmpte249fps : Vst2::kVstSmpte25fps };
-                       case 30: return { true, fr.isPullDown() ? (fr.isDrop() ? Vst2::kVstSmpte2997dfps : Vst2::kVstSmpte2997fps)
-                                                               : (fr.isDrop() ? Vst2::kVstSmpte30dfps   : Vst2::kVstSmpte30fps) };
-                       case 60: return { true, fr.isPullDown() ? Vst2::kVstSmpte599fps : Vst2::kVstSmpte60fps };
+                       case 24: return fr.isPullDown() ? Vst2::kVstSmpte239fps : Vst2::kVstSmpte24fps;
+                       case 25: return fr.isPullDown() ? Vst2::kVstSmpte249fps : Vst2::kVstSmpte25fps;
+                       case 30: return fr.isPullDown() ? (fr.isDrop() ? Vst2::kVstSmpte2997dfps : Vst2::kVstSmpte2997fps)
+                                                       : (fr.isDrop() ? Vst2::kVstSmpte30dfps   : Vst2::kVstSmpte30fps);
+                       case 60: return fr.isPullDown() ? Vst2::kVstSmpte599fps : Vst2::kVstSmpte60fps;
                    }

-                   return { false, Vst2::VstSmpteFrameRate{} };
+                   return {};
                }();

-               vstHostTime.flags |= optionalFrameRate.valid ? Vst2::kVstSmpteValid : 0;
-               vstHostTime.smpteFrameRate = optionalFrameRate.rate;
+               vstHostTime.flags |= optionalFrameRate ? Vst2::kVstSmpteValid : 0;
+               vstHostTime.smpteFrameRate = optionalFrameRate.orFallback (Vst2::VstSmpteFrameRate{});

                vstHostTime.smpteOffset = (int32) (position.timeInSeconds * 80.0 * position.frameRate.getEffectiveRate() + 0.5);

                if (position.isLooping)
@@ -33,6 +33,7 @@
 #include "juce_audio_processors.h"
 #include <juce_gui_extra/juce_gui_extra.h>
+#include <juce_core/containers/juce_Optional.h>

 //==============================================================================
 #if JUCE_MAC
@@ -1125,6 +1125,51 @@ public:
     */
    virtual void setPlayHead (AudioPlayHead* newPlayHead);

+   //==============================================================================
+   /** Hosts may call this function to supply the system time corresponding to the
+       current audio buffer.
+
+       If you want to set a valid time, pass a pointer to a uint64_t holding the current time. The
+       value will be copied into the AudioProcessor instance without any allocation/deallocation.
+       If you want to clear any stored host time, pass nullptr.
+
+       Calls to this function must be synchronised (i.e. not simultaneous) with the audio callback.
+
+       @code
+       const auto currentHostTime = computeHostTimeNanos();
+       processor.setHostTimeNanos (&currentHostTime);  // Set a valid host time
+       // ...call processBlock etc.
+       processor.setHostTimeNanos (nullptr);           // Clear host time
+       @endcode
+   */
+   void setHostTimeNanos (const uint64_t* hostTimeIn)
+   {
+       hasHostTime = hostTimeIn != nullptr;
+       hostTime = hasHostTime ? *hostTimeIn : 0;
+   }
+
+   /** The plugin may call this function inside the processBlock function (and only there!)
+       to find the timestamp associated with the current audio block.
+
+       If a timestamp is available, this will return a pointer to that timestamp. You should
+       immediately copy the pointed-to value and use that in any following code. Do *not* free
+       any pointer returned by this function.
+
+       If no timestamp is provided, this will return nullptr.
+
+       @code
+       void processBlock (AudioBuffer<float>&, MidiBuffer&) override
+       {
+           if (auto* timestamp = getHostTimeNs())
+           {
+               // Use *timestamp here to compensate for callback jitter etc.
+           }
+       }
+       @endcode
+   */
+   const uint64_t* getHostTimeNs() const { return hasHostTime ? &hostTime : nullptr; }
+
    //==============================================================================
    /** This is called by the processor to specify its details before being played. Use this
        version of the function if you are not interested in any sidechain and/or aux buses
@@ -1473,6 +1518,9 @@ private:
    AudioProcessorParameterGroup parameterTree;
    Array<AudioProcessorParameter*> flatParameterList;

+   uint64_t hostTime = 0;
+   bool hasHostTime = false;
+
    AudioProcessorParameter* getParamChecked (int) const;

   #if JUCE_DEBUG
@@ -37,10 +37,11 @@ struct GraphRenderSequence
        FloatType** audioBuffers;
        MidiBuffer* midiBuffers;
        AudioPlayHead* audioPlayHead;
+       Optional<uint64_t> hostTimeNs;
        int numSamples;
    };

-   void perform (AudioBuffer<FloatType>& buffer, MidiBuffer& midiMessages, AudioPlayHead* audioPlayHead)
+   void perform (AudioBuffer<FloatType>& buffer, MidiBuffer& midiMessages, AudioPlayHead* audioPlayHead, Optional<uint64_t> hostTimeNs)
    {
        auto numSamples = buffer.getNumSamples();
        auto maxSamples = renderingBuffer.getNumSamples();
@@ -57,7 +58,9 @@ struct GraphRenderSequence
                midiChunk.clear();
                midiChunk.addEvents (midiMessages, chunkStartSample, chunkSize, -chunkStartSample);

-               perform (audioChunk, midiChunk, audioPlayHead);
+               // Splitting up the buffer like this will cause the play head and host time to be
+               // invalid for all but the first chunk...
+               perform (audioChunk, midiChunk, audioPlayHead, hostTimeNs);

                chunkStartSample += maxSamples;
            }
@@ -72,7 +75,7 @@ struct GraphRenderSequence
        currentMidiOutputBuffer.clear();

        {
-           const Context context { renderingBuffer.getArrayOfWritePointers(), midiBuffers.begin(), audioPlayHead, numSamples };
+           const Context context { renderingBuffer.getArrayOfWritePointers(), midiBuffers.begin(), audioPlayHead, hostTimeNs, numSamples };

            for (auto* op : renderOps)
                op->perform (context);
@@ -257,6 +260,7 @@ private:
        void perform (const Context& c) override
        {
            processor.setPlayHead (c.audioPlayHead);
+           processor.setHostTimeNanos (c.hostTimeNs.hasValue() ? &(*c.hostTimeNs) : nullptr);

            for (int i = 0; i < totalChans; ++i)
                audioChannels[i] = c.audioBuffers[audioChannelsToUse.getUnchecked (i)];
@@ -278,6 +282,8 @@ private:
                buffer.clear();
            else
                callProcess (buffer, c.midiBuffers[midiBufferToUse]);
+
+           processor.setHostTimeNanos (nullptr);
        }

        void callProcess (AudioBuffer<float>& buffer, MidiBuffer& midiMessages)
@@ -1389,6 +1395,14 @@ static void processBlockForBuffer (AudioBuffer<FloatType>& buffer, MidiBuffer& m
                                   std::unique_ptr<SequenceType>& renderSequence,
                                   std::atomic<bool>& isPrepared)
{
+   const auto getHostTime = [&]() -> Optional<uint64_t>
+   {
+       if (auto* nanos = graph.getHostTimeNs())
+           return *nanos;
+
+       return nullopt;
+   };
+
    if (graph.isNonRealtime())
    {
        while (! isPrepared)
@@ -1397,7 +1411,7 @@ static void processBlockForBuffer (AudioBuffer<FloatType>& buffer, MidiBuffer& m
        const ScopedLock sl (graph.getCallbackLock());

        if (renderSequence != nullptr)
-           renderSequence->perform (buffer, midiMessages, graph.getPlayHead());
+           renderSequence->perform (buffer, midiMessages, graph.getPlayHead(), getHostTime());
    }
    else
    {
@@ -1406,7 +1420,7 @@ static void processBlockForBuffer (AudioBuffer<FloatType>& buffer, MidiBuffer& m
        if (isPrepared)
        {
            if (renderSequence != nullptr)
-               renderSequence->perform (buffer, midiMessages, graph.getPlayHead());
+               renderSequence->perform (buffer, midiMessages, graph.getPlayHead(), getHostTime());
        }
        else
        {
@@ -226,11 +226,12 @@ void AudioProcessorPlayer::setMidiOutput (MidiOutput* midiOutputToUse)
}

//==============================================================================
-void AudioProcessorPlayer::audioDeviceIOCallback (const float** const inputChannelData,
-                                                  const int numInputChannels,
-                                                  float** const outputChannelData,
-                                                  const int numOutputChannels,
-                                                  const int numSamples)
+void AudioProcessorPlayer::audioDeviceIOCallbackWithContext (const float** const inputChannelData,
+                                                             const int numInputChannels,
+                                                             float** const outputChannelData,
+                                                             const int numOutputChannels,
+                                                             const int numSamples,
+                                                             const AudioIODeviceCallbackContext& context)
{
    const ScopedLock sl (lock);
@@ -259,6 +260,16 @@ void AudioProcessorPlayer::audioDeviceIOCallback (const float** const inputChann
        const ScopedLock sl2 (processor->getCallbackLock());

+       processor->setHostTimeNanos (context.hostTimeNs);
+
+       struct AtEndOfScope
+       {
+           ~AtEndOfScope() { proc.setHostTimeNanos (nullptr); }
+           AudioProcessor& proc;
+       };
+
+       const AtEndOfScope scope { *processor };
+
        if (! processor->isSuspended())
        {
            if (processor->isUsingDoublePrecision())
@@ -85,7 +85,7 @@ public:
    //==============================================================================
    /** @internal */
-   void audioDeviceIOCallback (const float**, int, float**, int, int) override;
+   void audioDeviceIOCallbackWithContext (const float**, int, float**, int, int, const AudioIODeviceCallbackContext&) override;
    /** @internal */
    void audioDeviceAboutToStart (AudioIODevice*) override;
    /** @internal */