
Converted AudioSampleBuffer into a templated class that can use either float or double sample types, and used this to implement 64-bit audio plugin support in the VST, VST3 and AU wrappers and hosting code.

Tag: 2021-05-28
Author: jules, 10 years ago
Commit: c562cfc3cc
27 changed files with 1714 additions and 1105 deletions
  1. examples/audio plugin demo/Source/PluginProcessor.cpp  (+42, -13)
  2. examples/audio plugin demo/Source/PluginProcessor.h  (+21, -2)
  3. examples/audio plugin host/Source/GraphEditorPanel.cpp  (+2, -1)
  4. examples/audio plugin host/Source/GraphEditorPanel.h  (+1, -0)
  5. examples/audio plugin host/Source/MainHostWindow.cpp  (+37, -0)
  6. examples/audio plugin host/Source/MainHostWindow.h  (+4, -0)
  7. modules/juce_audio_basics/buffers/juce_AudioSampleBuffer.cpp  (+0, -673)
  8. modules/juce_audio_basics/buffers/juce_AudioSampleBuffer.h  (+639, -92)
  9. modules/juce_audio_basics/juce_audio_basics.cpp  (+0, -1)
  10. modules/juce_audio_basics/juce_audio_basics.h  (+1, -1)
  11. modules/juce_audio_basics/synthesisers/juce_Synthesiser.cpp  (+37, -6)
  12. modules/juce_audio_basics/synthesisers/juce_Synthesiser.h  (+24, -4)
  13. modules/juce_audio_devices/native/juce_mac_CoreAudio.cpp  (+2, -2)
  14. modules/juce_audio_formats/codecs/juce_CoreAudioFormat.cpp  (+1, -1)
  15. modules/juce_audio_plugin_client/AU/juce_AU_Wrapper.mm  (+3, -3)
  16. modules/juce_audio_plugin_client/VST/juce_VST_Wrapper.cpp  (+88, -26)
  17. modules/juce_audio_plugin_client/VST3/juce_VST3_Wrapper.cpp  (+122, -54)
  18. modules/juce_audio_processors/format_types/juce_AudioUnitPluginFormat.mm  (+1, -1)
  19. modules/juce_audio_processors/format_types/juce_VST3Common.h  (+43, -19)
  20. modules/juce_audio_processors/format_types/juce_VST3PluginFormat.cpp  (+47, -31)
  21. modules/juce_audio_processors/format_types/juce_VSTPluginFormat.cpp  (+145, -104)
  22. modules/juce_audio_processors/processors/juce_AudioProcessor.cpp  (+29, -2)
  23. modules/juce_audio_processors/processors/juce_AudioProcessor.h  (+113, -2)
  24. modules/juce_audio_processors/processors/juce_AudioProcessorGraph.cpp  (+235, -54)
  25. modules/juce_audio_processors/processors/juce_AudioProcessorGraph.h  (+18, -7)
  26. modules/juce_audio_utils/players/juce_AudioProcessorPlayer.cpp  (+41, -2)
  27. modules/juce_audio_utils/players/juce_AudioProcessorPlayer.h  (+18, -4)

examples/audio plugin demo/Source/PluginProcessor.cpp  (+42, -13)

@@ -85,7 +85,20 @@ public:
// not interested in controllers in this case.
}
void renderNextBlock (AudioSampleBuffer& outputBuffer, int startSample, int numSamples) override
void renderNextBlock (AudioBuffer<float>& outputBuffer, int startSample, int numSamples) override
{
processBlock (outputBuffer, startSample, numSamples);
}
void renderNextBlock (AudioBuffer<double>& outputBuffer, int startSample, int numSamples) override
{
processBlock (outputBuffer, startSample, numSamples);
}
private:
template <typename FloatType>
void processBlock (AudioBuffer<FloatType>& outputBuffer, int startSample, int numSamples)
{
if (angleDelta != 0.0)
{
@@ -93,7 +106,8 @@ public:
{
while (--numSamples >= 0)
{
const float currentSample = (float) (sin (currentAngle) * level * tailOff);
const FloatType currentSample =
static_cast<FloatType> (std::sin (currentAngle) * level * tailOff);
for (int i = outputBuffer.getNumChannels(); --i >= 0;)
outputBuffer.addSample (i, startSample, currentSample);
@@ -116,7 +130,7 @@ public:
{
while (--numSamples >= 0)
{
const float currentSample = (float) (sin (currentAngle) * level);
const FloatType currentSample = static_cast<FloatType> (std::sin (currentAngle) * level);
for (int i = outputBuffer.getNumChannels(); --i >= 0;)
outputBuffer.addSample (i, startSample, currentSample);
@@ -128,7 +142,6 @@ public:
}
}
private:
double currentAngle, angleDelta, level, tailOff;
};
@@ -183,7 +196,6 @@ const float defaultDelay = 0.5f;
//==============================================================================
JuceDemoPluginAudioProcessor::JuceDemoPluginAudioProcessor()
: delayBuffer (2, 12000)
{
// Set up our parameters. The base class will delete them for us.
addParameter (gain = new FloatParameter (defaultGain, "Gain"));
@@ -213,7 +225,19 @@ void JuceDemoPluginAudioProcessor::prepareToPlay (double newSampleRate, int /*sa
// initialisation that you need..
synth.setCurrentPlaybackSampleRate (newSampleRate);
keyboardState.reset();
delayBuffer.clear();
if (isUsingDoublePrecision())
{
delayBufferDouble.setSize (2, 12000);
delayBufferFloat.setSize (1, 1);
}
else
{
delayBufferFloat.setSize (2, 12000);
delayBufferDouble.setSize (1, 1);
}
reset();
}
void JuceDemoPluginAudioProcessor::releaseResources()
@@ -227,17 +251,21 @@ void JuceDemoPluginAudioProcessor::reset()
{
// Use this method as the place to clear any delay lines, buffers, etc, as it
// means there's been a break in the audio's continuity.
delayBuffer.clear();
delayBufferFloat.clear();
delayBufferDouble.clear();
}
void JuceDemoPluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
template <typename FloatType>
void JuceDemoPluginAudioProcessor::process (AudioBuffer<FloatType>& buffer,
MidiBuffer& midiMessages,
AudioBuffer<FloatType>& delayBuffer)
{
const int numSamples = buffer.getNumSamples();
int channel, dp = 0;
// Go through the incoming data, and apply our gain to it...
for (channel = 0; channel < getNumInputChannels(); ++channel)
buffer.applyGain (channel, 0, buffer.getNumSamples(), gain->getValue());
buffer.applyGain (channel, 0, buffer.getNumSamples(), static_cast<FloatType> (gain->getValue()));
// Now pass any incoming midi messages to our keyboard state object, and let it
// add messages to the buffer if the user is clicking on the on-screen keys
@@ -249,15 +277,16 @@ void JuceDemoPluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, Midi
// Apply our delay effect to the new output..
for (channel = 0; channel < getNumInputChannels(); ++channel)
{
float* channelData = buffer.getWritePointer (channel);
float* delayData = delayBuffer.getWritePointer (jmin (channel, delayBuffer.getNumChannels() - 1));
FloatType* channelData = buffer.getWritePointer (channel);
FloatType* delayData = delayBuffer.getWritePointer (jmin (channel, delayBuffer.getNumChannels() - 1));
dp = delayPosition;
for (int i = 0; i < numSamples; ++i)
{
const float in = channelData[i];
const FloatType in = channelData[i];
channelData[i] += delayData[dp];
delayData[dp] = (delayData[dp] + in) * delay->getValue();
delayData[dp] = (delayData[dp] + in) * static_cast<FloatType> (delay->getValue());
if (++dp >= delayBuffer.getNumSamples())
dp = 0;
}


examples/audio plugin demo/Source/PluginProcessor.h  (+21, -2)

@@ -28,13 +28,28 @@ public:
//==============================================================================
void prepareToPlay (double sampleRate, int samplesPerBlock) override;
void releaseResources() override;
void processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages) override;
void reset() override;
//==============================================================================
void processBlock (AudioBuffer<float>& buffer, MidiBuffer& midiMessages) override
{
jassert (! isUsingDoublePrecision());
process (buffer, midiMessages, delayBufferFloat);
}
void processBlock (AudioBuffer<double>& buffer, MidiBuffer& midiMessages) override
{
jassert (isUsingDoublePrecision());
process (buffer, midiMessages, delayBufferDouble);
}
//==============================================================================
bool hasEditor() const override { return true; }
AudioProcessorEditor* createEditor() override;
//==============================================================================
bool supportsDoublePrecisionProcessing() const override { return true; }
//==============================================================================
const String getName() const override { return JucePlugin_Name; }
@@ -83,7 +98,11 @@ public:
private:
//==============================================================================
AudioSampleBuffer delayBuffer;
template <typename floatType>
void process (AudioBuffer<floatType>& buffer, MidiBuffer& midiMessages, AudioBuffer<floatType>& delayBuffer);
AudioBuffer<float> delayBufferFloat;
AudioBuffer<double> delayBufferDouble;
int delayPosition;
// the synth!


examples/audio plugin host/Source/GraphEditorPanel.cpp  (+2, -1)

@@ -1087,7 +1087,8 @@ private:
//==============================================================================
GraphDocumentComponent::GraphDocumentComponent (AudioPluginFormatManager& formatManager,
AudioDeviceManager* deviceManager_)
: graph (formatManager), deviceManager (deviceManager_)
: graph (formatManager), deviceManager (deviceManager_),
graphPlayer (getAppProperties().getUserSettings()->getBoolValue ("doublePrecisionProcessing", false))
{
addAndMakeVisible (graphPanel = new GraphEditorPanel (graph));


examples/audio plugin host/Source/GraphEditorPanel.h  (+1, -0)

@@ -88,6 +88,7 @@ public:
//==============================================================================
void createNewPlugin (const PluginDescription* desc, int x, int y);
inline void setDoublePrecision (bool doublePrecision) { graphPlayer.setDoublePrecisionProcessing (doublePrecision); }
//==============================================================================
FilterGraph graph;


examples/audio plugin host/Source/MainHostWindow.cpp  (+37, -0)

@@ -240,6 +240,7 @@ PopupMenu MainHostWindow::getMenuForIndex (int topLevelMenuIndex, const String&
menu.addSeparator();
menu.addCommandItem (&getCommandManager(), CommandIDs::showAudioSettings);
menu.addCommandItem (&getCommandManager(), CommandIDs::toggleDoublePrecision);
menu.addSeparator();
menu.addCommandItem (&getCommandManager(), CommandIDs::aboutBox);
@@ -331,6 +332,7 @@ void MainHostWindow::getAllCommands (Array <CommandID>& commands)
CommandIDs::saveAs,
CommandIDs::showPluginListEditor,
CommandIDs::showAudioSettings,
CommandIDs::toggleDoublePrecision,
CommandIDs::aboutBox,
CommandIDs::allWindowsForward
};
@@ -376,6 +378,10 @@ void MainHostWindow::getCommandInfo (const CommandID commandID, ApplicationComma
result.addDefaultKeypress ('a', ModifierKeys::commandModifier);
break;
case CommandIDs::toggleDoublePrecision:
updatePrecisionMenuItem (result);
break;
case CommandIDs::aboutBox:
result.setInfo ("About...", String::empty, category, 0);
break;
@@ -427,6 +433,23 @@ bool MainHostWindow::perform (const InvocationInfo& info)
showAudioSettings();
break;
case CommandIDs::toggleDoublePrecision:
if (PropertiesFile* props = getAppProperties().getUserSettings())
{
bool newIsDoublePrecision = ! isDoublePrecisionProcessing();
props->setValue ("doublePrecisionProcessing", var (newIsDoublePrecision));
{
ApplicationCommandInfo cmdInfo (info.commandID);
updatePrecisionMenuItem (cmdInfo);
menuItemsChanged();
}
if (graphEditor != nullptr)
graphEditor->setDoublePrecision (newIsDoublePrecision);
}
break;
case CommandIDs::aboutBox:
// TODO
break;
@@ -524,3 +547,17 @@ GraphDocumentComponent* MainHostWindow::getGraphEditor() const
{
return dynamic_cast <GraphDocumentComponent*> (getContentComponent());
}
bool MainHostWindow::isDoublePrecisionProcessing()
{
if (PropertiesFile* props = getAppProperties().getUserSettings())
return props->getBoolValue ("doublePrecisionProcessing", false);
return false;
}
void MainHostWindow::updatePrecisionMenuItem (ApplicationCommandInfo& info)
{
info.setInfo ("Double floating point precision rendering", String::empty, "General", 0);
info.setTicked (isDoublePrecisionProcessing());
}

examples/audio plugin host/Source/MainHostWindow.h  (+4, -0)

@@ -40,6 +40,7 @@ namespace CommandIDs
static const int showAudioSettings = 0x30200;
static const int aboutBox = 0x30300;
static const int allWindowsForward = 0x30400;
static const int toggleDoublePrecision = 0x30500;
}
ApplicationCommandManager& getCommandManager();
@@ -86,6 +87,9 @@ public:
GraphDocumentComponent* getGraphEditor() const;
bool isDoublePrecisionProcessing();
void updatePrecisionMenuItem (ApplicationCommandInfo& info);
private:
//==============================================================================
AudioDeviceManager deviceManager;


modules/juce_audio_basics/buffers/juce_AudioSampleBuffer.cpp  (+0, -673)

@@ -1,673 +0,0 @@
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2015 - ROLI Ltd.
Permission is granted to use this software under the terms of either:
a) the GPL v2 (or any later version)
b) the Affero GPL v3
Details of these licenses can be found at: www.gnu.org/licenses
JUCE is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
------------------------------------------------------------------------------
To release a closed-source product which uses JUCE, commercial licenses are
available: visit www.juce.com for more information.
==============================================================================
*/
AudioSampleBuffer::AudioSampleBuffer() noexcept
: numChannels (0), size (0), allocatedBytes (0),
channels (static_cast<float**> (preallocatedChannelSpace)),
isClear (false)
{
}
AudioSampleBuffer::AudioSampleBuffer (const int numChans,
const int numSamples) noexcept
: numChannels (numChans),
size (numSamples)
{
jassert (numSamples >= 0);
jassert (numChans >= 0);
allocateData();
}
AudioSampleBuffer::AudioSampleBuffer (const AudioSampleBuffer& other) noexcept
: numChannels (other.numChannels),
size (other.size),
allocatedBytes (other.allocatedBytes)
{
if (allocatedBytes == 0)
{
allocateChannels (other.channels, 0);
}
else
{
allocateData();
if (other.isClear)
{
clear();
}
else
{
for (int i = 0; i < numChannels; ++i)
FloatVectorOperations::copy (channels[i], other.channels[i], size);
}
}
}
void AudioSampleBuffer::allocateData()
{
const size_t channelListSize = sizeof (float*) * (size_t) (numChannels + 1);
allocatedBytes = (size_t) numChannels * (size_t) size * sizeof (float) + channelListSize + 32;
allocatedData.malloc (allocatedBytes);
channels = reinterpret_cast<float**> (allocatedData.getData());
float* chan = (float*) (allocatedData + channelListSize);
for (int i = 0; i < numChannels; ++i)
{
channels[i] = chan;
chan += size;
}
channels [numChannels] = nullptr;
isClear = false;
}
AudioSampleBuffer::AudioSampleBuffer (float* const* dataToReferTo,
const int numChans,
const int numSamples) noexcept
: numChannels (numChans),
size (numSamples),
allocatedBytes (0)
{
jassert (dataToReferTo != nullptr);
jassert (numChans >= 0 && numSamples >= 0);
allocateChannels (dataToReferTo, 0);
}
AudioSampleBuffer::AudioSampleBuffer (float* const* dataToReferTo,
const int numChans,
const int startSample,
const int numSamples) noexcept
: numChannels (numChans),
size (numSamples),
allocatedBytes (0),
isClear (false)
{
jassert (dataToReferTo != nullptr);
jassert (numChans >= 0 && startSample >= 0 && numSamples >= 0);
allocateChannels (dataToReferTo, startSample);
}
void AudioSampleBuffer::setDataToReferTo (float** dataToReferTo,
const int newNumChannels,
const int newNumSamples) noexcept
{
jassert (dataToReferTo != nullptr);
jassert (newNumChannels >= 0 && newNumSamples >= 0);
if (allocatedBytes != 0)
{
allocatedBytes = 0;
allocatedData.free();
}
numChannels = newNumChannels;
size = newNumSamples;
allocateChannels (dataToReferTo, 0);
jassert (! isClear);
}
void AudioSampleBuffer::allocateChannels (float* const* const dataToReferTo, int offset)
{
jassert (offset >= 0);
// (try to avoid doing a malloc here, as that'll blow up things like Pro-Tools)
if (numChannels < (int) numElementsInArray (preallocatedChannelSpace))
{
channels = static_cast<float**> (preallocatedChannelSpace);
}
else
{
allocatedData.malloc ((size_t) numChannels + 1, sizeof (float*));
channels = reinterpret_cast<float**> (allocatedData.getData());
}
for (int i = 0; i < numChannels; ++i)
{
// you have to pass in the same number of valid pointers as numChannels
jassert (dataToReferTo[i] != nullptr);
channels[i] = dataToReferTo[i] + offset;
}
channels [numChannels] = nullptr;
isClear = false;
}
AudioSampleBuffer& AudioSampleBuffer::operator= (const AudioSampleBuffer& other) noexcept
{
if (this != &other)
{
setSize (other.getNumChannels(), other.getNumSamples(), false, false, false);
if (other.isClear)
{
clear();
}
else
{
isClear = false;
for (int i = 0; i < numChannels; ++i)
FloatVectorOperations::copy (channels[i], other.channels[i], size);
}
}
return *this;
}
AudioSampleBuffer::~AudioSampleBuffer() noexcept
{
}
void AudioSampleBuffer::setSize (const int newNumChannels,
const int newNumSamples,
const bool keepExistingContent,
const bool clearExtraSpace,
const bool avoidReallocating) noexcept
{
jassert (newNumChannels >= 0);
jassert (newNumSamples >= 0);
if (newNumSamples != size || newNumChannels != numChannels)
{
const size_t allocatedSamplesPerChannel = ((size_t) newNumSamples + 3) & ~3u;
const size_t channelListSize = ((sizeof (float*) * (size_t) (newNumChannels + 1)) + 15) & ~15u;
const size_t newTotalBytes = ((size_t) newNumChannels * (size_t) allocatedSamplesPerChannel * sizeof (float))
+ channelListSize + 32;
if (keepExistingContent)
{
HeapBlock<char, true> newData;
newData.allocate (newTotalBytes, clearExtraSpace || isClear);
const size_t numSamplesToCopy = (size_t) jmin (newNumSamples, size);
float** const newChannels = reinterpret_cast<float**> (newData.getData());
float* newChan = reinterpret_cast<float*> (newData + channelListSize);
for (int j = 0; j < newNumChannels; ++j)
{
newChannels[j] = newChan;
newChan += allocatedSamplesPerChannel;
}
if (! isClear)
{
const int numChansToCopy = jmin (numChannels, newNumChannels);
for (int i = 0; i < numChansToCopy; ++i)
FloatVectorOperations::copy (newChannels[i], channels[i], (int) numSamplesToCopy);
}
allocatedData.swapWith (newData);
allocatedBytes = newTotalBytes;
channels = newChannels;
}
else
{
if (avoidReallocating && allocatedBytes >= newTotalBytes)
{
if (clearExtraSpace || isClear)
allocatedData.clear (newTotalBytes);
}
else
{
allocatedBytes = newTotalBytes;
allocatedData.allocate (newTotalBytes, clearExtraSpace || isClear);
channels = reinterpret_cast<float**> (allocatedData.getData());
}
float* chan = reinterpret_cast<float*> (allocatedData + channelListSize);
for (int i = 0; i < newNumChannels; ++i)
{
channels[i] = chan;
chan += allocatedSamplesPerChannel;
}
}
channels [newNumChannels] = 0;
size = newNumSamples;
numChannels = newNumChannels;
}
}
void AudioSampleBuffer::clear() noexcept
{
if (! isClear)
{
for (int i = 0; i < numChannels; ++i)
FloatVectorOperations::clear (channels[i], size);
isClear = true;
}
}
void AudioSampleBuffer::clear (const int startSample,
const int numSamples) noexcept
{
jassert (startSample >= 0 && startSample + numSamples <= size);
if (! isClear)
{
if (startSample == 0 && numSamples == size)
isClear = true;
for (int i = 0; i < numChannels; ++i)
FloatVectorOperations::clear (channels[i] + startSample, numSamples);
}
}
void AudioSampleBuffer::clear (const int channel,
const int startSample,
const int numSamples) noexcept
{
jassert (isPositiveAndBelow (channel, numChannels));
jassert (startSample >= 0 && startSample + numSamples <= size);
if (! isClear)
FloatVectorOperations::clear (channels [channel] + startSample, numSamples);
}
float AudioSampleBuffer::getSample (int channel, int index) const noexcept
{
jassert (isPositiveAndBelow (channel, numChannels));
jassert (isPositiveAndBelow (index, size));
return *(channels [channel] + index);
}
void AudioSampleBuffer::setSample (int channel, int index, float newValue) noexcept
{
jassert (isPositiveAndBelow (channel, numChannels));
jassert (isPositiveAndBelow (index, size));
*(channels [channel] + index) = newValue;
isClear = false;
}
void AudioSampleBuffer::addSample (int channel, int index, float valueToAdd) noexcept
{
jassert (isPositiveAndBelow (channel, numChannels));
jassert (isPositiveAndBelow (index, size));
*(channels [channel] + index) += valueToAdd;
isClear = false;
}
void AudioSampleBuffer::applyGain (const int channel,
const int startSample,
int numSamples,
const float gain) noexcept
{
jassert (isPositiveAndBelow (channel, numChannels));
jassert (startSample >= 0 && startSample + numSamples <= size);
if (gain != 1.0f && ! isClear)
{
float* const d = channels [channel] + startSample;
if (gain == 0.0f)
FloatVectorOperations::clear (d, numSamples);
else
FloatVectorOperations::multiply (d, gain, numSamples);
}
}
void AudioSampleBuffer::applyGainRamp (const int channel,
const int startSample,
int numSamples,
float startGain,
float endGain) noexcept
{
if (! isClear)
{
if (startGain == endGain)
{
applyGain (channel, startSample, numSamples, startGain);
}
else
{
jassert (isPositiveAndBelow (channel, numChannels));
jassert (startSample >= 0 && startSample + numSamples <= size);
const float increment = (endGain - startGain) / numSamples;
float* d = channels [channel] + startSample;
while (--numSamples >= 0)
{
*d++ *= startGain;
startGain += increment;
}
}
}
}
void AudioSampleBuffer::applyGain (int startSample, int numSamples, float gain) noexcept
{
for (int i = 0; i < numChannels; ++i)
applyGain (i, startSample, numSamples, gain);
}
void AudioSampleBuffer::applyGain (const float gain) noexcept
{
applyGain (0, size, gain);
}
void AudioSampleBuffer::applyGainRamp (int startSample, int numSamples,
float startGain, float endGain) noexcept
{
for (int i = 0; i < numChannels; ++i)
applyGainRamp (i, startSample, numSamples, startGain, endGain);
}
void AudioSampleBuffer::addFrom (const int destChannel,
const int destStartSample,
const AudioSampleBuffer& source,
const int sourceChannel,
const int sourceStartSample,
int numSamples,
const float gain) noexcept
{
jassert (&source != this || sourceChannel != destChannel);
jassert (isPositiveAndBelow (destChannel, numChannels));
jassert (destStartSample >= 0 && destStartSample + numSamples <= size);
jassert (isPositiveAndBelow (sourceChannel, source.numChannels));
jassert (sourceStartSample >= 0 && sourceStartSample + numSamples <= source.size);
if (gain != 0.0f && numSamples > 0 && ! source.isClear)
{
float* const d = channels [destChannel] + destStartSample;
const float* const s = source.channels [sourceChannel] + sourceStartSample;
if (isClear)
{
isClear = false;
if (gain != 1.0f)
FloatVectorOperations::copyWithMultiply (d, s, gain, numSamples);
else
FloatVectorOperations::copy (d, s, numSamples);
}
else
{
if (gain != 1.0f)
FloatVectorOperations::addWithMultiply (d, s, gain, numSamples);
else
FloatVectorOperations::add (d, s, numSamples);
}
}
}
void AudioSampleBuffer::addFrom (const int destChannel,
const int destStartSample,
const float* source,
int numSamples,
const float gain) noexcept
{
jassert (isPositiveAndBelow (destChannel, numChannels));
jassert (destStartSample >= 0 && destStartSample + numSamples <= size);
jassert (source != nullptr);
if (gain != 0.0f && numSamples > 0)
{
float* const d = channels [destChannel] + destStartSample;
if (isClear)
{
isClear = false;
if (gain != 1.0f)
FloatVectorOperations::copyWithMultiply (d, source, gain, numSamples);
else
FloatVectorOperations::copy (d, source, numSamples);
}
else
{
if (gain != 1.0f)
FloatVectorOperations::addWithMultiply (d, source, gain, numSamples);
else
FloatVectorOperations::add (d, source, numSamples);
}
}
}
void AudioSampleBuffer::addFromWithRamp (const int destChannel,
const int destStartSample,
const float* source,
int numSamples,
float startGain,
const float endGain) noexcept
{
jassert (isPositiveAndBelow (destChannel, numChannels));
jassert (destStartSample >= 0 && destStartSample + numSamples <= size);
jassert (source != nullptr);
if (startGain == endGain)
{
addFrom (destChannel, destStartSample, source, numSamples, startGain);
}
else
{
if (numSamples > 0 && (startGain != 0.0f || endGain != 0.0f))
{
isClear = false;
const float increment = (endGain - startGain) / numSamples;
float* d = channels [destChannel] + destStartSample;
while (--numSamples >= 0)
{
*d++ += startGain * *source++;
startGain += increment;
}
}
}
}
void AudioSampleBuffer::copyFrom (const int destChannel,
const int destStartSample,
const AudioSampleBuffer& source,
const int sourceChannel,
const int sourceStartSample,
int numSamples) noexcept
{
jassert (&source != this || sourceChannel != destChannel);
jassert (isPositiveAndBelow (destChannel, numChannels));
jassert (destStartSample >= 0 && destStartSample + numSamples <= size);
jassert (isPositiveAndBelow (sourceChannel, source.numChannels));
jassert (sourceStartSample >= 0 && sourceStartSample + numSamples <= source.size);
if (numSamples > 0)
{
if (source.isClear)
{
if (! isClear)
FloatVectorOperations::clear (channels [destChannel] + destStartSample, numSamples);
}
else
{
isClear = false;
FloatVectorOperations::copy (channels [destChannel] + destStartSample,
source.channels [sourceChannel] + sourceStartSample,
numSamples);
}
}
}
void AudioSampleBuffer::copyFrom (const int destChannel,
const int destStartSample,
const float* source,
int numSamples) noexcept
{
jassert (isPositiveAndBelow (destChannel, numChannels));
jassert (destStartSample >= 0 && destStartSample + numSamples <= size);
jassert (source != nullptr);
if (numSamples > 0)
{
isClear = false;
FloatVectorOperations::copy (channels [destChannel] + destStartSample, source, numSamples);
}
}
void AudioSampleBuffer::copyFrom (const int destChannel,
const int destStartSample,
const float* source,
int numSamples,
const float gain) noexcept
{
jassert (isPositiveAndBelow (destChannel, numChannels));
jassert (destStartSample >= 0 && destStartSample + numSamples <= size);
jassert (source != nullptr);
if (numSamples > 0)
{
float* const d = channels [destChannel] + destStartSample;
if (gain != 1.0f)
{
if (gain == 0)
{
if (! isClear)
FloatVectorOperations::clear (d, numSamples);
}
else
{
isClear = false;
FloatVectorOperations::copyWithMultiply (d, source, gain, numSamples);
}
}
else
{
isClear = false;
FloatVectorOperations::copy (d, source, numSamples);
}
}
}
void AudioSampleBuffer::copyFromWithRamp (const int destChannel,
const int destStartSample,
const float* source,
int numSamples,
float startGain,
float endGain) noexcept
{
jassert (isPositiveAndBelow (destChannel, numChannels));
jassert (destStartSample >= 0 && destStartSample + numSamples <= size);
jassert (source != nullptr);
if (startGain == endGain)
{
copyFrom (destChannel, destStartSample, source, numSamples, startGain);
}
else
{
if (numSamples > 0 && (startGain != 0.0f || endGain != 0.0f))
{
isClear = false;
const float increment = (endGain - startGain) / numSamples;
float* d = channels [destChannel] + destStartSample;
while (--numSamples >= 0)
{
*d++ = startGain * *source++;
startGain += increment;
}
}
}
}
void AudioSampleBuffer::reverse (int channel, int startSample, int numSamples) const noexcept
{
jassert (isPositiveAndBelow (channel, numChannels));
jassert (startSample >= 0 && startSample + numSamples <= size);
if (! isClear)
std::reverse (channels[channel] + startSample,
channels[channel] + startSample + numSamples);
}
void AudioSampleBuffer::reverse (int startSample, int numSamples) const noexcept
{
for (int i = 0; i < numChannels; ++i)
reverse (i, startSample, numSamples);
}
Range<float> AudioSampleBuffer::findMinMax (const int channel,
const int startSample,
int numSamples) const noexcept
{
jassert (isPositiveAndBelow (channel, numChannels));
jassert (startSample >= 0 && startSample + numSamples <= size);
if (isClear)
return Range<float>();
return FloatVectorOperations::findMinAndMax (channels [channel] + startSample, numSamples);
}
float AudioSampleBuffer::getMagnitude (const int channel,
const int startSample,
const int numSamples) const noexcept
{
jassert (isPositiveAndBelow (channel, numChannels));
jassert (startSample >= 0 && startSample + numSamples <= size);
if (isClear)
return 0.0f;
const Range<float> r (findMinMax (channel, startSample, numSamples));
return jmax (r.getStart(), -r.getStart(), r.getEnd(), -r.getEnd());
}
float AudioSampleBuffer::getMagnitude (int startSample, int numSamples) const noexcept
{
float mag = 0.0f;
if (! isClear)
for (int i = 0; i < numChannels; ++i)
mag = jmax (mag, getMagnitude (i, startSample, numSamples));
return mag;
}
float AudioSampleBuffer::getRMSLevel (const int channel,
const int startSample,
const int numSamples) const noexcept
{
jassert (isPositiveAndBelow (channel, numChannels));
jassert (startSample >= 0 && startSample + numSamples <= size);
if (numSamples <= 0 || channel < 0 || channel >= numChannels || isClear)
return 0.0f;
const float* const data = channels [channel] + startSample;
double sum = 0.0;
for (int i = 0; i < numSamples; ++i)
{
const float sample = data [i];
sum += sample * sample;
}
return (float) std::sqrt (sum / numSamples);
}

modules/juce_audio_basics/buffers/juce_AudioSampleBuffer.h  (+639, -92)
File diff suppressed because it is too large


modules/juce_audio_basics/juce_audio_basics.cpp  (+0, -1)

@@ -80,7 +80,6 @@ namespace juce
{
#include "buffers/juce_AudioDataConverters.cpp"
#include "buffers/juce_AudioSampleBuffer.cpp"
#include "buffers/juce_FloatVectorOperations.cpp"
#include "effects/juce_IIRFilter.cpp"
#include "effects/juce_LagrangeInterpolator.cpp"


modules/juce_audio_basics/juce_audio_basics.h  (+1, -1)

@@ -35,8 +35,8 @@ namespace juce
#undef Factor
#include "buffers/juce_AudioDataConverters.h"
#include "buffers/juce_AudioSampleBuffer.h"
#include "buffers/juce_FloatVectorOperations.h"
#include "buffers/juce_AudioSampleBuffer.h"
#include "effects/juce_Decibels.h"
#include "effects/juce_IIRFilter.h"
#include "effects/juce_LagrangeInterpolator.h"


modules/juce_audio_basics/synthesisers/juce_Synthesiser.cpp  (+37, -6)

@@ -71,6 +71,18 @@ bool SynthesiserVoice::wasStartedBefore (const SynthesiserVoice& other) const no
return noteOnTime < other.noteOnTime;
}
void SynthesiserVoice::renderNextBlock (AudioBuffer<double>& outputBuffer,
int startSample, int numSamples)
{
AudioBuffer<double> subBuffer (outputBuffer.getArrayOfWritePointers(),
outputBuffer.getNumChannels(),
startSample, numSamples);
tempBuffer.makeCopyOf (subBuffer);
renderNextBlock (tempBuffer, 0, numSamples);
subBuffer.makeCopyOf (tempBuffer);
}
//==============================================================================
Synthesiser::Synthesiser()
: sampleRate (0),
@@ -156,8 +168,11 @@ void Synthesiser::setCurrentPlaybackSampleRate (const double newRate)
}
}
void Synthesiser::renderNextBlock (AudioSampleBuffer& outputBuffer, const MidiBuffer& midiData,
int startSample, int numSamples)
template <typename floatType>
void Synthesiser::processNextBlock (AudioBuffer<floatType>& outputAudio,
const MidiBuffer& midiData,
int startSample,
int numSamples)
{
// must set the sample rate before using this!
jassert (sampleRate != 0);
@@ -174,7 +189,7 @@ void Synthesiser::renderNextBlock (AudioSampleBuffer& outputBuffer, const MidiBu
{
if (! midiIterator.getNextEvent (m, midiEventPos))
{
renderVoices (outputBuffer, startSample, numSamples);
renderVoices (outputAudio, startSample, numSamples);
return;
}
@@ -182,7 +197,7 @@ void Synthesiser::renderNextBlock (AudioSampleBuffer& outputBuffer, const MidiBu
if (samplesToNextMidiMessage >= numSamples)
{
renderVoices (outputBuffer, startSample, numSamples);
renderVoices (outputAudio, startSample, numSamples);
handleMidiEvent (m);
break;
}
@@ -193,7 +208,7 @@ void Synthesiser::renderNextBlock (AudioSampleBuffer& outputBuffer, const MidiBu
continue;
}
renderVoices (outputBuffer, startSample, samplesToNextMidiMessage);
renderVoices (outputAudio, startSample, samplesToNextMidiMessage);
handleMidiEvent (m);
startSample += samplesToNextMidiMessage;
numSamples -= samplesToNextMidiMessage;
@@ -203,7 +218,23 @@ void Synthesiser::renderNextBlock (AudioSampleBuffer& outputBuffer, const MidiBu
handleMidiEvent (m);
}
void Synthesiser::renderVoices (AudioSampleBuffer& buffer, int startSample, int numSamples)
// explicit template instantiation
template void Synthesiser::processNextBlock<float> (AudioBuffer<float>& outputAudio,
const MidiBuffer& midiData,
int startSample,
int numSamples);
template void Synthesiser::processNextBlock<double> (AudioBuffer<double>& outputAudio,
const MidiBuffer& midiData,
int startSample,
int numSamples);
void Synthesiser::renderVoices (AudioBuffer<float>& buffer, int startSample, int numSamples)
{
for (int i = voices.size(); --i >= 0;)
voices.getUnchecked (i)->renderNextBlock (buffer, startSample, numSamples);
}
void Synthesiser::renderVoices (AudioBuffer<double>& buffer, int startSample, int numSamples)
{
for (int i = voices.size(); --i >= 0;)
voices.getUnchecked (i)->renderNextBlock (buffer, startSample, numSamples);


modules/juce_audio_basics/synthesisers/juce_Synthesiser.h  (+24, -4)

@@ -182,9 +182,12 @@ public:
involve rendering as little as 1 sample at a time. In between rendering callbacks,
the voice's methods will be called to tell it about note and controller events.
*/
virtual void renderNextBlock (AudioSampleBuffer& outputBuffer,
virtual void renderNextBlock (AudioBuffer<float>& outputBuffer,
int startSample,
int numSamples) = 0;
virtual void renderNextBlock (AudioBuffer<double>& outputBuffer,
int startSample,
int numSamples);
/** Changes the voice's reference sample rate.
@@ -255,6 +258,8 @@ private:
SynthesiserSound::Ptr currentlyPlayingSound;
bool keyIsDown, sustainPedalDown, sostenutoPedalDown;
AudioBuffer<float> tempBuffer;
#if JUCE_CATCH_DEPRECATED_CODE_MISUSE
// Note the new parameters for this method.
virtual int stopNote (bool) { return 0; }
@@ -504,10 +509,17 @@ public:
both to the audio output buffer and the midi input buffer, so any midi events
with timestamps outside the specified region will be ignored.
*/
void renderNextBlock (AudioSampleBuffer& outputAudio,
inline void renderNextBlock (AudioBuffer<float>& outputAudio,
const MidiBuffer& inputMidi,
int startSample,
int numSamples);
int numSamples)
{ processNextBlock (outputAudio, inputMidi, startSample, numSamples); }
inline void renderNextBlock (AudioBuffer<double>& outputAudio,
const MidiBuffer& inputMidi,
int startSample,
int numSamples)
{ processNextBlock (outputAudio, inputMidi, startSample, numSamples); }
/** Returns the current target sample rate at which rendering is being done.
Subclasses may need to know this so that they can pitch things correctly.
@@ -545,7 +557,9 @@ protected:
By default this just calls renderNextBlock() on each voice, but you may need
to override it to handle custom cases.
*/
virtual void renderVoices (AudioSampleBuffer& outputAudio,
virtual void renderVoices (AudioBuffer<float>& outputAudio,
int startSample, int numSamples);
virtual void renderVoices (AudioBuffer<double>& outputAudio,
int startSample, int numSamples);
/** Searches through the voices to find one that's not currently playing, and
@@ -592,6 +606,12 @@ protected:
private:
//==============================================================================
template <typename floatType>
void processNextBlock (AudioBuffer<floatType>& outputAudio,
const MidiBuffer& inputMidi,
int startSample,
int numSamples);
//==============================================================================
double sampleRate;
uint32 lastNoteOnCounter;
int minimumSubBlockSize;


modules/juce_audio_devices/native/juce_mac_CoreAudio.cpp  (+2, -2)

@@ -233,7 +233,7 @@ public:
for (int i = 0; i < numStreams; ++i)
{
const AudioBuffer& b = bufList->mBuffers[i];
const ::AudioBuffer& b = bufList->mBuffers[i];
for (unsigned int j = 0; j < b.mNumberChannels; ++j)
{
@@ -1945,7 +1945,7 @@ private:
for (int i = 0; i < numStreams; ++i)
{
const AudioBuffer& b = bufList->mBuffers[i];
const ::AudioBuffer& b = bufList->mBuffers[i];
total += b.mNumberChannels;
}
}


modules/juce_audio_formats/codecs/juce_CoreAudioFormat.cpp  (+1, -1)

@@ -380,7 +380,7 @@ public:
&destinationAudioFormat);
if (status == noErr)
{
bufferList.malloc (1, sizeof (AudioBufferList) + numChannels * sizeof (AudioBuffer));
bufferList.malloc (1, sizeof (AudioBufferList) + numChannels * sizeof (::AudioBuffer));
bufferList->mNumberBuffers = numChannels;
ok = true;
}


modules/juce_audio_plugin_client/AU/juce_AU_Wrapper.mm  (+3, -3)

@@ -886,7 +886,7 @@ public:
for (unsigned int i = 0; i < outBuffer.mNumberBuffers; ++i)
{
AudioBuffer& buf = outBuffer.mBuffers[i];
::AudioBuffer& buf = outBuffer.mBuffers[i];
if (buf.mNumberChannels == 1)
{
@@ -908,7 +908,7 @@ public:
for (unsigned int i = 0; i < inBuffer.mNumberBuffers; ++i)
{
const AudioBuffer& buf = inBuffer.mBuffers[i];
const ::AudioBuffer& buf = inBuffer.mBuffers[i];
if (buf.mNumberChannels == 1)
{
@@ -1027,7 +1027,7 @@ public:
for (unsigned int i = 0; i < outBuffer.mNumberBuffers; ++i)
{
AudioBuffer& buf = outBuffer.mBuffers[i];
::AudioBuffer& buf = outBuffer.mBuffers[i];
if (buf.mNumberChannels > 1)
{


modules/juce_audio_plugin_client/VST/juce_VST_Wrapper.cpp  (+88, -26)

@@ -243,6 +243,27 @@ class JuceVSTWrapper : public AudioEffectX,
private Timer,
private AsyncUpdater
{
private:
//==============================================================================
template <typename FloatType>
struct VstTempBuffers
{
VstTempBuffers() {}
~VstTempBuffers() { release(); }
void release() noexcept
{
for (int i = tempChannels.size(); --i >= 0;)
delete[] (tempChannels.getUnchecked(i));
tempChannels.clear();
}
HeapBlock<FloatType*> channels;
Array<FloatType*> tempChannels; // see note in processReplacing()
juce::AudioBuffer<FloatType> processTempBuffer;
};
public:
//==============================================================================
JuceVSTWrapper (audioMasterCallback audioMasterCB, AudioProcessor* const af)
@@ -264,7 +285,6 @@ public:
#else
useNSView (false),
#endif
processTempBuffer (1, 1),
hostWindow (0)
{
filter->setPlayConfigDetails (numInChans, numOutChans, 0, 0);
@@ -280,6 +300,7 @@ public:
setNumOutputs (numOutChans);
canProcessReplacing (true);
canDoubleReplacing (filter->supportsDoublePrecisionProcessing());
isSynth ((JucePlugin_IsSynth) != 0);
setInitialDelay (filter->getLatencySamples());
@@ -310,7 +331,6 @@ public:
jassert (editorComp == 0);
channels.free();
deleteTempChannels();
jassert (activePlugins.contains (this));
@@ -500,23 +520,27 @@ public:
void process (float** inputs, float** outputs, VstInt32 numSamples)
{
VstTempBuffers<float>& tmpBuffers = floatTempBuffers;
const int numIn = numInChans;
const int numOut = numOutChans;
processTempBuffer.setSize (numIn, numSamples, false, false, true);
tmpBuffers.processTempBuffer.setSize (numIn, numSamples, false, false, true);
for (int i = numIn; --i >= 0;)
processTempBuffer.copyFrom (i, 0, outputs[i], numSamples);
tmpBuffers.processTempBuffer.copyFrom (i, 0, outputs[i], numSamples);
processReplacing (inputs, outputs, numSamples);
AudioSampleBuffer dest (outputs, numOut, numSamples);
for (int i = jmin (numIn, numOut); --i >= 0;)
dest.addFrom (i, 0, processTempBuffer, i, 0, numSamples);
dest.addFrom (i, 0, tmpBuffers.processTempBuffer, i, 0, numSamples);
}
void processReplacing (float** inputs, float** outputs, VstInt32 numSamples) override
template <typename FloatType>
void internalProcessReplacing (FloatType** inputs, FloatType** outputs,
VstInt32 numSamples, VstTempBuffers<FloatType>& tmpBuffers)
{
if (firstProcessCallback)
{
@@ -561,7 +585,7 @@ public:
int i;
for (i = 0; i < numOut; ++i)
{
float* chan = tempChannels.getUnchecked(i);
FloatType* chan = tmpBuffers.tempChannels.getUnchecked(i);
if (chan == nullptr)
{
@@ -574,24 +598,24 @@ public:
{
if (outputs[j] == chan)
{
chan = new float [blockSize * 2];
tempChannels.set (i, chan);
chan = new FloatType [blockSize * 2];
tmpBuffers.tempChannels.set (i, chan);
break;
}
}
}
if (i < numIn && chan != inputs[i])
memcpy (chan, inputs[i], sizeof (float) * (size_t) numSamples);
memcpy (chan, inputs[i], sizeof (FloatType) * (size_t) numSamples);
channels[i] = chan;
tmpBuffers.channels[i] = chan;
}
for (; i < numIn; ++i)
channels[i] = inputs[i];
tmpBuffers.channels[i] = inputs[i];
{
AudioSampleBuffer chans (channels, jmax (numIn, numOut), numSamples);
AudioBuffer<FloatType> chans (tmpBuffers.channels, jmax (numIn, numOut), numSamples);
if (isBypassed)
filter->processBlockBypassed (chans, midiEvents);
@@ -601,8 +625,8 @@ public:
// copy back any temp channels that may have been used..
for (i = 0; i < numOut; ++i)
if (const float* const chan = tempChannels.getUnchecked(i))
memcpy (outputs[i], chan, sizeof (float) * (size_t) numSamples);
if (const FloatType* const chan = tmpBuffers.tempChannels.getUnchecked(i))
memcpy (outputs[i], chan, sizeof (FloatType) * (size_t) numSamples);
}
}
@@ -648,16 +672,47 @@ public:
}
}
void processReplacing (float** inputs, float** outputs, VstInt32 sampleFrames) override
{
jassert (! filter->isUsingDoublePrecision());
internalProcessReplacing (inputs, outputs, sampleFrames, floatTempBuffers);
}
void processDoubleReplacing (double** inputs, double** outputs, VstInt32 sampleFrames) override
{
jassert (filter->isUsingDoublePrecision());
internalProcessReplacing (inputs, outputs, sampleFrames, doubleTempBuffers);
}
//==============================================================================
VstInt32 startProcess() override { return 0; }
VstInt32 stopProcess() override { return 0; }
//==============================================================================
bool setProcessPrecision (VstInt32 vstPrecision) override
{
if (! isProcessing)
{
if (filter != nullptr)
{
filter->setProcessingPrecision (vstPrecision == kVstProcessPrecision64 && filter->supportsDoublePrecisionProcessing()
? AudioProcessor::doublePrecision
: AudioProcessor::singlePrecision);
return true;
}
}
return false;
}
void resume() override
{
if (filter != nullptr)
{
isProcessing = true;
channels.calloc ((size_t) (numInChans + numOutChans));
floatTempBuffers.channels.calloc ((size_t) (numInChans + numOutChans));
doubleTempBuffers.channels.calloc ((size_t) (numInChans + numOutChans));
double rate = getSampleRate();
jassert (rate > 0);
@@ -699,7 +754,8 @@ public:
outgoingEvents.freeEvents();
isProcessing = false;
channels.free();
floatTempBuffers.channels.free();
doubleTempBuffers.channels.free();
deleteTempChannels();
}
@@ -1457,9 +1513,8 @@ private:
int numInChans, numOutChans;
bool isProcessing, isBypassed, hasShutdown, isInSizeWindow, firstProcessCallback;
bool shouldDeleteEditor, useNSView;
HeapBlock<float*> channels;
Array<float*> tempChannels; // see note in processReplacing()
AudioSampleBuffer processTempBuffer;
VstTempBuffers<float> floatTempBuffers;
VstTempBuffers<double> doubleTempBuffers;
#if JUCE_MAC
void* hostWindow;
@@ -1517,15 +1572,22 @@ private:
#endif
//==============================================================================
void deleteTempChannels()
template <typename FloatType>
void deleteTempChannels (VstTempBuffers<FloatType>& tmpBuffers)
{
for (int i = tempChannels.size(); --i >= 0;)
delete[] (tempChannels.getUnchecked(i));
tempChannels.clear();
tmpBuffers.release();
if (filter != nullptr)
tempChannels.insertMultiple (0, nullptr, filter->getNumInputChannels() + filter->getNumOutputChannels());
{
int numChannels = filter->getNumInputChannels() + filter->getNumOutputChannels();
tmpBuffers.tempChannels.insertMultiple (0, nullptr, numChannels);
}
}
void deleteTempChannels()
{
deleteTempChannels (floatTempBuffers);
deleteTempChannels (doubleTempBuffers);
}
JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (JuceVSTWrapper)


modules/juce_audio_plugin_client/VST3/juce_VST3_Wrapper.cpp  (+122, -54)

@@ -743,6 +743,14 @@ private:
JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (JuceVST3EditController)
};
namespace
{
template <typename FloatType> struct AudioBusPointerHelper {};
template <> struct AudioBusPointerHelper<float> { static inline float** impl (Vst::AudioBusBuffers& data) noexcept { return data.channelBuffers32; } };
template <> struct AudioBusPointerHelper<double> { static inline double** impl (Vst::AudioBusBuffers& data) noexcept { return data.channelBuffers64; } };
}
//==============================================================================
class JuceVST3Component : public Vst::IComponent,
public Vst::IAudioProcessor,
@@ -956,8 +964,8 @@ public:
? (int) processSetup.maxSamplesPerBlock
: bufferSize;
channelList.clear();
channelList.insertMultiple (0, nullptr, jmax (JucePlugin_MaxNumInputChannels, JucePlugin_MaxNumOutputChannels) + 1);
allocateChannelLists (channelListFloat);
allocateChannelLists (channelListDouble);
preparePlugin (sampleRate, bufferSize);
}
@@ -1440,7 +1448,9 @@ public:
tresult PLUGIN_API canProcessSampleSize (Steinberg::int32 symbolicSampleSize) override
{
return symbolicSampleSize == Vst::kSample32 ? kResultTrue : kResultFalse;
return (symbolicSampleSize == Vst::kSample32
|| (getPluginInstance().supportsDoublePrecisionProcessing()
&& symbolicSampleSize == Vst::kSample64)) ? kResultTrue : kResultFalse;
}
Steinberg::uint32 PLUGIN_API getLatencySamples() override
@@ -1456,6 +1466,10 @@ public:
processSetup = newSetup;
processContext.sampleRate = processSetup.sampleRate;
getPluginInstance().setProcessingPrecision (newSetup.symbolicSampleSize == Vst::kSample64
? AudioProcessor::doublePrecision
: AudioProcessor::singlePrecision);
preparePlugin (processSetup.sampleRate, processSetup.maxSamplesPerBlock);
return kResultTrue;
@@ -1532,9 +1546,13 @@ public:
tresult PLUGIN_API process (Vst::ProcessData& data) override
{
if (pluginInstance == nullptr)
return kResultFalse;
if ((processSetup.symbolicSampleSize == Vst::kSample64) != pluginInstance->isUsingDoublePrecision())
return kResultFalse;
if (data.processContext != nullptr)
processContext = *data.processContext;
else
@@ -1551,60 +1569,19 @@ public:
const int numMidiEventsComingIn = midiBuffer.getNumEvents();
#endif
const int numInputChans = (data.inputs != nullptr && data.inputs[0].channelBuffers32 != nullptr) ? (int) data.inputs[0].numChannels : 0;
const int numOutputChans = (data.outputs != nullptr && data.outputs[0].channelBuffers32 != nullptr) ? (int) data.outputs[0].numChannels : 0;
int totalChans = 0;
while (totalChans < numInputChans)
if (getHostType().isWavelab())
{
channelList.set (totalChans, data.inputs[0].channelBuffers32[totalChans]);
++totalChans;
}
while (totalChans < numOutputChans)
{
channelList.set (totalChans, data.outputs[0].channelBuffers32[totalChans]);
++totalChans;
}
AudioSampleBuffer buffer;
if (totalChans != 0)
buffer.setDataToReferTo (channelList.getRawDataPointer(), totalChans, (int) data.numSamples);
else if (getHostType().isWavelab()
&& pluginInstance->getNumInputChannels() + pluginInstance->getNumOutputChannels() > 0)
return kResultFalse;
{
const ScopedLock sl (pluginInstance->getCallbackLock());
pluginInstance->setNonRealtime (data.processMode == Vst::kOffline);
const int numInputChans = (data.inputs != nullptr && data.inputs[0].channelBuffers32 != nullptr) ? (int) data.inputs[0].numChannels : 0;
const int numOutputChans = (data.outputs != nullptr && data.outputs[0].channelBuffers32 != nullptr) ? (int) data.outputs[0].numChannels : 0;
if (data.inputParameterChanges != nullptr)
processParameterChanges (*data.inputParameterChanges);
if (pluginInstance->isSuspended())
{
buffer.clear();
}
else
{
if (isBypassed())
pluginInstance->processBlockBypassed (buffer, midiBuffer);
else
pluginInstance->processBlock (buffer, midiBuffer);
}
if ((pluginInstance->getNumInputChannels() + pluginInstance->getNumOutputChannels()) > 0
&& (numInputChans + numOutputChans) == 0)
return kResultFalse;
}
for (int i = 0; i < numOutputChans; ++i)
FloatVectorOperations::copy (data.outputs[0].channelBuffers32[i], buffer.getReadPointer (i), (int) data.numSamples);
// clear extra busses..
if (data.outputs != nullptr)
for (int i = 1; i < data.numOutputs; ++i)
for (int f = 0; f < data.outputs[i].numChannels; ++f)
FloatVectorOperations::clear (data.outputs[i].channelBuffers32[f], (int) data.numSamples);
if (processSetup.symbolicSampleSize == Vst::kSample32) processAudio<float> (data, channelListFloat);
else if (processSetup.symbolicSampleSize == Vst::kSample64) processAudio<double> (data, channelListDouble);
else jassertfalse;
#if JucePlugin_ProducesMidiOutput
if (data.outputEvents != nullptr)
@@ -1648,7 +1625,8 @@ private:
Vst::BusList audioInputs, audioOutputs, eventInputs, eventOutputs;
MidiBuffer midiBuffer;
Array<float*> channelList;
Array<float*> channelListFloat;
Array<double*> channelListDouble;
ScopedJuceInitialiser_GUI libraryInitialiser;
@@ -1656,6 +1634,52 @@ private:
static const char* kJucePrivateDataIdentifier;
//==============================================================================
template <typename FloatType>
void processAudio (Vst::ProcessData& data, Array<FloatType*>& channelList)
{
const int totalChans = prepareChannelLists (channelList, data);
AudioBuffer<FloatType> buffer;
if (totalChans != 0)
buffer.setDataToReferTo (channelList.getRawDataPointer(), totalChans, (int) data.numSamples);
{
const ScopedLock sl (pluginInstance->getCallbackLock());
pluginInstance->setNonRealtime (data.processMode == Vst::kOffline);
if (data.inputParameterChanges != nullptr)
processParameterChanges (*data.inputParameterChanges);
if (pluginInstance->isSuspended())
{
buffer.clear();
}
else
{
if (isBypassed())
pluginInstance->processBlockBypassed (buffer, midiBuffer);
else
pluginInstance->processBlock (buffer, midiBuffer);
}
}
if (data.outputs != nullptr)
{
for (int i = 0; i < data.numOutputs; ++i)
FloatVectorOperations::copy (getPointerForAudioBus<FloatType> (data.outputs[0])[i],
buffer.getReadPointer (i), (int) data.numSamples);
}
// clear extra busses..
if (data.outputs != nullptr)
for (int i = 1; i < data.numOutputs; ++i)
for (int f = 0; f < data.outputs[i].numChannels; ++f)
FloatVectorOperations::clear (getPointerForAudioBus<FloatType> (data.outputs[i])[f], (int) data.numSamples);
}
//==============================================================================
void addBusTo (Vst::BusList& busList, Vst::Bus* newBus)
{
@@ -1680,6 +1704,50 @@ private:
return nullptr;
}
//==============================================================================
template <typename FloatType>
void allocateChannelLists (Array<FloatType*>& channelList)
{
channelList.clear();
channelList.insertMultiple (0, nullptr, jmax (JucePlugin_MaxNumInputChannels, JucePlugin_MaxNumOutputChannels) + 1);
}
template <typename FloatType>
static FloatType** getPointerForAudioBus (Vst::AudioBusBuffers& data) noexcept
{
return AudioBusPointerHelper<FloatType>::impl (data);
}
template <typename FloatType>
static int prepareChannelLists (Array<FloatType*>& channelList, Vst::ProcessData& data) noexcept
{
int totalChans = 0;
FloatType** inChannelBuffers =
data.inputs != nullptr ? getPointerForAudioBus<FloatType> (data.inputs[0]) : nullptr;
FloatType** outChannelBuffers =
data.outputs != nullptr ? getPointerForAudioBus<FloatType> (data.outputs[0]) : nullptr;
const int numInputChans = (data.inputs != nullptr && inChannelBuffers != nullptr) ? (int) data.inputs[0].numChannels : 0;
const int numOutputChans = (data.outputs != nullptr && outChannelBuffers != nullptr) ? (int) data.outputs[0].numChannels : 0;
for (int idx = 0; totalChans < numInputChans; ++idx)
{
channelList.set (totalChans, inChannelBuffers[idx]);
++totalChans;
}
// note that the loop bounds are correct: as VST-3 is always process replacing
// we already know the output channel buffers of the first numInputChans channels
for (int idx = 0; totalChans < numOutputChans; ++idx)
{
channelList.set (totalChans, outChannelBuffers[idx]);
++totalChans;
}
return totalChans;
}
//==============================================================================
enum InternalParameters
{


modules/juce_audio_processors/format_types/juce_AudioUnitPluginFormat.mm  (+1, -1)

@@ -1223,7 +1223,7 @@ private:
//==============================================================================
size_t getAudioBufferSizeInBytes() const noexcept
{
return offsetof (AudioBufferList, mBuffers) + (sizeof (AudioBuffer) * numOutputBusChannels);
return offsetof (AudioBufferList, mBuffers) + (sizeof (::AudioBuffer) * numOutputBusChannels);
}
AudioBufferList* getAudioBufferListForBus (AudioUnitElement busIndex) const noexcept


modules/juce_audio_processors/format_types/juce_VST3Common.h  (+43, -19)

@@ -344,21 +344,25 @@ private:
};
//==============================================================================
namespace VST3BufferExchange
template <typename FloatType>
struct VST3BufferExchange
{
typedef Array<float*> Bus;
typedef Array<FloatType*> Bus;
typedef Array<Bus> BusMap;
static inline void assignRawPointer (Steinberg::Vst::AudioBusBuffers& vstBuffers, float** raw) { vstBuffers.channelBuffers32 = raw; }
static inline void assignRawPointer (Steinberg::Vst::AudioBusBuffers& vstBuffers, double** raw) { vstBuffers.channelBuffers64 = raw; }
/** Assigns a series of AudioSampleBuffer's channels to an AudioBusBuffers'
@warning For speed, does not check the channel count and offsets
according to the AudioSampleBuffer
*/
void associateBufferTo (Steinberg::Vst::AudioBusBuffers& vstBuffers,
Bus& bus,
AudioSampleBuffer& buffer,
int numChannels, int channelStartOffset,
int sampleOffset = 0)
static void associateBufferTo (Steinberg::Vst::AudioBusBuffers& vstBuffers,
Bus& bus,
AudioBuffer<FloatType>& buffer,
int numChannels, int channelStartOffset,
int sampleOffset = 0)
{
const int channelEnd = numChannels + channelStartOffset;
jassert (channelEnd >= 0 && channelEnd <= buffer.getNumChannels());
@@ -368,7 +372,7 @@ namespace VST3BufferExchange
for (int i = channelStartOffset; i < channelEnd; ++i)
bus.add (buffer.getWritePointer (i, sampleOffset));
vstBuffers.channelBuffers32 = bus.getRawDataPointer();
assignRawPointer (vstBuffers, bus.getRawDataPointer());
vstBuffers.numChannels = numChannels;
vstBuffers.silenceFlags = 0;
}
@@ -376,7 +380,7 @@ namespace VST3BufferExchange
static void mapArrangementToBusses (int& channelIndexOffset, int index,
Array<Steinberg::Vst::AudioBusBuffers>& result,
BusMap& busMapToUse, Steinberg::Vst::SpeakerArrangement arrangement,
AudioSampleBuffer& source)
AudioBuffer<FloatType>& source)
{
const int numChansForBus = BigInteger ((juce::int64) arrangement).countNumberOfSetBits();
@@ -387,18 +391,16 @@ namespace VST3BufferExchange
busMapToUse.add (Bus());
if (numChansForBus > 0)
{
associateBufferTo (result.getReference (index),
busMapToUse.getReference (index),
source, numChansForBus, channelIndexOffset);
}
channelIndexOffset += numChansForBus;
}
inline void mapBufferToBusses (Array<Steinberg::Vst::AudioBusBuffers>& result, BusMap& busMapToUse,
const Array<Steinberg::Vst::SpeakerArrangement>& arrangements,
AudioSampleBuffer& source)
static inline void mapBufferToBusses (Array<Steinberg::Vst::AudioBusBuffers>& result, BusMap& busMapToUse,
const Array<Steinberg::Vst::SpeakerArrangement>& arrangements,
AudioBuffer<FloatType>& source)
{
int channelIndexOffset = 0;
@@ -407,10 +409,10 @@ namespace VST3BufferExchange
arrangements.getUnchecked (i), source);
}
inline void mapBufferToBusses (Array<Steinberg::Vst::AudioBusBuffers>& result,
Steinberg::Vst::IAudioProcessor& processor,
BusMap& busMapToUse, bool isInput, int numBusses,
AudioSampleBuffer& source)
static inline void mapBufferToBusses (Array<Steinberg::Vst::AudioBusBuffers>& result,
Steinberg::Vst::IAudioProcessor& processor,
BusMap& busMapToUse, bool isInput, int numBusses,
AudioBuffer<FloatType>& source)
{
int channelIndexOffset = 0;
@@ -420,6 +422,28 @@ namespace VST3BufferExchange
getArrangementForBus (&processor, isInput, i),
source);
}
}
};
template <typename FloatType>
struct VST3FloatAndDoubleBusMapCompositeHelper {};
struct VST3FloatAndDoubleBusMapComposite
{
VST3BufferExchange<float>::BusMap floatVersion;
VST3BufferExchange<double>::BusMap doubleVersion;
template <typename FloatType>
inline typename VST3BufferExchange<FloatType>::BusMap& get() { return VST3FloatAndDoubleBusMapCompositeHelper<FloatType>::get (*this); }
};
template <> struct VST3FloatAndDoubleBusMapCompositeHelper<float>
{
static inline VST3BufferExchange<float>::BusMap& get (VST3FloatAndDoubleBusMapComposite& impl) { return impl.floatVersion; }
};
template <> struct VST3FloatAndDoubleBusMapCompositeHelper<double>
{
static inline VST3BufferExchange<double>::BusMap& get (VST3FloatAndDoubleBusMapComposite& impl) { return impl.doubleVersion; }
};
#endif // JUCE_VST3COMMON_H_INCLUDED

modules/juce_audio_processors/format_types/juce_VST3PluginFormat.cpp  (+47, -31)

@@ -1701,7 +1701,7 @@ public:
using namespace Vst;
ProcessSetup setup;
setup.symbolicSampleSize = kSample32;
setup.symbolicSampleSize = isUsingDoublePrecision() ? kSample64 : kSample32;
setup.maxSamplesPerBlock = estimatedSamplesPerBlock;
setup.sampleRate = newSampleRate;
setup.processMode = isNonRealtime() ? kOffline : kRealtime;
@@ -1768,39 +1768,56 @@ public:
JUCE_CATCH_ALL_ASSERT
}
void processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages) override
bool supportsDoublePrecisionProcessing() const override
{
using namespace Vst;
return (processor->canProcessSampleSize (Vst::kSample64) == kResultTrue);
}
if (isActive
&& processor != nullptr
&& processor->canProcessSampleSize (kSample32) == kResultTrue)
{
const int numSamples = buffer.getNumSamples();
void processBlock (AudioBuffer<float>& buffer, MidiBuffer& midiMessages) override
{
jassert (! isUsingDoublePrecision());
if (isActive && processor != nullptr)
processAudio (buffer, midiMessages, Vst::kSample32);
}
void processBlock (AudioBuffer<double>& buffer, MidiBuffer& midiMessages) override
{
jassert (isUsingDoublePrecision());
if (isActive && processor != nullptr)
processAudio (buffer, midiMessages, Vst::kSample64);
}
template <typename FloatType>
void processAudio (AudioBuffer<FloatType>& buffer, MidiBuffer& midiMessages,
Vst::SymbolicSampleSizes sampleSize)
{
using namespace Vst;
const int numSamples = buffer.getNumSamples();
ProcessData data;
data.processMode = isNonRealtime() ? kOffline : kRealtime;
data.symbolicSampleSize = kSample32;
data.numInputs = numInputAudioBusses;
data.numOutputs = numOutputAudioBusses;
data.inputParameterChanges = inputParameterChanges;
data.outputParameterChanges = outputParameterChanges;
data.numSamples = (Steinberg::int32) numSamples;
ProcessData data;
data.processMode = isNonRealtime() ? kOffline : kRealtime;
data.symbolicSampleSize = sampleSize;
data.numInputs = numInputAudioBusses;
data.numOutputs = numOutputAudioBusses;
data.inputParameterChanges = inputParameterChanges;
data.outputParameterChanges = outputParameterChanges;
data.numSamples = (Steinberg::int32) numSamples;
updateTimingInformation (data, getSampleRate());
updateTimingInformation (data, getSampleRate());
for (int i = getNumInputChannels(); i < buffer.getNumChannels(); ++i)
buffer.clear (i, 0, numSamples);
for (int i = getNumInputChannels(); i < buffer.getNumChannels(); ++i)
buffer.clear (i, 0, numSamples);
associateTo (data, buffer);
associateTo (data, midiMessages);
associateTo (data, buffer);
associateTo (data, midiMessages);
processor->process (data);
processor->process (data);
MidiEventList::toMidiBuffer (midiMessages, *midiOutputs);
MidiEventList::toMidiBuffer (midiMessages, *midiOutputs);
inputParameterChanges->clearAllQueues();
}
inputParameterChanges->clearAllQueues();
}
//==============================================================================
@@ -2150,7 +2167,7 @@ private:
*/
int numInputAudioBusses, numOutputAudioBusses;
Array<Vst::SpeakerArrangement> inputArrangements, outputArrangements; // Caching to improve performance and to avoid possible non-thread-safe calls to getBusArrangements().
VST3BufferExchange::BusMap inputBusMap, outputBusMap;
VST3FloatAndDoubleBusMapComposite inputBusMap, outputBusMap;
Array<Vst::AudioBusBuffers> inputBusses, outputBusses;
//==============================================================================
@@ -2392,12 +2409,11 @@ private:
}
//==============================================================================
void associateTo (Vst::ProcessData& destination, AudioSampleBuffer& buffer)
template <typename FloatType>
void associateTo (Vst::ProcessData& destination, AudioBuffer<FloatType>& buffer)
{
using namespace VST3BufferExchange;
mapBufferToBusses (inputBusses, inputBusMap, inputArrangements, buffer);
mapBufferToBusses (outputBusses, outputBusMap, outputArrangements, buffer);
VST3BufferExchange<FloatType>::mapBufferToBusses (inputBusses, inputBusMap.get<FloatType>(), inputArrangements, buffer);
VST3BufferExchange<FloatType>::mapBufferToBusses (outputBusses, outputBusMap.get<FloatType>(), outputArrangements, buffer);
destination.inputs = inputBusses.getRawDataPointer();
destination.outputs = outputBusses.getRawDataPointer();
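On the hosting side the contract introduced here is: query supportsDoublePrecisionProcessing(), fix the precision with setProcessingPrecision() before prepareToPlay(), and then always call the processBlock overload that matches that precision (the jassert calls above guard exactly this). A hedged sketch of that call sequence against the AudioProcessor API in this commit; the helper function and buffer objects are illustrative, not part of the commit:

    // Assumes the juce_audio_processors headers are available.
    static void prepareAndRender (AudioProcessor& proc, bool wantDouble,
                                  AudioBuffer<float>& audio32,
                                  AudioBuffer<double>& audio64,
                                  MidiBuffer& midi,
                                  double sampleRate, int blockSize)
    {
        const bool useDouble = wantDouble && proc.supportsDoublePrecisionProcessing();

        // the precision must be chosen before prepareToPlay() so the processor
        // can allocate internal buffers of the right sample type
        proc.setProcessingPrecision (useDouble ? AudioProcessor::doublePrecision
                                               : AudioProcessor::singlePrecision);
        proc.prepareToPlay (sampleRate, blockSize);

        if (proc.isUsingDoublePrecision())
            proc.processBlock (audio64, midi);   // 64-bit path (e.g. VST3 kSample64)
        else
            proc.processBlock (audio32, midi);   // 32-bit path (kSample32)
    }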


+ 145
- 104
modules/juce_audio_processors/format_types/juce_VSTPluginFormat.cpp View File

@@ -715,8 +715,7 @@ public:
name (mh->pluginName),
wantsMidiMessages (false),
initialised (false),
isPowerOn (false),
tempBuffer (1, 1)
isPowerOn (false)
{
try
{
@@ -938,6 +937,16 @@ public:
dispatch (effSetSampleRate, 0, 0, 0, (float) rate);
dispatch (effSetBlockSize, 0, jmax (16, samplesPerBlockExpected), 0, 0);
if (supportsDoublePrecisionProcessing())
{
VstInt32 vstPrecision = isUsingDoublePrecision() ? kVstProcessPrecision64
: kVstProcessPrecision32;
// if you get an assertion here then your plug-in claims it supports double precision
// but returns an error when we try to change the precision
jassert (dispatch (effSetProcessPrecision, 0, (VstIntPtr) vstPrecision, 0, 0) > 0);
}
tempBuffer.setSize (jmax (1, effect->numOutputs), samplesPerBlockExpected);
if (! isPowerOn)
@@ -980,110 +989,22 @@ public:
}
}
void processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages) override
void processBlock (AudioBuffer<float>& buffer, MidiBuffer& midiMessages) override
{
const int numSamples = buffer.getNumSamples();
if (initialised)
{
if (AudioPlayHead* const currentPlayHead = getPlayHead())
{
AudioPlayHead::CurrentPositionInfo position;
if (currentPlayHead->getCurrentPosition (position))
{
vstHostTime.samplePos = (double) position.timeInSamples;
vstHostTime.tempo = position.bpm;
vstHostTime.timeSigNumerator = position.timeSigNumerator;
vstHostTime.timeSigDenominator = position.timeSigDenominator;
vstHostTime.ppqPos = position.ppqPosition;
vstHostTime.barStartPos = position.ppqPositionOfLastBarStart;
vstHostTime.flags |= kVstTempoValid | kVstTimeSigValid | kVstPpqPosValid | kVstBarsValid;
VstInt32 newTransportFlags = 0;
if (position.isPlaying) newTransportFlags |= kVstTransportPlaying;
if (position.isRecording) newTransportFlags |= kVstTransportRecording;
if (newTransportFlags != (vstHostTime.flags & (kVstTransportPlaying | kVstTransportRecording)))
vstHostTime.flags = (vstHostTime.flags & ~(kVstTransportPlaying | kVstTransportRecording)) | newTransportFlags | kVstTransportChanged;
else
vstHostTime.flags &= ~kVstTransportChanged;
switch (position.frameRate)
{
case AudioPlayHead::fps24: setHostTimeFrameRate (0, 24.0, position.timeInSeconds); break;
case AudioPlayHead::fps25: setHostTimeFrameRate (1, 25.0, position.timeInSeconds); break;
case AudioPlayHead::fps2997: setHostTimeFrameRate (2, 29.97, position.timeInSeconds); break;
case AudioPlayHead::fps30: setHostTimeFrameRate (3, 30.0, position.timeInSeconds); break;
case AudioPlayHead::fps2997drop: setHostTimeFrameRate (4, 29.97, position.timeInSeconds); break;
case AudioPlayHead::fps30drop: setHostTimeFrameRate (5, 29.97, position.timeInSeconds); break;
default: break;
}
if (position.isLooping)
{
vstHostTime.cycleStartPos = position.ppqLoopStart;
vstHostTime.cycleEndPos = position.ppqLoopEnd;
vstHostTime.flags |= (kVstCyclePosValid | kVstTransportCycleActive);
}
else
{
vstHostTime.flags &= ~(kVstCyclePosValid | kVstTransportCycleActive);
}
}
}
vstHostTime.nanoSeconds = getVSTHostTimeNanoseconds();
if (wantsMidiMessages)
{
midiEventsToSend.clear();
midiEventsToSend.ensureSize (1);
MidiBuffer::Iterator iter (midiMessages);
const uint8* midiData;
int numBytesOfMidiData, samplePosition;
while (iter.getNextEvent (midiData, numBytesOfMidiData, samplePosition))
{
midiEventsToSend.addEvent (midiData, numBytesOfMidiData,
jlimit (0, numSamples - 1, samplePosition));
}
effect->dispatcher (effect, effProcessEvents, 0, 0, midiEventsToSend.events, 0);
}
_clearfp();
if ((effect->flags & effFlagsCanReplacing) != 0)
{
effect->processReplacing (effect, buffer.getArrayOfWritePointers(), buffer.getArrayOfWritePointers(), numSamples);
}
else
{
tempBuffer.setSize (effect->numOutputs, numSamples);
tempBuffer.clear();
effect->process (effect, buffer.getArrayOfWritePointers(), tempBuffer.getArrayOfWritePointers(), numSamples);
for (int i = effect->numOutputs; --i >= 0;)
buffer.copyFrom (i, 0, tempBuffer.getReadPointer (i), numSamples);
}
}
else
{
// Not initialised, so just bypass..
for (int i = 0; i < getNumOutputChannels(); ++i)
buffer.clear (i, 0, buffer.getNumSamples());
}
jassert (! isUsingDoublePrecision());
processAudio (buffer, midiMessages);
}
{
// copy any incoming midi..
const ScopedLock sl (midiInLock);
void processBlock (AudioBuffer<double>& buffer, MidiBuffer& midiMessages) override
{
jassert (isUsingDoublePrecision());
processAudio (buffer, midiMessages);
}
midiMessages.swapWith (incomingMidi);
incomingMidi.clear();
}
bool supportsDoublePrecisionProcessing() const override
{
return ((effect->flags & effFlagsCanReplacing) != 0
&& (effect->flags & effFlagsCanDoubleReplacing) != 0);
}
//==============================================================================
@@ -1673,12 +1594,132 @@ private:
CriticalSection lock;
bool wantsMidiMessages, initialised, isPowerOn;
mutable StringArray programNames;
AudioSampleBuffer tempBuffer;
AudioBuffer<float> tempBuffer;
CriticalSection midiInLock;
MidiBuffer incomingMidi;
VSTMidiEventList midiEventsToSend;
VstTimeInfo vstHostTime;
//==============================================================================
template <typename FloatType>
void processAudio (AudioBuffer<FloatType>& buffer, MidiBuffer& midiMessages)
{
const int numSamples = buffer.getNumSamples();
if (initialised)
{
if (AudioPlayHead* const currentPlayHead = getPlayHead())
{
AudioPlayHead::CurrentPositionInfo position;
if (currentPlayHead->getCurrentPosition (position))
{
vstHostTime.samplePos = (double) position.timeInSamples;
vstHostTime.tempo = position.bpm;
vstHostTime.timeSigNumerator = position.timeSigNumerator;
vstHostTime.timeSigDenominator = position.timeSigDenominator;
vstHostTime.ppqPos = position.ppqPosition;
vstHostTime.barStartPos = position.ppqPositionOfLastBarStart;
vstHostTime.flags |= kVstTempoValid | kVstTimeSigValid | kVstPpqPosValid | kVstBarsValid;
VstInt32 newTransportFlags = 0;
if (position.isPlaying) newTransportFlags |= kVstTransportPlaying;
if (position.isRecording) newTransportFlags |= kVstTransportRecording;
if (newTransportFlags != (vstHostTime.flags & (kVstTransportPlaying | kVstTransportRecording)))
vstHostTime.flags = (vstHostTime.flags & ~(kVstTransportPlaying | kVstTransportRecording)) | newTransportFlags | kVstTransportChanged;
else
vstHostTime.flags &= ~kVstTransportChanged;
switch (position.frameRate)
{
case AudioPlayHead::fps24: setHostTimeFrameRate (0, 24.0, position.timeInSeconds); break;
case AudioPlayHead::fps25: setHostTimeFrameRate (1, 25.0, position.timeInSeconds); break;
case AudioPlayHead::fps2997: setHostTimeFrameRate (2, 29.97, position.timeInSeconds); break;
case AudioPlayHead::fps30: setHostTimeFrameRate (3, 30.0, position.timeInSeconds); break;
case AudioPlayHead::fps2997drop: setHostTimeFrameRate (4, 29.97, position.timeInSeconds); break;
case AudioPlayHead::fps30drop: setHostTimeFrameRate (5, 29.97, position.timeInSeconds); break;
default: break;
}
if (position.isLooping)
{
vstHostTime.cycleStartPos = position.ppqLoopStart;
vstHostTime.cycleEndPos = position.ppqLoopEnd;
vstHostTime.flags |= (kVstCyclePosValid | kVstTransportCycleActive);
}
else
{
vstHostTime.flags &= ~(kVstCyclePosValid | kVstTransportCycleActive);
}
}
}
vstHostTime.nanoSeconds = getVSTHostTimeNanoseconds();
if (wantsMidiMessages)
{
midiEventsToSend.clear();
midiEventsToSend.ensureSize (1);
MidiBuffer::Iterator iter (midiMessages);
const uint8* midiData;
int numBytesOfMidiData, samplePosition;
while (iter.getNextEvent (midiData, numBytesOfMidiData, samplePosition))
{
midiEventsToSend.addEvent (midiData, numBytesOfMidiData,
jlimit (0, numSamples - 1, samplePosition));
}
effect->dispatcher (effect, effProcessEvents, 0, 0, midiEventsToSend.events, 0);
}
_clearfp();
invokeProcessFunction (buffer, numSamples);
}
else
{
// Not initialised, so just bypass..
for (int i = 0; i < getNumOutputChannels(); ++i)
buffer.clear (i, 0, buffer.getNumSamples());
}
{
// copy any incoming midi..
const ScopedLock sl (midiInLock);
midiMessages.swapWith (incomingMidi);
incomingMidi.clear();
}
}
//==============================================================================
inline void invokeProcessFunction (AudioBuffer<float>& buffer, VstInt32 sampleFrames)
{
if ((effect->flags & effFlagsCanReplacing) != 0)
{
effect->processReplacing (effect, buffer.getArrayOfWritePointers(), buffer.getArrayOfWritePointers(), sampleFrames);
}
else
{
tempBuffer.setSize (effect->numOutputs, sampleFrames);
tempBuffer.clear();
effect->process (effect, buffer.getArrayOfWritePointers(), tempBuffer.getArrayOfWritePointers(), sampleFrames);
for (int i = effect->numOutputs; --i >= 0;)
buffer.copyFrom (i, 0, tempBuffer.getReadPointer (i), sampleFrames);
}
}
inline void invokeProcessFunction (AudioBuffer<double>& buffer, VstInt32 sampleFrames)
{
effect->processDoubleReplacing (effect, buffer.getArrayOfWritePointers(), buffer.getArrayOfWritePointers(), sampleFrames);
}
//==============================================================================
void setHostTimeFrameRate (long frameRateIndex, double frameRate, double currentTime) noexcept
{


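Splitting the per-type work into the two invokeProcessFunction overloads lets the long processAudio body (playhead, MIDI and bypass handling) be written once, while the float overload picks processReplacing/process and the double overload picks processDoubleReplacing. Stripped of the VST specifics, the dispatch shape is as follows (standalone sketch, hypothetical names):

    #include <cstdio>

    struct Engine
    {
        template <typename FloatType>
        void processAudio (FloatType* data, int numSamples)
        {
            // ... shared setup (timing, MIDI, flags) would go here ...
            invokeProcessFunction (data, numSamples);   // overload resolves per sample type
        }

        void invokeProcessFunction (float* data, int numSamples)
        {
            std::printf ("32-bit path, %d samples\n", numSamples);
            (void) data;   // the real wrapper would call processReplacing here
        }

        void invokeProcessFunction (double* data, int numSamples)
        {
            std::printf ("64-bit path, %d samples\n", numSamples);
            (void) data;   // the real wrapper would call processDoubleReplacing here
        }
    };

    int main()
    {
        Engine e;
        float  f[8] = {};
        double d[8] = {};
        e.processAudio (f, 8);   // prints the 32-bit line
        e.processAudio (d, 8);   // prints the 64-bit line
    }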
+ 29
- 2
modules/juce_audio_processors/processors/juce_AudioProcessor.cpp View File

@@ -38,7 +38,8 @@ AudioProcessor::AudioProcessor()
numOutputChannels (0),
latencySamples (0),
suspended (false),
nonRealtime (false)
nonRealtime (false),
processingPrecision (singlePrecision)
{
}
@@ -323,7 +324,33 @@ void AudioProcessor::suspendProcessing (const bool shouldBeSuspended)
}
void AudioProcessor::reset() {}
void AudioProcessor::processBlockBypassed (AudioSampleBuffer&, MidiBuffer&) {}
void AudioProcessor::processBlockBypassed (AudioBuffer<float>&, MidiBuffer&) {}
void AudioProcessor::processBlockBypassed (AudioBuffer<double>&, MidiBuffer&) {}
void AudioProcessor::processBlock (AudioBuffer<double>& buffer, MidiBuffer& midiMessages)
{
ignoreUnused (buffer, midiMessages);
// If you hit this assertion then either the caller called the double
// precision version of processBlock on a processor which does not support it
// (i.e. supportsDoublePrecisionProcessing() returns false), or the implementation
// of the AudioProcessor forgot to override the double precision version of this method
jassertfalse;
}
void AudioProcessor::setProcessingPrecision (ProcessingPrecision precision) noexcept
{
// If you hit this assertion then you're trying to use double precision
// processing on a processor which does not support it!
jassert (precision != doublePrecision || supportsDoublePrecisionProcessing());
processingPrecision = precision;
}
bool AudioProcessor::supportsDoublePrecisionProcessing() const
{
return false;
}
//==============================================================================
void AudioProcessor::editorBeingDeleted (AudioProcessorEditor* const editor) noexcept


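The assertion in setProcessingPrecision() means a client must check supportsDoublePrecisionProcessing() before requesting doublePrecision. When a client holds a 64-bit buffer but the processor only runs in single precision, the approach taken elsewhere in this commit (ProcessBufferOp::callProcess, AudioProcessorPlayer) is to convert around the call. A hedged sketch of that fallback; the helper name and scratch buffer are illustrative:

    // Assumes the juce_audio_processors headers; processDoubleOrBridge is not JUCE API.
    static void processDoubleOrBridge (AudioProcessor& proc,
                                       AudioBuffer<double>& buffer,
                                       AudioBuffer<float>& scratch,
                                       MidiBuffer& midi)
    {
        if (proc.isUsingDoublePrecision())
        {
            proc.processBlock (buffer, midi);    // processor was prepared for 64-bit audio
        }
        else
        {
            scratch.makeCopyOf (buffer);         // double -> float conversion
            proc.processBlock (scratch, midi);
            buffer.makeCopyOf (scratch);         // float -> double conversion
        }
    }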
+ 113
- 2
modules/juce_audio_processors/processors/juce_AudioProcessor.h View File

@@ -48,6 +48,14 @@ protected:
AudioProcessor();
public:
//==============================================================================
enum ProcessingPrecision
{
singlePrecision,
doublePrecision
};
//==============================================================================
/** Destructor. */
virtual ~AudioProcessor();
@@ -125,9 +133,74 @@ public:
processBlock() method to send out an asynchronous message. You could also use
the AsyncUpdater class in a similar way.
*/
virtual void processBlock (AudioSampleBuffer& buffer,
virtual void processBlock (AudioBuffer<float>& buffer,
MidiBuffer& midiMessages) = 0;
/** Renders the next block.
When this method is called, the buffer contains a number of channels which is
at least as great as the maximum number of input and output channels that
this filter is using. It will be filled with the filter's input data and
should be replaced with the filter's output.
So for example if your filter has 2 input channels and 4 output channels, then
the buffer will contain 4 channels, the first two being filled with the
input data. Your filter should read these, do its processing, and replace
the contents of all 4 channels with its output.
Or if your filter has 5 inputs and 2 outputs, the buffer will have 5 channels,
all filled with data, and your filter should overwrite the first 2 of these
with its output. But be VERY careful not to write anything to the last 3
channels, as these might be mapped to memory that the host assumes is read-only!
Note that if you have more outputs than inputs, then only those channels that
correspond to an input channel are guaranteed to contain sensible data - e.g.
in the case of 2 inputs and 4 outputs, the first two channels contain the input,
but the last two channels may contain garbage, so you should be careful not to
let this pass through without being overwritten or cleared.
Also note that the buffer may have more channels than are strictly necessary,
but you should only read/write from the ones that your filter is supposed to
be using.
The number of samples in these buffers is NOT guaranteed to be the same for every
callback, and may be more or less than the estimated value given to prepareToPlay().
Your code must be able to cope with variable-sized blocks, or you're going to get
clicks and crashes!
Also note that some hosts will occasionally decide to pass a buffer containing
zero samples, so make sure that your algorithm can deal with that!
If the filter is receiving a midi input, then the midiMessages array will be filled
with the midi messages for this block. Each message's timestamp will indicate the
message's time, as a number of samples from the start of the block.
Any messages left in the midi buffer when this method has finished are assumed to
be the filter's midi output. This means that your filter should be careful to
clear any incoming messages from the array if it doesn't want them to be passed-on.
Be very careful about what you do in this callback - it's going to be called by
the audio thread, so any kind of interaction with the UI is absolutely
out of the question. If you change a parameter in here and need to tell your UI to
update itself, the best way is probably to inherit from a ChangeBroadcaster, let
the UI components register as listeners, and then call sendChangeMessage() inside the
processBlock() method to send out an asynchronous message. You could also use
the AsyncUpdater class in a similar way.
*/
virtual void processBlock (AudioBuffer<double>& buffer,
MidiBuffer& midiMessages);
/** Renders the next block when the processor is being bypassed.
The default implementation of this method will pass-through any incoming audio, but
you may override this method e.g. to add latency compensation to the data to match
the processor's latency characteristics. This will avoid situations where bypassing
will shift the signal forward in time, possibly creating pre-echo effects and odd timings.
Another use for this method would be to cross-fade or morph between the wet (not bypassed)
and dry (bypassed) signals.
*/
virtual void processBlockBypassed (AudioBuffer<float>& buffer,
MidiBuffer& midiMessages);
/** Renders the next block when the processor is being bypassed.
The default implementation of this method will pass-through any incoming audio, but
you may override this method e.g. to add latency compensation to the data to match
@@ -136,9 +209,46 @@ public:
Another use for this method would be to cross-fade or morph between the wet (not bypassed)
and dry (bypassed) signals.
*/
virtual void processBlockBypassed (AudioSampleBuffer& buffer,
virtual void processBlockBypassed (AudioBuffer<double>& buffer,
MidiBuffer& midiMessages);
//==============================================================================
/** Returns true if the audio processor supports double precision floating point processing.
The default implementation will always return false.
If you return true here then you must override the double precision versions
of processBlock. Additionally, you must call getProcessingPrecision() in
your prepareToPlay method to determine the precision with which you need to
allocate your internal buffers.
@see getProcessingPrecision, setProcessingPrecision
*/
virtual bool supportsDoublePrecisionProcessing() const;
/** Returns the precision-mode of the processor.
Depending on the result of this method you MUST call the corresponding version
of processBlock. The default processing precision is single precision.
@see setProcessingPrecision, supportsDoublePrecisionProcessing
*/
ProcessingPrecision getProcessingPrecision() const noexcept { return processingPrecision; }
/** Returns true if the current precision is set to doublePrecision. */
bool isUsingDoublePrecision() const noexcept { return processingPrecision == doublePrecision; }
/** Changes the processing precision of the receiver. A client of the AudioProcessor
calls this function to indicate which version of processBlock (single or double
precision) it intends to call. The client MUST call this function before calling
the prepareToPlay method so that the receiver can do any necessary allocations
in the prepareToPlay() method. An implementation of prepareToPlay() should call
getProcessingPrecision() to determine with which precision it should allocate
its internal buffers.
Note that setting the processing precision to double floating point precision
on a receiver which does not support double precision processing (i.e.
supportsDoublePrecisionProcessing() returns false) will result in an assertion.
@see getProcessingPrecision, supportsDoublePrecisionProcessing
*/
void setProcessingPrecision (ProcessingPrecision precision) noexcept;
//==============================================================================
/** Returns the current AudioPlayHead object that should be used to find
out the state and position of the playhead.
@@ -731,6 +841,7 @@ private:
double sampleRate;
int blockSize, numInputChannels, numOutputChannels, latencySamples;
bool suspended, nonRealtime;
ProcessingPrecision processingPrecision;
CriticalSection callbackLock, listenerLock;
String inputSpeakerArrangement, outputSpeakerArrangement;


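Taken together, the new documentation describes one canonical implementation pattern for a double-capable processor: report support, then forward both processBlock overloads to a single templated body (the same shape used by the plugin demo in this commit). A hedged fragment, to be placed inside an existing AudioProcessor subclass; the DSP body is a placeholder:

    bool supportsDoublePrecisionProcessing() const override    { return true; }

    void processBlock (AudioBuffer<float>&  buffer, MidiBuffer& midi) override  { process (buffer, midi); }
    void processBlock (AudioBuffer<double>& buffer, MidiBuffer& midi) override  { process (buffer, midi); }

    template <typename FloatType>
    void process (AudioBuffer<FloatType>& buffer, MidiBuffer&)
    {
        // shared DSP written once for both sample types; a trivial gain as a stand-in
        buffer.applyGain (static_cast<FloatType> (0.5));
    }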
+ 235
- 54
modules/juce_audio_processors/processors/juce_AudioProcessorGraph.cpp View File

@@ -25,28 +25,81 @@
const int AudioProcessorGraph::midiChannelIndex = 0x1000;
//==============================================================================
namespace GraphRenderingOps
template <typename FloatType, typename Impl> struct FloatDoubleUtil {};
template <typename Tag, typename Type> struct FloatDoubleType {};
template <typename Tag>
struct FloatAndDoubleComposition
{
typedef typename FloatDoubleType<Tag, float>::Type FloatType;
typedef typename FloatDoubleType<Tag, double>::Type DoubleType;
template <typename FloatingType>
inline typename FloatDoubleType<Tag, FloatingType>::Type& get() noexcept
{
return FloatDoubleUtil<FloatingType, FloatAndDoubleComposition<Tag> >::get (*this);
}
FloatType floatVersion;
DoubleType doubleVersion;
};
template <typename Impl> struct FloatDoubleUtil<float, Impl> { static inline typename Impl::FloatType& get (Impl& i) noexcept { return i.floatVersion; } };
template <typename Impl> struct FloatDoubleUtil<double, Impl> { static inline typename Impl::DoubleType& get (Impl& i) noexcept { return i.doubleVersion; } };
struct FloatPlaceholder;
template <typename FloatingType> struct FloatDoubleType<HeapBlock<FloatPlaceholder>, FloatingType> { typedef HeapBlock<FloatingType> Type; };
template <typename FloatingType> struct FloatDoubleType<HeapBlock<FloatPlaceholder*>, FloatingType> { typedef HeapBlock<FloatingType*> Type; };
template <typename FloatingType> struct FloatDoubleType<AudioBuffer<FloatPlaceholder>, FloatingType> { typedef AudioBuffer<FloatingType> Type; };
template <typename FloatingType> struct FloatDoubleType<AudioBuffer<FloatPlaceholder>*, FloatingType> { typedef AudioBuffer<FloatingType>* Type; };
//==============================================================================
struct AudioGraphRenderingOp
namespace GraphRenderingOps
{
AudioGraphRenderingOp() noexcept {}
virtual ~AudioGraphRenderingOp() {}
virtual void perform (AudioSampleBuffer& sharedBufferChans,
struct AudioGraphRenderingOpBase
{
AudioGraphRenderingOpBase() noexcept {}
virtual ~AudioGraphRenderingOpBase() {}
virtual void perform (AudioBuffer<float>& sharedBufferChans,
const OwnedArray<MidiBuffer>& sharedMidiBuffers,
const int numSamples) = 0;
virtual void perform (AudioBuffer<double>& sharedBufferChans,
const OwnedArray<MidiBuffer>& sharedMidiBuffers,
const int numSamples) = 0;
JUCE_LEAK_DETECTOR (AudioGraphRenderingOp)
JUCE_LEAK_DETECTOR (AudioGraphRenderingOpBase)
};
// use CRTP
template <class Child>
struct AudioGraphRenderingOp : public AudioGraphRenderingOpBase
{
void perform (AudioBuffer<float>& sharedBufferChans,
const OwnedArray<MidiBuffer>& sharedMidiBuffers,
const int numSamples) override
{
static_cast<Child*> (this)->perform (sharedBufferChans, sharedMidiBuffers, numSamples);
}
void perform (AudioBuffer<double>& sharedBufferChans,
const OwnedArray<MidiBuffer>& sharedMidiBuffers,
const int numSamples) override
{
static_cast<Child*> (this)->perform (sharedBufferChans, sharedMidiBuffers, numSamples);
}
};
//==============================================================================
struct ClearChannelOp : public AudioGraphRenderingOp
struct ClearChannelOp : public AudioGraphRenderingOp<ClearChannelOp>
{
ClearChannelOp (const int channel) noexcept : channelNum (channel) {}
void perform (AudioSampleBuffer& sharedBufferChans, const OwnedArray<MidiBuffer>&, const int numSamples)
template <typename FloatType>
void perform (AudioBuffer<FloatType>& sharedBufferChans, const OwnedArray<MidiBuffer>&, const int numSamples)
{
sharedBufferChans.clear (channelNum, 0, numSamples);
}
@@ -57,13 +110,14 @@ struct ClearChannelOp : public AudioGraphRenderingOp
};
//==============================================================================
struct CopyChannelOp : public AudioGraphRenderingOp
struct CopyChannelOp : public AudioGraphRenderingOp<CopyChannelOp>
{
CopyChannelOp (const int srcChan, const int dstChan) noexcept
: srcChannelNum (srcChan), dstChannelNum (dstChan)
{}
void perform (AudioSampleBuffer& sharedBufferChans, const OwnedArray<MidiBuffer>&, const int numSamples)
template <typename FloatType>
void perform (AudioBuffer<FloatType>& sharedBufferChans, const OwnedArray<MidiBuffer>&, const int numSamples)
{
sharedBufferChans.copyFrom (dstChannelNum, 0, sharedBufferChans, srcChannelNum, 0, numSamples);
}
@@ -74,13 +128,14 @@ struct CopyChannelOp : public AudioGraphRenderingOp
};
//==============================================================================
struct AddChannelOp : public AudioGraphRenderingOp
struct AddChannelOp : public AudioGraphRenderingOp<AddChannelOp>
{
AddChannelOp (const int srcChan, const int dstChan) noexcept
: srcChannelNum (srcChan), dstChannelNum (dstChan)
{}
void perform (AudioSampleBuffer& sharedBufferChans, const OwnedArray<MidiBuffer>&, const int numSamples)
template <typename FloatType>
void perform (AudioBuffer<FloatType>& sharedBufferChans, const OwnedArray<MidiBuffer>&, const int numSamples)
{
sharedBufferChans.addFrom (dstChannelNum, 0, sharedBufferChans, srcChannelNum, 0, numSamples);
}
@@ -91,11 +146,12 @@ struct AddChannelOp : public AudioGraphRenderingOp
};
//==============================================================================
struct ClearMidiBufferOp : public AudioGraphRenderingOp
struct ClearMidiBufferOp : public AudioGraphRenderingOp<ClearMidiBufferOp>
{
ClearMidiBufferOp (const int buffer) noexcept : bufferNum (buffer) {}
void perform (AudioSampleBuffer&, const OwnedArray<MidiBuffer>& sharedMidiBuffers, const int)
template <typename FloatType>
void perform (AudioBuffer<FloatType>&, const OwnedArray<MidiBuffer>& sharedMidiBuffers, const int)
{
sharedMidiBuffers.getUnchecked (bufferNum)->clear();
}
@@ -106,13 +162,14 @@ struct ClearMidiBufferOp : public AudioGraphRenderingOp
};
//==============================================================================
struct CopyMidiBufferOp : public AudioGraphRenderingOp
struct CopyMidiBufferOp : public AudioGraphRenderingOp<CopyMidiBufferOp>
{
CopyMidiBufferOp (const int srcBuffer, const int dstBuffer) noexcept
: srcBufferNum (srcBuffer), dstBufferNum (dstBuffer)
{}
void perform (AudioSampleBuffer&, const OwnedArray<MidiBuffer>& sharedMidiBuffers, const int)
template <typename FloatType>
void perform (AudioBuffer<FloatType>&, const OwnedArray<MidiBuffer>& sharedMidiBuffers, const int)
{
*sharedMidiBuffers.getUnchecked (dstBufferNum) = *sharedMidiBuffers.getUnchecked (srcBufferNum);
}
@@ -123,13 +180,14 @@ struct CopyMidiBufferOp : public AudioGraphRenderingOp
};
//==============================================================================
struct AddMidiBufferOp : public AudioGraphRenderingOp
struct AddMidiBufferOp : public AudioGraphRenderingOp<AddMidiBufferOp>
{
AddMidiBufferOp (const int srcBuffer, const int dstBuffer)
: srcBufferNum (srcBuffer), dstBufferNum (dstBuffer)
{}
void perform (AudioSampleBuffer&, const OwnedArray<MidiBuffer>& sharedMidiBuffers, const int numSamples)
template <typename FloatType>
void perform (AudioBuffer<FloatType>&, const OwnedArray<MidiBuffer>& sharedMidiBuffers, const int numSamples)
{
sharedMidiBuffers.getUnchecked (dstBufferNum)
->addEvents (*sharedMidiBuffers.getUnchecked (srcBufferNum), 0, numSamples, 0);
@@ -141,24 +199,27 @@ struct AddMidiBufferOp : public AudioGraphRenderingOp
};
//==============================================================================
struct DelayChannelOp : public AudioGraphRenderingOp
struct DelayChannelOp : public AudioGraphRenderingOp<DelayChannelOp>
{
DelayChannelOp (const int chan, const int delaySize)
: channel (chan),
bufferSize (delaySize + 1),
readIndex (0), writeIndex (delaySize)
{
buffer.calloc ((size_t) bufferSize);
buffer.floatVersion. calloc ((size_t) bufferSize);
buffer.doubleVersion.calloc ((size_t) bufferSize);
}
void perform (AudioSampleBuffer& sharedBufferChans, const OwnedArray<MidiBuffer>&, const int numSamples)
template <typename FloatType>
void perform (AudioBuffer<FloatType>& sharedBufferChans, const OwnedArray<MidiBuffer>&, const int numSamples)
{
float* data = sharedBufferChans.getWritePointer (channel, 0);
FloatType* data = sharedBufferChans.getWritePointer (channel, 0);
HeapBlock<FloatType>& block = buffer.get<FloatType>();
for (int i = numSamples; --i >= 0;)
{
buffer [writeIndex] = *data;
*data++ = buffer [readIndex];
block [writeIndex] = *data;
*data++ = block [readIndex];
if (++readIndex >= bufferSize) readIndex = 0;
if (++writeIndex >= bufferSize) writeIndex = 0;
@@ -166,41 +227,67 @@ struct DelayChannelOp : public AudioGraphRenderingOp
}
private:
HeapBlock<float> buffer;
FloatAndDoubleComposition<HeapBlock<FloatPlaceholder> > buffer;
const int channel, bufferSize;
int readIndex, writeIndex;
JUCE_DECLARE_NON_COPYABLE (DelayChannelOp)
};
//==============================================================================
struct ProcessBufferOp : public AudioGraphRenderingOp
struct ProcessBufferOp : public AudioGraphRenderingOp<ProcessBufferOp>
{
ProcessBufferOp (const AudioProcessorGraph::Node::Ptr& n,
const Array<int>& audioChannels,
const Array<int>& audioChannelsUsed,
const int totalNumChans,
const int midiBuffer)
: node (n),
processor (n->getProcessor()),
audioChannelsToUse (audioChannels),
audioChannelsToUse (audioChannelsUsed),
totalChans (jmax (1, totalNumChans)),
midiBufferToUse (midiBuffer)
{
channels.calloc ((size_t) totalChans);
audioChannels.floatVersion. calloc ((size_t) totalChans);
audioChannels.doubleVersion.calloc ((size_t) totalChans);
while (audioChannelsToUse.size() < totalChans)
audioChannelsToUse.add (0);
}
void perform (AudioSampleBuffer& sharedBufferChans, const OwnedArray<MidiBuffer>& sharedMidiBuffers, const int numSamples)
template <typename FloatType>
void perform (AudioBuffer<FloatType>& sharedBufferChans, const OwnedArray<MidiBuffer>& sharedMidiBuffers, const int numSamples)
{
HeapBlock<FloatType*>& channels = audioChannels.get<FloatType>();
for (int i = totalChans; --i >= 0;)
channels[i] = sharedBufferChans.getWritePointer (audioChannelsToUse.getUnchecked (i), 0);
AudioSampleBuffer buffer (channels, totalChans, numSamples);
AudioBuffer<FloatType> buffer (channels, totalChans, numSamples);
processor->processBlock (buffer, *sharedMidiBuffers.getUnchecked (midiBufferToUse));
callProcess (buffer, *sharedMidiBuffers.getUnchecked (midiBufferToUse));
}
void callProcess (AudioBuffer<float>& buffer, MidiBuffer& midiMessages)
{
processor->processBlock (buffer, midiMessages);
}
void callProcess (AudioBuffer<double>& buffer, MidiBuffer& midiMessages)
{
if (processor->isUsingDoublePrecision())
{
processor->processBlock (buffer, midiMessages);
}
else
{
// if the processor is in single precision mode but the graph in double
// precision, then we need to convert between buffer formats. Note that
// this will only happen if the processor does not support double
// precision processing.
tempBuffer.makeCopyOf (buffer);
processor->processBlock (tempBuffer, midiMessages);
buffer.makeCopyOf (tempBuffer);
}
}
const AudioProcessorGraph::Node::Ptr node;
@@ -208,7 +295,8 @@ struct ProcessBufferOp : public AudioGraphRenderingOp
private:
Array<int> audioChannelsToUse;
HeapBlock<float*> channels;
FloatAndDoubleComposition<HeapBlock<FloatPlaceholder*> > audioChannels;
AudioBuffer<float> tempBuffer;
const int totalChans;
const int midiBufferToUse;
@@ -861,13 +949,17 @@ AudioProcessorGraph::Node::Node (const uint32 nodeID, AudioProcessor* const p) n
}
void AudioProcessorGraph::Node::prepare (const double newSampleRate, const int newBlockSize,
AudioProcessorGraph* const graph)
AudioProcessorGraph* const graph, ProcessingPrecision precision)
{
if (! isPrepared)
{
isPrepared = true;
setParentGraph (graph);
// try to align the precision of the processor and the graph
processor->setProcessingPrecision (processor->supportsDoublePrecisionProcessing() ? precision
: singlePrecision);
processor->setPlayConfigDetails (processor->getNumInputChannels(),
processor->getNumOutputChannels(),
newSampleRate, newBlockSize);
@@ -892,10 +984,53 @@ void AudioProcessorGraph::Node::setParentGraph (AudioProcessorGraph* const graph
ioProc->setParentGraph (graph);
}
//==============================================================================
struct AudioProcessorGraph::AudioProcessorGraphBufferHelpers
{
AudioProcessorGraphBufferHelpers()
{
currentAudioInputBuffer.floatVersion = nullptr;
currentAudioInputBuffer.doubleVersion = nullptr;
}
void setRenderingBufferSize (int newNumChannels, int newNumSamples)
{
renderingBuffers.floatVersion. setSize (newNumChannels, newNumSamples);
renderingBuffers.doubleVersion.setSize (newNumChannels, newNumSamples);
renderingBuffers.floatVersion. clear();
renderingBuffers.doubleVersion.clear();
}
void release()
{
renderingBuffers.floatVersion. setSize (1, 1);
renderingBuffers.doubleVersion.setSize (1, 1);
currentAudioInputBuffer.floatVersion = nullptr;
currentAudioInputBuffer.doubleVersion = nullptr;
currentAudioOutputBuffer.floatVersion. setSize (1, 1);
currentAudioOutputBuffer.doubleVersion.setSize (1, 1);
}
void prepareInOutBuffers(int newNumChannels, int newNumSamples)
{
currentAudioInputBuffer.floatVersion = nullptr;
currentAudioInputBuffer.doubleVersion = nullptr;
currentAudioOutputBuffer.floatVersion. setSize (newNumChannels, newNumSamples);
currentAudioOutputBuffer.doubleVersion.setSize (newNumChannels, newNumSamples);
}
FloatAndDoubleComposition<AudioBuffer<FloatPlaceholder> > renderingBuffers;
FloatAndDoubleComposition<AudioBuffer<FloatPlaceholder>*> currentAudioInputBuffer;
FloatAndDoubleComposition<AudioBuffer<FloatPlaceholder> > currentAudioOutputBuffer;
};
//==============================================================================
AudioProcessorGraph::AudioProcessorGraph()
: lastNodeId (0),
currentAudioInputBuffer (nullptr),
: lastNodeId (0), audioBuffers (new AudioProcessorGraphBufferHelpers),
currentMidiInputBuffer (nullptr)
{
}
@@ -1140,7 +1275,7 @@ bool AudioProcessorGraph::removeIllegalConnections()
static void deleteRenderOpArray (Array<void*>& ops)
{
for (int i = ops.size(); --i >= 0;)
delete static_cast<GraphRenderingOps::AudioGraphRenderingOp*> (ops.getUnchecked(i));
delete static_cast<GraphRenderingOps::AudioGraphRenderingOpBase*> (ops.getUnchecked(i));
}
void AudioProcessorGraph::clearRenderingSequence()
@@ -1193,7 +1328,7 @@ void AudioProcessorGraph::buildRenderingSequence()
{
Node* const node = nodes.getUnchecked(i);
node->prepare (getSampleRate(), getBlockSize(), this);
node->prepare (getSampleRate(), getBlockSize(), this, getProcessingPrecision());
int j = 0;
for (; j < orderedNodes.size(); ++j)
@@ -1214,8 +1349,7 @@ void AudioProcessorGraph::buildRenderingSequence()
// swap over to the new rendering sequence..
const ScopedLock sl (getCallbackLock());
renderingBuffers.setSize (numRenderingBuffersNeeded, getBlockSize());
renderingBuffers.clear();
audioBuffers->setRenderingBufferSize (numRenderingBuffersNeeded, getBlockSize());
for (int i = midiBuffers.size(); --i >= 0;)
midiBuffers.getUnchecked(i)->clear();
@@ -1238,8 +1372,8 @@ void AudioProcessorGraph::handleAsyncUpdate()
//==============================================================================
void AudioProcessorGraph::prepareToPlay (double /*sampleRate*/, int estimatedSamplesPerBlock)
{
currentAudioInputBuffer = nullptr;
currentAudioOutputBuffer.setSize (jmax (1, getNumOutputChannels()), estimatedSamplesPerBlock);
audioBuffers->prepareInOutBuffers (jmax (1, getNumOutputChannels()), estimatedSamplesPerBlock);
currentMidiInputBuffer = nullptr;
currentMidiOutputBuffer.clear();
@@ -1247,16 +1381,19 @@ void AudioProcessorGraph::prepareToPlay (double /*sampleRate*/, int estimatedSam
buildRenderingSequence();
}
bool AudioProcessorGraph::supportsDoublePrecisionProcessing() const
{
return true;
}
void AudioProcessorGraph::releaseResources()
{
for (int i = 0; i < nodes.size(); ++i)
nodes.getUnchecked(i)->unprepare();
renderingBuffers.setSize (1, 1);
audioBuffers->release();
midiBuffers.clear();
currentAudioInputBuffer = nullptr;
currentAudioOutputBuffer.setSize (1, 1);
currentMidiInputBuffer = nullptr;
currentMidiOutputBuffer.clear();
}
@@ -1287,8 +1424,13 @@ void AudioProcessorGraph::setPlayHead (AudioPlayHead* audioPlayHead)
nodes.getUnchecked(i)->getProcessor()->setPlayHead (audioPlayHead);
}
void AudioProcessorGraph::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
template <typename FloatType>
void AudioProcessorGraph::processAudio (AudioBuffer<FloatType>& buffer, MidiBuffer& midiMessages)
{
AudioBuffer<FloatType>& renderingBuffers = audioBuffers->renderingBuffers.get<FloatType>();
AudioBuffer<FloatType>*& currentAudioInputBuffer = audioBuffers->currentAudioInputBuffer.get<FloatType>();
AudioBuffer<FloatType>& currentAudioOutputBuffer = audioBuffers->currentAudioOutputBuffer.get<FloatType>();
const int numSamples = buffer.getNumSamples();
currentAudioInputBuffer = &buffer;
@@ -1299,8 +1441,8 @@ void AudioProcessorGraph::processBlock (AudioSampleBuffer& buffer, MidiBuffer& m
for (int i = 0; i < renderingOps.size(); ++i)
{
GraphRenderingOps::AudioGraphRenderingOp* const op
= (GraphRenderingOps::AudioGraphRenderingOp*) renderingOps.getUnchecked(i);
GraphRenderingOps::AudioGraphRenderingOpBase* const op
= (GraphRenderingOps::AudioGraphRenderingOpBase*) renderingOps.getUnchecked(i);
op->perform (renderingBuffers, midiBuffers, numSamples);
}
@@ -1331,6 +1473,21 @@ bool AudioProcessorGraph::producesMidi() const { return tru
void AudioProcessorGraph::getStateInformation (juce::MemoryBlock&) {}
void AudioProcessorGraph::setStateInformation (const void*, int) {}
void AudioProcessorGraph::processBlock (AudioBuffer<float>& buffer, MidiBuffer& midiMessages)
{
processAudio (buffer, midiMessages);
}
void AudioProcessorGraph::processBlock (AudioBuffer<double>& buffer, MidiBuffer& midiMessages)
{
processAudio (buffer, midiMessages);
}
// explicit template instantiation
template void AudioProcessorGraph::processAudio<float> ( AudioBuffer<float>& buffer,
MidiBuffer& midiMessages);
template void AudioProcessorGraph::processAudio<double> (AudioBuffer<double>& buffer,
MidiBuffer& midiMessages);
//==============================================================================
AudioProcessorGraph::AudioGraphIOProcessor::AudioGraphIOProcessor (const IODeviceType deviceType)
@@ -1384,19 +1541,31 @@ void AudioProcessorGraph::AudioGraphIOProcessor::releaseResources()
{
}
void AudioProcessorGraph::AudioGraphIOProcessor::processBlock (AudioSampleBuffer& buffer,
bool AudioProcessorGraph::AudioGraphIOProcessor::supportsDoublePrecisionProcessing() const
{
return true;
}
template <typename FloatType>
void AudioProcessorGraph::AudioGraphIOProcessor::processAudio (AudioBuffer<FloatType>& buffer,
MidiBuffer& midiMessages)
{
AudioBuffer<FloatType>*& currentAudioInputBuffer =
graph->audioBuffers->currentAudioInputBuffer.get<FloatType>();
AudioBuffer<FloatType>& currentAudioOutputBuffer =
graph->audioBuffers->currentAudioOutputBuffer.get<FloatType>();
jassert (graph != nullptr);
switch (type)
{
case audioOutputNode:
{
for (int i = jmin (graph->currentAudioOutputBuffer.getNumChannels(),
for (int i = jmin (currentAudioOutputBuffer.getNumChannels(),
buffer.getNumChannels()); --i >= 0;)
{
graph->currentAudioOutputBuffer.addFrom (i, 0, buffer, i, 0, buffer.getNumSamples());
currentAudioOutputBuffer.addFrom (i, 0, buffer, i, 0, buffer.getNumSamples());
}
break;
@@ -1404,10 +1573,10 @@ void AudioProcessorGraph::AudioGraphIOProcessor::processBlock (AudioSampleBuffer
case audioInputNode:
{
for (int i = jmin (graph->currentAudioInputBuffer->getNumChannels(),
for (int i = jmin (currentAudioInputBuffer->getNumChannels(),
buffer.getNumChannels()); --i >= 0;)
{
buffer.copyFrom (i, 0, *graph->currentAudioInputBuffer, i, 0, buffer.getNumSamples());
buffer.copyFrom (i, 0, *currentAudioInputBuffer, i, 0, buffer.getNumSamples());
}
break;
@@ -1426,6 +1595,18 @@ void AudioProcessorGraph::AudioGraphIOProcessor::processBlock (AudioSampleBuffer
}
}
void AudioProcessorGraph::AudioGraphIOProcessor::processBlock (AudioBuffer<float>& buffer,
MidiBuffer& midiMessages)
{
processAudio (buffer, midiMessages);
}
void AudioProcessorGraph::AudioGraphIOProcessor::processBlock (AudioBuffer<double>& buffer,
MidiBuffer& midiMessages)
{
processAudio (buffer, midiMessages);
}
bool AudioProcessorGraph::AudioGraphIOProcessor::silenceInProducesSilenceOut() const
{
return isOutput();


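The FloatPlaceholder/FloatDoubleType machinery above lets one composite member declaration (e.g. AudioBuffer<FloatPlaceholder>) stand for both the float and the double variant, with get<FloatType>() selecting the matching member at compile time. A stripped-down, standalone sketch of the same mechanism, substituting a simple Block container for HeapBlock/AudioBuffer (only the container is swapped; the template names mirror the code above):

    #include <vector>
    #include <cstdio>

    struct FloatPlaceholder;                                        // tag type, never instantiated

    template <typename T> struct Block { std::vector<T> data; };    // stand-in container

    // Maps a placeholder-based declaration to its concrete float/double type.
    template <typename Tag, typename Sample> struct FloatDoubleType;
    template <typename Sample> struct FloatDoubleType<Block<FloatPlaceholder>, Sample> { typedef Block<Sample> Type; };

    // Selects the member of Impl that matches the sample type.
    template <typename Sample, typename Impl> struct FloatDoubleUtil;
    template <typename Impl> struct FloatDoubleUtil<float,  Impl> { static typename Impl::FloatType&  get (Impl& i) { return i.floatVersion; } };
    template <typename Impl> struct FloatDoubleUtil<double, Impl> { static typename Impl::DoubleType& get (Impl& i) { return i.doubleVersion; } };

    template <typename Tag>
    struct FloatAndDoubleComposition
    {
        typedef typename FloatDoubleType<Tag, float>::Type  FloatType;
        typedef typename FloatDoubleType<Tag, double>::Type DoubleType;

        FloatType  floatVersion;
        DoubleType doubleVersion;

        template <typename Sample>
        typename FloatDoubleType<Tag, Sample>::Type& get()
        {
            return FloatDoubleUtil<Sample, FloatAndDoubleComposition>::get (*this);
        }
    };

    int main()
    {
        FloatAndDoubleComposition<Block<FloatPlaceholder> > buffers;
        buffers.get<float>().data.resize (4);     // resizes only the float variant
        buffers.get<double>().data.resize (8);    // resizes only the double variant
        std::printf ("%zu %zu\n", buffers.floatVersion.data.size(),
                                  buffers.doubleVersion.data.size());   // prints "4 8"
    }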
+ 18
- 7
modules/juce_audio_processors/processors/juce_AudioProcessorGraph.h View File

@@ -25,7 +25,6 @@
#ifndef JUCE_AUDIOPROCESSORGRAPH_H_INCLUDED
#define JUCE_AUDIOPROCESSORGRAPH_H_INCLUDED
//==============================================================================
/**
A type of AudioProcessor which plays back a graph of other AudioProcessors.
@@ -92,7 +91,7 @@ public:
Node (uint32 nodeId, AudioProcessor*) noexcept;
void setParentGraph (AudioProcessorGraph*) const;
void prepare (double newSampleRate, int newBlockSize, AudioProcessorGraph*);
void prepare (double newSampleRate, int newBlockSize, AudioProcessorGraph*, ProcessingPrecision);
void unprepare();
JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (Node)
@@ -308,7 +307,9 @@ public:
void fillInPluginDescription (PluginDescription&) const override;
void prepareToPlay (double newSampleRate, int estimatedSamplesPerBlock) override;
void releaseResources() override;
void processBlock (AudioSampleBuffer&, MidiBuffer&) override;
void processBlock (AudioBuffer<float>& , MidiBuffer&) override;
void processBlock (AudioBuffer<double>&, MidiBuffer&) override;
bool supportsDoublePrecisionProcessing() const override;
const String getInputChannelName (int channelIndex) const override;
const String getOutputChannelName (int channelIndex) const override;
@@ -338,6 +339,10 @@ public:
const IODeviceType type;
AudioProcessorGraph* graph;
//==============================================================================
template <typename floatType>
void processAudio (AudioBuffer<floatType>& buffer, MidiBuffer& midiMessages);
JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (AudioGraphIOProcessor)
};
@@ -345,7 +350,9 @@ public:
const String getName() const override;
void prepareToPlay (double, int) override;
void releaseResources() override;
void processBlock (AudioSampleBuffer&, MidiBuffer&) override;
void processBlock (AudioBuffer<float>&, MidiBuffer&) override;
void processBlock (AudioBuffer<double>&, MidiBuffer&) override;
bool supportsDoublePrecisionProcessing() const override;
void reset() override;
void setNonRealtime (bool) noexcept override;
@@ -372,17 +379,21 @@ public:
void setStateInformation (const void* data, int sizeInBytes) override;
private:
//==============================================================================
template <typename floatType>
void processAudio (AudioBuffer<floatType>& buffer, MidiBuffer& midiMessages);
//==============================================================================
ReferenceCountedArray<Node> nodes;
OwnedArray<Connection> connections;
uint32 lastNodeId;
AudioSampleBuffer renderingBuffers;
OwnedArray<MidiBuffer> midiBuffers;
Array<void*> renderingOps;
friend class AudioGraphIOProcessor;
AudioSampleBuffer* currentAudioInputBuffer;
AudioSampleBuffer currentAudioOutputBuffer;
struct AudioProcessorGraphBufferHelpers;
ScopedPointer<AudioProcessorGraphBufferHelpers> audioBuffers;
MidiBuffer* currentMidiInputBuffer;
MidiBuffer currentMidiOutputBuffer;


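As a usage note, the graph itself always reports double-precision support; nodes that can only handle single precision are bridged inside ProcessBufferOp. A hedged sketch of running a graph at double precision (node and connection setup is omitted, and the sample rate and block size are arbitrary):

    AudioProcessorGraph graph;
    // ... add nodes and connections here ...

    graph.setProcessingPrecision (AudioProcessor::doublePrecision);  // the graph's supportsDoublePrecisionProcessing() returns true
    graph.setPlayConfigDetails (2, 2, 44100.0, 512);
    graph.prepareToPlay (44100.0, 512);

    AudioBuffer<double> audio (2, 512);
    MidiBuffer midi;
    graph.processBlock (audio, midi);   // single-precision-only nodes are converted around internally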
+ 41
- 2
modules/juce_audio_utils/players/juce_AudioProcessorPlayer.cpp View File

@@ -22,11 +22,12 @@
==============================================================================
*/
AudioProcessorPlayer::AudioProcessorPlayer()
AudioProcessorPlayer::AudioProcessorPlayer(bool doDoublePrecisionProcessing)
: processor (nullptr),
sampleRate (0),
blockSize (0),
isPrepared (false),
isDoublePrecision (doDoublePrecisionProcessing),
numInputChans (0),
numOutputChans (0)
{
@@ -45,6 +46,12 @@ void AudioProcessorPlayer::setProcessor (AudioProcessor* const processorToPlay)
if (processorToPlay != nullptr && sampleRate > 0 && blockSize > 0)
{
processorToPlay->setPlayConfigDetails (numInputChans, numOutputChans, sampleRate, blockSize);
const bool supportsDouble = processorToPlay->supportsDoublePrecisionProcessing() && isDoublePrecision;
AudioProcessor::ProcessingPrecision precision = supportsDouble ? AudioProcessor::doublePrecision
: AudioProcessor::singlePrecision;
processorToPlay->setProcessingPrecision (precision);
processorToPlay->prepareToPlay (sampleRate, blockSize);
}
@@ -62,6 +69,28 @@ void AudioProcessorPlayer::setProcessor (AudioProcessor* const processorToPlay)
}
}
void AudioProcessorPlayer::setDoublePrecisionProcessing (bool doublePrecision)
{
if (doublePrecision != isDoublePrecision)
{
const ScopedLock sl (lock);
if (processor != nullptr)
{
processor->releaseResources();
const bool supportsDouble = processor->supportsDoublePrecisionProcessing() && doublePrecision;
AudioProcessor::ProcessingPrecision precision = supportsDouble ? AudioProcessor::doublePrecision
: AudioProcessor::singlePrecision;
processor->setProcessingPrecision (precision);
processor->prepareToPlay (sampleRate, blockSize);
}
isDoublePrecision = doublePrecision;
}
}
//==============================================================================
void AudioProcessorPlayer::audioDeviceIOCallback (const float** const inputChannelData,
const int numInputChannels,
@@ -126,7 +155,17 @@ void AudioProcessorPlayer::audioDeviceIOCallback (const float** const inputChann
if (! processor->isSuspended())
{
processor->processBlock (buffer, incomingMidi);
if (processor->isUsingDoublePrecision())
{
conversionBuffer.makeCopyOf (buffer);
processor->processBlock (conversionBuffer, incomingMidi);
buffer.makeCopyOf (conversionBuffer);
}
else
{
processor->processBlock (buffer, incomingMidi);
}
return;
}
}


+ 18
- 4
modules/juce_audio_utils/players/juce_AudioProcessorPlayer.h View File

@@ -25,7 +25,6 @@
#ifndef JUCE_AUDIOPROCESSORPLAYER_H_INCLUDED
#define JUCE_AUDIOPROCESSORPLAYER_H_INCLUDED
//==============================================================================
/**
An AudioIODeviceCallback object which streams audio through an AudioProcessor.
@@ -43,7 +42,7 @@ class JUCE_API AudioProcessorPlayer : public AudioIODeviceCallback,
{
public:
//==============================================================================
AudioProcessorPlayer();
AudioProcessorPlayer(bool doDoublePrecisionProcessing = false);
/** Destructor. */
virtual ~AudioProcessorPlayer();
@@ -65,6 +64,20 @@ public:
*/
MidiMessageCollector& getMidiMessageCollector() noexcept { return messageCollector; }
/** Switches between single and double floating point precision processing.
The audio IO callbacks will still operate in single floating point
precision; however, all internal processing, including the
AudioProcessor itself, will run in double floating point precision if
the AudioProcessor supports it (see
AudioProcessor::supportsDoublePrecisionProcessing()).
Otherwise, processing will remain single precision irrespective of
the doublePrecision parameter. */
void setDoublePrecisionProcessing (bool doublePrecision);
/** Returns true if this player internally processes the samples with
double floating point precision. */
bool getDoublePrecisionProcessing() const noexcept { return isDoublePrecision; }
//==============================================================================
/** @internal */
void audioDeviceIOCallback (const float**, int, float**, int, int) override;
@@ -81,11 +94,12 @@ private:
CriticalSection lock;
double sampleRate;
int blockSize;
bool isPrepared;
bool isPrepared, isDoublePrecision;
int numInputChans, numOutputChans;
HeapBlock<float*> channels;
AudioSampleBuffer tempBuffer;
AudioBuffer<float> tempBuffer;
AudioBuffer<double> conversionBuffer;
MidiBuffer incomingMidi;
MidiMessageCollector messageCollector;


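A hedged usage sketch for the new constructor flag and setDoublePrecisionProcessing(); createMyProcessor() is a placeholder factory and the device-manager wiring is assumed to exist elsewhere:

    AudioProcessorPlayer player (true);               // request double precision up front
    AudioProcessor* processor = createMyProcessor();  // hypothetical factory; ownership stays with the caller

    player.setProcessor (processor);                  // precision is applied and prepareToPlay() called here
    // ... register the player with an AudioDeviceManager as usual ...

    player.setDoublePrecisionProcessing (false);      // can be toggled later: the processor is released
                                                      // and re-prepared at the new precision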