The JUCE cross-platform C++ framework, with DISTRHO/KXStudio specific changes
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

1277 lines
48KB

  1. /*
  2. ==============================================================================
  3. This file is part of the JUCE library.
  4. Copyright (c) 2020 - Raw Material Software Limited
  5. JUCE is an open source library subject to commercial or open-source
  6. licensing.
  7. By using JUCE, you agree to the terms of both the JUCE 6 End-User License
  8. Agreement and JUCE Privacy Policy (both effective as of the 16th June 2020).
  9. End User License Agreement: www.juce.com/juce-6-licence
  10. Privacy Policy: www.juce.com/juce-privacy-policy
  11. Or: You may also use this code under the terms of the GPL v3 (see
  12. www.gnu.org/licenses).
  13. JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER
  14. EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE
  15. DISCLAIMED.
  16. ==============================================================================
  17. */
  18. namespace juce
  19. {
  20. namespace dsp
  21. {
  22. template <typename Element>
  23. class Queue
  24. {
  25. public:
  26. explicit Queue (int size)
  27. : fifo (size), storage (static_cast<size_t> (size)) {}
  28. bool push (Element& element) noexcept
  29. {
  30. if (fifo.getFreeSpace() == 0)
  31. return false;
  32. const auto writer = fifo.write (1);
  33. if (writer.blockSize1 != 0)
  34. storage[static_cast<size_t> (writer.startIndex1)] = std::move (element);
  35. else if (writer.blockSize2 != 0)
  36. storage[static_cast<size_t> (writer.startIndex2)] = std::move (element);
  37. return true;
  38. }
  39. template <typename Fn>
  40. void pop (Fn&& fn) { popN (1, std::forward<Fn> (fn)); }
  41. template <typename Fn>
  42. void popAll (Fn&& fn) { popN (fifo.getNumReady(), std::forward<Fn> (fn)); }
  43. bool hasPendingMessages() const noexcept { return fifo.getNumReady() > 0; }
  44. private:
  45. template <typename Fn>
  46. void popN (int n, Fn&& fn)
  47. {
  48. fifo.read (n).forEach ([&] (int index)
  49. {
  50. fn (storage[static_cast<size_t> (index)]);
  51. });
  52. }
  53. AbstractFifo fifo;
  54. std::vector<Element> storage;
  55. };
  56. class BackgroundMessageQueue : private Thread
  57. {
  58. public:
  59. explicit BackgroundMessageQueue (int entries)
  60. : Thread ("Convolution background loader"), queue (entries)
  61. {}
  62. using IncomingCommand = FixedSizeFunction<400, void()>;
  63. // Push functions here, and they'll be called later on a background thread.
  64. // This function is wait-free.
  65. // This function is only safe to call from a single thread at a time.
  66. bool push (IncomingCommand& command) { return queue.push (command); }
  67. using Thread::startThread;
  68. using Thread::stopThread;
  69. private:
  70. void run() override
  71. {
  72. while (! threadShouldExit())
  73. {
  74. if (queue.hasPendingMessages())
  75. queue.pop ([] (IncomingCommand& command) { command(); command = nullptr;});
  76. else
  77. sleep (10);
  78. }
  79. }
  80. Queue<IncomingCommand> queue;
  81. JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (BackgroundMessageQueue)
  82. };
// Pimpl for ConvolutionMessageQueue: simply a BackgroundMessageQueue,
// hidden behind a unique_ptr so the public header needn't see Thread etc.
struct ConvolutionMessageQueue::Impl : public BackgroundMessageQueue
{
    using BackgroundMessageQueue::BackgroundMessageQueue;
};
  87. ConvolutionMessageQueue::ConvolutionMessageQueue()
  88. : ConvolutionMessageQueue (1000)
  89. {}
  90. ConvolutionMessageQueue::ConvolutionMessageQueue (int entries)
  91. : pimpl (std::make_unique<Impl> (entries))
  92. {
  93. pimpl->startThread();
  94. }
  95. ConvolutionMessageQueue::~ConvolutionMessageQueue() noexcept
  96. {
  97. pimpl->stopThread (-1);
  98. }
  99. ConvolutionMessageQueue::ConvolutionMessageQueue (ConvolutionMessageQueue&&) noexcept = default;
  100. ConvolutionMessageQueue& ConvolutionMessageQueue::operator= (ConvolutionMessageQueue&&) noexcept = default;
  101. //==============================================================================
//==============================================================================
/** Single-channel uniformly-partitioned FFT convolution engine.

    The impulse response is split into segments of (fftSize - blockSize)
    samples; each segment is transformed once at construction time, and input
    blocks are convolved against all segments in the frequency domain using
    an overlap-add scheme.
*/
struct ConvolutionEngine
{
    ConvolutionEngine (const float* samples,
                       size_t numSamples,
                       size_t maxBlockSize)
        : blockSize ((size_t) nextPowerOfTwo ((int) maxBlockSize)),
          // Smaller blocks use a 4x FFT so more input segments can share one transform.
          fftSize (blockSize > 128 ? 2 * blockSize : 4 * blockSize),
          fftObject (std::make_unique<FFT> (roundToInt (std::log2 (fftSize)))),
          // Each segment holds (fftSize - blockSize) IR samples; +1 rounds up.
          numSegments (numSamples / (fftSize - blockSize) + 1u),
          numInputSegments ((blockSize > 128 ? numSegments : 3 * numSegments)),
          bufferInput (1, static_cast<int> (fftSize)),
          bufferOutput (1, static_cast<int> (fftSize * 2)),
          bufferTempOutput (1, static_cast<int> (fftSize * 2)),
          bufferOverlap (1, static_cast<int> (fftSize))
    {
        bufferOutput.clear();

        // (Re)allocates the segment buffers only when the count or size changed.
        auto updateSegmentsIfNecessary = [this] (size_t numSegmentsToUpdate,
                                                 std::vector<AudioBuffer<float>>& segments)
        {
            if (numSegmentsToUpdate == 0
                || numSegmentsToUpdate != (size_t) segments.size()
                || (size_t) segments[0].getNumSamples() != fftSize * 2)
            {
                segments.clear();

                for (size_t i = 0; i < numSegmentsToUpdate; ++i)
                    segments.push_back ({ 1, static_cast<int> (fftSize * 2) });
            }
        };

        updateSegmentsIfNecessary (numInputSegments, buffersInputSegments);
        updateSegmentsIfNecessary (numSegments, buffersImpulseSegments);

        // Temporary FFT used only to pre-transform the impulse response segments.
        auto FFTTempObject = std::make_unique<FFT> (roundToInt (std::log2 (fftSize)));
        size_t currentPtr = 0;

        // Copy, transform and re-layout each IR segment up-front.
        for (auto& buf : buffersImpulseSegments)
        {
            buf.clear();

            auto* impulseResponse = buf.getWritePointer (0);

            // Seed the first segment with a unit impulse (overwritten by the
            // copy below when IR samples are available for index 0).
            if (&buf == &buffersImpulseSegments.front())
                impulseResponse[0] = 1.0f;

            FloatVectorOperations::copy (impulseResponse,
                                         samples + currentPtr,
                                         static_cast<int> (jmin (fftSize - blockSize, numSamples - currentPtr)));

            FFTTempObject->performRealOnlyForwardTransform (impulseResponse);
            prepareForConvolution (impulseResponse);

            currentPtr += (fftSize - blockSize);
        }

        reset();
    }

    /** Clears all processing state (input, overlap, segment history). */
    void reset()
    {
        bufferInput.clear();
        bufferOverlap.clear();
        bufferTempOutput.clear();
        bufferOutput.clear();

        for (auto& buf : buffersInputSegments)
            buf.clear();

        currentSegment = 0;
        inputDataPos = 0;
    }

    /** Convolves `numSamples` of `input` into `output` with zero latency. */
    void processSamples (const float* input, float* output, size_t numSamples)
    {
        // Overlap-add, zero latency convolution algorithm with uniform partitioning
        size_t numSamplesProcessed = 0;

        // Input segments may outnumber IR segments (small-block case); step
        // through them at the matching stride.
        auto indexStep = numInputSegments / numSegments;

        auto* inputData = bufferInput.getWritePointer (0);
        auto* outputTempData = bufferTempOutput.getWritePointer (0);
        auto* outputData = bufferOutput.getWritePointer (0);
        auto* overlapData = bufferOverlap.getWritePointer (0);

        while (numSamplesProcessed < numSamples)
        {
            const bool inputDataWasEmpty = (inputDataPos == 0);
            auto numSamplesToProcess = jmin (numSamples - numSamplesProcessed, blockSize - inputDataPos);

            // Accumulate incoming samples into the (partially filled) input block.
            FloatVectorOperations::copy (inputData + inputDataPos, input + numSamplesProcessed, static_cast<int> (numSamplesToProcess));

            auto* inputSegmentData = buffersInputSegments[currentSegment].getWritePointer (0);
            FloatVectorOperations::copy (inputSegmentData, inputData, static_cast<int> (fftSize));

            fftObject->performRealOnlyForwardTransform (inputSegmentData);
            prepareForConvolution (inputSegmentData);

            // Complex multiplication
            // The tail (segments 1..N-1) only needs recomputing when a new
            // input block begins; it is cached in bufferTempOutput otherwise.
            if (inputDataWasEmpty)
            {
                FloatVectorOperations::fill (outputTempData, 0, static_cast<int> (fftSize + 1));

                auto index = currentSegment;

                for (size_t i = 1; i < numSegments; ++i)
                {
                    index += indexStep;

                    if (index >= numInputSegments)
                        index -= numInputSegments;

                    convolutionProcessingAndAccumulate (buffersInputSegments[index].getWritePointer (0),
                                                        buffersImpulseSegments[i].getWritePointer (0),
                                                        outputTempData);
                }
            }

            FloatVectorOperations::copy (outputData, outputTempData, static_cast<int> (fftSize + 1));

            // The head segment (index 0) always uses the freshest input data.
            convolutionProcessingAndAccumulate (inputSegmentData,
                                                buffersImpulseSegments.front().getWritePointer (0),
                                                outputData);

            updateSymmetricFrequencyDomainData (outputData);
            fftObject->performRealOnlyInverseTransform (outputData);

            // Add overlap
            FloatVectorOperations::add (&output[numSamplesProcessed], &outputData[inputDataPos], &overlapData[inputDataPos], (int) numSamplesToProcess);

            // Input buffer full => Next block
            inputDataPos += numSamplesToProcess;

            if (inputDataPos == blockSize)
            {
                // Input buffer is empty again now
                FloatVectorOperations::fill (inputData, 0.0f, static_cast<int> (fftSize));

                inputDataPos = 0;

                // Extra step for segSize > blockSize
                FloatVectorOperations::add (&(outputData[blockSize]), &(overlapData[blockSize]), static_cast<int> (fftSize - 2 * blockSize));

                // Save the overlap
                FloatVectorOperations::copy (overlapData, &(outputData[blockSize]), static_cast<int> (fftSize - blockSize));

                // Walk backwards through the ring of input segments.
                currentSegment = (currentSegment > 0) ? (currentSegment - 1) : (numInputSegments - 1);
            }

            numSamplesProcessed += numSamplesToProcess;
        }
    }

    /** Convolves `numSamples` of `input` into `output`, emitting the result
        one block late: output is read before the block is processed, so this
        variant adds `blockSize` samples of latency but defers the FFT work
        until a full block has been gathered.
    */
    void processSamplesWithAddedLatency (const float* input, float* output, size_t numSamples)
    {
        // Overlap-add convolution algorithm with uniform partitioning,
        // processing whole blocks at a time (adds one block of latency).
        size_t numSamplesProcessed = 0;

        auto indexStep = numInputSegments / numSegments;

        auto* inputData = bufferInput.getWritePointer (0);
        auto* outputTempData = bufferTempOutput.getWritePointer (0);
        auto* outputData = bufferOutput.getWritePointer (0);
        auto* overlapData = bufferOverlap.getWritePointer (0);

        while (numSamplesProcessed < numSamples)
        {
            auto numSamplesToProcess = jmin (numSamples - numSamplesProcessed, blockSize - inputDataPos);

            // Stash the new input, and emit the previously computed output.
            FloatVectorOperations::copy (inputData + inputDataPos, input + numSamplesProcessed, static_cast<int> (numSamplesToProcess));

            FloatVectorOperations::copy (output + numSamplesProcessed, outputData + inputDataPos, static_cast<int> (numSamplesToProcess));

            numSamplesProcessed += numSamplesToProcess;
            inputDataPos += numSamplesToProcess;

            // processing itself when needed (with latency)
            if (inputDataPos == blockSize)
            {
                // Copy input data in input segment
                auto* inputSegmentData = buffersInputSegments[currentSegment].getWritePointer (0);
                FloatVectorOperations::copy (inputSegmentData, inputData, static_cast<int> (fftSize));

                fftObject->performRealOnlyForwardTransform (inputSegmentData);
                prepareForConvolution (inputSegmentData);

                // Complex multiplication
                FloatVectorOperations::fill (outputTempData, 0, static_cast<int> (fftSize + 1));

                auto index = currentSegment;

                for (size_t i = 1; i < numSegments; ++i)
                {
                    index += indexStep;

                    if (index >= numInputSegments)
                        index -= numInputSegments;

                    convolutionProcessingAndAccumulate (buffersInputSegments[index].getWritePointer (0),
                                                        buffersImpulseSegments[i].getWritePointer (0),
                                                        outputTempData);
                }

                FloatVectorOperations::copy (outputData, outputTempData, static_cast<int> (fftSize + 1));

                convolutionProcessingAndAccumulate (inputSegmentData,
                                                    buffersImpulseSegments.front().getWritePointer (0),
                                                    outputData);

                updateSymmetricFrequencyDomainData (outputData);
                fftObject->performRealOnlyInverseTransform (outputData);

                // Add overlap
                FloatVectorOperations::add (outputData, overlapData, static_cast<int> (blockSize));

                // Input buffer is empty again now
                FloatVectorOperations::fill (inputData, 0.0f, static_cast<int> (fftSize));

                // Extra step for segSize > blockSize
                FloatVectorOperations::add (&(outputData[blockSize]), &(overlapData[blockSize]), static_cast<int> (fftSize - 2 * blockSize));

                // Save the overlap
                FloatVectorOperations::copy (overlapData, &(outputData[blockSize]), static_cast<int> (fftSize - blockSize));

                currentSegment = (currentSegment > 0) ? (currentSegment - 1) : (numInputSegments - 1);

                inputDataPos = 0;
            }
        }
    }

    // After each FFT, this function is called to allow convolution to be performed with only 4 SIMD functions calls.
    // Splits the interleaved (re, im) pairs produced by the real-only FFT into
    // two contiguous halves: reals in [0, fftSize/2], imaginaries after that.
    void prepareForConvolution (float *samples) noexcept
    {
        auto FFTSizeDiv2 = fftSize / 2;

        // Even indices hold the real parts.
        for (size_t i = 0; i < FFTSizeDiv2; i++)
            samples[i] = samples[i << 1];

        samples[FFTSizeDiv2] = 0;

        // Odd indices hold the imaginary parts (sign-flipped, taken from the
        // mirrored bin). NOTE(review): reads past the first half — relies on
        // the 2*fftSize buffer allocation above.
        for (size_t i = 1; i < FFTSizeDiv2; i++)
            samples[i + FFTSizeDiv2] = -samples[((fftSize - i) << 1) + 1];
    }

    // Does the convolution operation itself only on half of the frequency domain samples.
    // Inputs/outputs use the split real/imag layout created by prepareForConvolution.
    void convolutionProcessingAndAccumulate (const float *input, const float *impulse, float *output)
    {
        auto FFTSizeDiv2 = fftSize / 2;

        // Complex multiply-accumulate: (a+bi)(c+di) = (ac-bd) + (ad+bc)i
        FloatVectorOperations::addWithMultiply (output, input, impulse, static_cast<int> (FFTSizeDiv2));
        FloatVectorOperations::subtractWithMultiply (output, &(input[FFTSizeDiv2]), &(impulse[FFTSizeDiv2]), static_cast<int> (FFTSizeDiv2));

        FloatVectorOperations::addWithMultiply (&(output[FFTSizeDiv2]), input, &(impulse[FFTSizeDiv2]), static_cast<int> (FFTSizeDiv2));
        FloatVectorOperations::addWithMultiply (&(output[FFTSizeDiv2]), &(input[FFTSizeDiv2]), impulse, static_cast<int> (FFTSizeDiv2));

        // Nyquist bin is purely real and handled separately.
        output[fftSize] += input[fftSize] * impulse[fftSize];
    }

    // Undoes the re-organization of samples from the function prepareForConvolution.
    // Then takes the conjugate of the frequency domain first half of samples to fill the
    // second half, so that the inverse transform will return real samples in the time domain.
    void updateSymmetricFrequencyDomainData (float* samples) noexcept
    {
        auto FFTSizeDiv2 = fftSize / 2;

        // Rebuild the upper (mirrored) half as the complex conjugate.
        for (size_t i = 1; i < FFTSizeDiv2; i++)
        {
            samples[(fftSize - i) << 1] = samples[i];
            samples[((fftSize - i) << 1) + 1] = -samples[FFTSizeDiv2 + i];
        }

        samples[1] = 0.f;

        // Restore the interleaved layout of the lower half from the mirror.
        for (size_t i = 1; i < FFTSizeDiv2; i++)
        {
            samples[i << 1] = samples[(fftSize - i) << 1];
            samples[(i << 1) + 1] = -samples[((fftSize - i) << 1) + 1];
        }
    }

    //==============================================================================
    const size_t blockSize;          // processing block length (power of two)
    const size_t fftSize;            // FFT length (2x or 4x blockSize)
    const std::unique_ptr<FFT> fftObject;
    const size_t numSegments;        // number of impulse-response partitions
    const size_t numInputSegments;   // number of cached input partitions
    size_t currentSegment = 0, inputDataPos = 0;

    AudioBuffer<float> bufferInput, bufferOutput, bufferTempOutput, bufferOverlap;
    std::vector<AudioBuffer<float>> buffersInputSegments, buffersImpulseSegments;
};
  320. //==============================================================================
//==============================================================================
/** Wraps per-channel ConvolutionEngines, optionally splitting the impulse
    response into a low-latency "head" and a larger-block "tail" for
    non-uniform partitioning.
*/
class MultichannelEngine
{
public:
    MultichannelEngine (const AudioBuffer<float>& buf,
                        int maxBlockSize,
                        int maxBufferSize,
                        Convolution::NonUniform headSizeIn,
                        bool isZeroDelayIn)
        : tailBuffer (1, maxBlockSize),
          latency (isZeroDelayIn ? 0 : maxBufferSize),
          irSize (buf.getNumSamples()),
          blockSize (maxBlockSize),
          isZeroDelay (isZeroDelayIn)
    {
        constexpr auto numChannels = 2;

        // Builds one engine over [offset, offset+length) of a channel; a mono
        // IR is reused for both channels via the jmin clamp.
        const auto makeEngine = [&] (int channel, int offset, int length, uint32 thisBlockSize)
        {
            return std::make_unique<ConvolutionEngine> (buf.getReadPointer (jmin (buf.getNumChannels() - 1, channel), offset),
                                                        length,
                                                        static_cast<size_t> (thisBlockSize));
        };

        if (headSizeIn.headSizeInSamples == 0)
        {
            // Uniform partitioning: a single engine per channel covers the whole IR.
            for (int i = 0; i < numChannels; ++i)
                head.emplace_back (makeEngine (i, 0, buf.getNumSamples(), static_cast<uint32> (maxBufferSize)));
        }
        else
        {
            // Non-uniform: short head engines plus (if the IR is longer than
            // the head) larger-block tail engines.
            const auto size = jmin (buf.getNumSamples(), headSizeIn.headSizeInSamples);

            for (int i = 0; i < numChannels; ++i)
                head.emplace_back (makeEngine (i, 0, size, static_cast<uint32> (maxBufferSize)));

            const auto tailBufferSize = static_cast<uint32> (headSizeIn.headSizeInSamples + (isZeroDelay ? 0 : maxBufferSize));

            if (size != buf.getNumSamples())
                for (int i = 0; i < numChannels; ++i)
                    tail.emplace_back (makeEngine (i, size, buf.getNumSamples() - size, tailBufferSize));
        }
    }

    /** Resets every head and tail engine. */
    void reset()
    {
        for (const auto& e : head)
            e->reset();

        for (const auto& e : tail)
            e->reset();
    }

    /** Convolves `input` into `output`; extra output channels are copied from
        channel 0.
    */
    void processSamples (const AudioBlock<const float>& input, AudioBlock<float>& output)
    {
        const auto numChannels = jmin (head.size(), input.getNumChannels(), output.getNumChannels());
        const auto numSamples = jmin (input.getNumSamples(), output.getNumSamples());

        const AudioBlock<float> fullTailBlock (tailBuffer);
        const auto tailBlock = fullTailBlock.getSubBlock (0, (size_t) numSamples);

        const auto isUniform = tail.empty();

        for (size_t channel = 0; channel < numChannels; ++channel)
        {
            // The tail always runs with added latency; its result is mixed in
            // after the head below.
            if (! isUniform)
                tail[channel]->processSamplesWithAddedLatency (input.getChannelPointer (channel),
                                                               tailBlock.getChannelPointer (0),
                                                               numSamples);

            if (isZeroDelay)
                head[channel]->processSamples (input.getChannelPointer (channel),
                                               output.getChannelPointer (channel),
                                               numSamples);
            else
                head[channel]->processSamplesWithAddedLatency (input.getChannelPointer (channel),
                                                               output.getChannelPointer (channel),
                                                               numSamples);

            if (! isUniform)
                output.getSingleChannelBlock (channel) += tailBlock;
        }

        // Fan channel 0 out to any remaining output channels.
        const auto numOutputChannels = output.getNumChannels();

        for (auto i = numChannels; i < numOutputChannels; ++i)
            output.getSingleChannelBlock (i).copyFrom (output.getSingleChannelBlock (0));
    }

    int getIRSize() const noexcept { return irSize; }
    int getLatency() const noexcept { return latency; }
    int getBlockSize() const noexcept { return blockSize; }

private:
    std::vector<std::unique_ptr<ConvolutionEngine>> head, tail;
    AudioBuffer<float> tailBuffer;   // scratch for the tail engines' output

    const int latency;
    const int irSize;
    const int blockSize;
    const bool isZeroDelay;
};
  404. static AudioBuffer<float> fixNumChannels (const AudioBuffer<float>& buf, Convolution::Stereo stereo)
  405. {
  406. const auto numChannels = jmin (buf.getNumChannels(), stereo == Convolution::Stereo::yes ? 2 : 1);
  407. const auto numSamples = buf.getNumSamples();
  408. AudioBuffer<float> result (numChannels, buf.getNumSamples());
  409. for (auto channel = 0; channel != numChannels; ++channel)
  410. result.copyFrom (channel, 0, buf.getReadPointer (channel), numSamples);
  411. if (result.getNumSamples() == 0 || result.getNumChannels() == 0)
  412. {
  413. result.setSize (1, 1);
  414. result.setSample (0, 0, 1.0f);
  415. }
  416. return result;
  417. }
// Trims leading and trailing silence (below -80 dB on every channel) from an
// impulse response. Returns a single cleared sample if the whole buffer is
// below the threshold.
static AudioBuffer<float> trimImpulseResponse (const AudioBuffer<float>& buf)
{
    const auto thresholdTrim = Decibels::decibelsToGain (-80.0f);

    const auto numChannels = buf.getNumChannels();
    const auto numSamples = buf.getNumSamples();

    // Number of silent samples at the start/end, minimised across channels.
    std::ptrdiff_t offsetBegin = numSamples;
    std::ptrdiff_t offsetEnd = numSamples;

    for (auto channel = 0; channel < numChannels; ++channel)
    {
        // Distance from `begin` to the first sample at or above the threshold.
        const auto indexAboveThreshold = [&] (auto begin, auto end)
        {
            return std::distance (begin, std::find_if (begin, end, [&] (float sample)
            {
                return std::abs (sample) >= thresholdTrim;
            }));
        };

        const auto channelBegin = buf.getReadPointer (channel);
        const auto channelEnd = channelBegin + numSamples;
        const auto itStart = indexAboveThreshold (channelBegin, channelEnd);
        // Scan backwards (reverse iterators) for the trailing-silence length.
        const auto itEnd = indexAboveThreshold (std::make_reverse_iterator (channelEnd),
                                                std::make_reverse_iterator (channelBegin));

        offsetBegin = jmin (offsetBegin, itStart);
        offsetEnd = jmin (offsetEnd, itEnd);
    }

    // Every channel was entirely below the threshold: return one cleared sample.
    if (offsetBegin == numSamples)
    {
        auto result = AudioBuffer<float> (numChannels, 1);
        result.clear();
        return result;
    }

    const auto newLength = jmax (1, numSamples - static_cast<int> (offsetBegin + offsetEnd));

    AudioBuffer<float> result (numChannels, newLength);

    for (auto channel = 0; channel < numChannels; ++channel)
    {
        result.copyFrom (channel,
                         0,
                         buf.getReadPointer (channel, static_cast<int> (offsetBegin)),
                         result.getNumSamples());
    }

    return result;
}
  459. static float calculateNormalisationFactor (float sumSquaredMagnitude)
  460. {
  461. if (sumSquaredMagnitude < 1e-8f)
  462. return 1.0f;
  463. return 0.125f / std::sqrt (sumSquaredMagnitude);
  464. }
  465. static void normaliseImpulseResponse (AudioBuffer<float>& buf)
  466. {
  467. const auto numChannels = buf.getNumChannels();
  468. const auto numSamples = buf.getNumSamples();
  469. const auto channelPtrs = buf.getArrayOfWritePointers();
  470. const auto maxSumSquaredMag = std::accumulate (channelPtrs, channelPtrs + numChannels, 0.0f, [numSamples] (auto max, auto* channel)
  471. {
  472. return jmax (max, std::accumulate (channel, channel + numSamples, 0.0f, [] (auto sum, auto samp)
  473. {
  474. return sum + (samp * samp);
  475. }));
  476. });
  477. const auto normalisationFactor = calculateNormalisationFactor (maxSumSquaredMag);
  478. std::for_each (channelPtrs, channelPtrs + numChannels, [normalisationFactor, numSamples] (auto* channel)
  479. {
  480. FloatVectorOperations::multiply (channel, normalisationFactor, numSamples);
  481. });
  482. }
// Resamples an impulse response from srcSampleRate to destSampleRate using
// a ResamplingAudioSource over an in-memory copy. Returns the input buffer
// unchanged when the rates already match.
static AudioBuffer<float> resampleImpulseResponse (const AudioBuffer<float>& buf,
                                                   const double srcSampleRate,
                                                   const double destSampleRate)
{
    if (srcSampleRate == destSampleRate)
        return buf;

    // Ratio > 1 means downsampling (fewer output samples).
    const auto factorReading = srcSampleRate / destSampleRate;

    // Local copy: the sources below hold non-owning references (false flags),
    // so they must not outlive `original` / `memorySource`.
    AudioBuffer<float> original = buf;
    MemoryAudioSource memorySource (original, false);
    ResamplingAudioSource resamplingSource (&memorySource, false, buf.getNumChannels());

    // jmax guards against a zero-length result for very short IRs.
    const auto finalSize = roundToInt (jmax (1.0, buf.getNumSamples() / factorReading));
    resamplingSource.setResamplingRatio (factorReading);
    resamplingSource.prepareToPlay (finalSize, srcSampleRate);

    AudioBuffer<float> result (buf.getNumChannels(), finalSize);
    resamplingSource.getNextAudioBlock ({ &result, 0, result.getNumSamples() });

    return result;
}
  500. //==============================================================================
  501. template <typename Element>
  502. class TryLockedPtr
  503. {
  504. public:
  505. void set (std::unique_ptr<Element> p)
  506. {
  507. const SpinLock::ScopedLockType lock (mutex);
  508. ptr = std::move (p);
  509. }
  510. std::unique_ptr<MultichannelEngine> get()
  511. {
  512. const SpinLock::ScopedTryLockType lock (mutex);
  513. return lock.isLocked() ? std::move (ptr) : nullptr;
  514. }
  515. private:
  516. std::unique_ptr<Element> ptr;
  517. SpinLock mutex;
  518. };
// Couples an audio buffer with the sample rate it was recorded/loaded at,
// so the IR can be resampled to the processing rate later.
struct BufferWithSampleRate
{
    BufferWithSampleRate() = default;

    BufferWithSampleRate (AudioBuffer<float>&& bufferIn, double sampleRateIn)
        : buffer (std::move (bufferIn)), sampleRate (sampleRateIn) {}

    AudioBuffer<float> buffer;
    double sampleRate = 0.0;   // 0.0 marks "no data loaded"
};
// Decodes an audio stream (any basic JUCE-supported format) into a buffer,
// keeping at most the first `maxLength` samples (0 = load everything) and at
// most two channels. Returns a default (empty) result if no reader can be
// created for the stream.
static BufferWithSampleRate loadStreamToBuffer (std::unique_ptr<InputStream> stream, size_t maxLength)
{
    AudioFormatManager manager;
    manager.registerBasicFormats();
    std::unique_ptr<AudioFormatReader> formatReader (manager.createReaderFor (std::move (stream)));

    if (formatReader == nullptr)
        return {};

    const auto fileLength = static_cast<size_t> (formatReader->lengthInSamples);
    const auto lengthToLoad = maxLength == 0 ? fileLength : jmin (maxLength, fileLength);

    // Channel count is clamped to [1, 2]; extra source channels are dropped.
    // NOTE(review): lengthToLoad is narrowed to int here — very long files
    // would overflow; presumably IRs are assumed to fit. Confirm upstream.
    BufferWithSampleRate result { { jlimit (1, 2, static_cast<int> (formatReader->numChannels)),
                                    static_cast<int> (lengthToLoad) },
                                  formatReader->sampleRate };

    formatReader->read (result.buffer.getArrayOfWritePointers(),
                        result.buffer.getNumChannels(),
                        0,
                        result.buffer.getNumSamples());

    return result;
}
  545. // This class caches the data required to build a new convolution engine
  546. // (in particular, impulse response data and a ProcessSpec).
  547. // Calls to `setProcessSpec` and `setImpulseResponse` construct a
  548. // new engine, which can be retrieved by calling `getEngine`.
class ConvolutionEngineFactory
{
public:
    /** Latency/head-size requests of <= 0 are treated as "unspecified";
        positive values are rounded up to a power of two, minimum 64.
        A latency request of exactly 0 selects the zero-latency engine.
    */
    ConvolutionEngineFactory (Convolution::Latency requiredLatency,
                              Convolution::NonUniform requiredHeadSize)
        : latency { (requiredLatency.latencyInSamples <= 0) ? 0 : jmax (64, nextPowerOfTwo (requiredLatency.latencyInSamples)) },
          headSize { (requiredHeadSize.headSizeInSamples <= 0) ? 0 : jmax (64, nextPowerOfTwo (requiredHeadSize.headSizeInSamples)) },
          shouldBeZeroLatency (requiredLatency.latencyInSamples == 0)
    {}

    // It is safe to call this method simultaneously with other public
    // member functions.
    void setProcessSpec (const ProcessSpec& spec)
    {
        const std::lock_guard<std::mutex> lock (mutex);
        processSpec = spec;

        // Rebuild and publish a fresh engine for the new spec.
        engine.set (makeEngine());
    }

    // It is safe to call this method simultaneously with other public
    // member functions.
    void setImpulseResponse (BufferWithSampleRate&& buf,
                             Convolution::Stereo stereo,
                             Convolution::Trim trim,
                             Convolution::Normalise normalise)
    {
        const std::lock_guard<std::mutex> lock (mutex);
        wantsNormalise = normalise;
        originalSampleRate = buf.sampleRate;

        // Fix the channel count and optionally trim silence before caching.
        impulseResponse = [&]
        {
            auto corrected = fixNumChannels (buf.buffer, stereo);
            return trim == Convolution::Trim::yes ? trimImpulseResponse (corrected) : corrected;
        }();

        engine.set (makeEngine());
    }

    // Returns the most recently-created engine, or nullptr
    // if there is no pending engine, or if the engine is currently
    // being updated by one of the setter methods.
    // It is safe to call this simultaneously with other public
    // member functions.
    std::unique_ptr<MultichannelEngine> getEngine() { return engine.get(); }

private:
    // Builds an engine from the cached IR and spec. Callers must hold `mutex`.
    std::unique_ptr<MultichannelEngine> makeEngine()
    {
        auto resampled = resampleImpulseResponse (impulseResponse, originalSampleRate, processSpec.sampleRate);

        if (wantsNormalise == Convolution::Normalise::yes)
            normaliseImpulseResponse (resampled);

        // The effective latency can never be less than the host block size.
        const auto currentLatency = jmax (processSpec.maximumBlockSize, (uint32) latency.latencyInSamples);
        const auto maxBufferSize = shouldBeZeroLatency ? static_cast<int> (processSpec.maximumBlockSize)
                                                       : nextPowerOfTwo (static_cast<int> (currentLatency));

        return std::make_unique<MultichannelEngine> (resampled,
                                                     processSpec.maximumBlockSize,
                                                     maxBufferSize,
                                                     headSize,
                                                     shouldBeZeroLatency);
    }

    // Default IR: a single-sample unit impulse (identity convolution).
    static AudioBuffer<float> makeImpulseBuffer()
    {
        AudioBuffer<float> result (1, 1);
        result.setSample (0, 0, 1.0f);
        return result;
    }

    ProcessSpec processSpec { 44100.0, 128, 2 };
    AudioBuffer<float> impulseResponse = makeImpulseBuffer();
    double originalSampleRate = processSpec.sampleRate;
    Convolution::Normalise wantsNormalise = Convolution::Normalise::no;

    const Convolution::Latency latency;
    const Convolution::NonUniform headSize;
    const bool shouldBeZeroLatency;

    TryLockedPtr<MultichannelEngine> engine;

    mutable std::mutex mutex;   // guards the cached IR/spec during rebuilds
};
  620. static void setImpulseResponse (ConvolutionEngineFactory& factory,
  621. const void* sourceData,
  622. size_t sourceDataSize,
  623. Convolution::Stereo stereo,
  624. Convolution::Trim trim,
  625. size_t size,
  626. Convolution::Normalise normalise)
  627. {
  628. factory.setImpulseResponse (loadStreamToBuffer (std::make_unique<MemoryInputStream> (sourceData, sourceDataSize, false), size),
  629. stereo, trim, normalise);
  630. }
  631. static void setImpulseResponse (ConvolutionEngineFactory& factory,
  632. const File& fileImpulseResponse,
  633. Convolution::Stereo stereo,
  634. Convolution::Trim trim,
  635. size_t size,
  636. Convolution::Normalise normalise)
  637. {
  638. factory.setImpulseResponse (loadStreamToBuffer (std::make_unique<FileInputStream> (fileImpulseResponse), size),
  639. stereo, trim, normalise);
  640. }
  641. // This class acts as a destination for convolution engines which are loaded on
  642. // a background thread.
  643. // Deriving from `enable_shared_from_this` allows us to capture a reference to
  644. // this object when adding commands to the background message queue.
  645. // That way, we can avoid dangling references in the background thread in the case
  646. // that a Convolution instance is deleted before the background message queue.
class ConvolutionEngineQueue : public std::enable_shared_from_this<ConvolutionEngineQueue>
{
public:
    ConvolutionEngineQueue (BackgroundMessageQueue& queue,
                            Convolution::Latency latencyIn,
                            Convolution::NonUniform headSizeIn)
        : messageQueue (queue), factory (latencyIn, headSizeIn) {}

    /** Schedules a background engine rebuild from an already-decoded buffer.
        The buffer is moved into the queued command.
    */
    void loadImpulseResponse (AudioBuffer<float>&& buffer,
                              double sr,
                              Convolution::Stereo stereo,
                              Convolution::Trim trim,
                              Convolution::Normalise normalise)
    {
        callLater ([b = std::move (buffer), sr, stereo, trim, normalise] (ConvolutionEngineFactory& f) mutable
        {
            f.setImpulseResponse ({ std::move (b), sr }, stereo, trim, normalise);
        });
    }

    /** Schedules a background engine rebuild from raw encoded audio data.
        The data pointer must remain valid until the command has run.
    */
    void loadImpulseResponse (const void* sourceData,
                              size_t sourceDataSize,
                              Convolution::Stereo stereo,
                              Convolution::Trim trim,
                              size_t size,
                              Convolution::Normalise normalise)
    {
        callLater ([sourceData, sourceDataSize, stereo, trim, size, normalise] (ConvolutionEngineFactory& f) mutable
        {
            setImpulseResponse (f, sourceData, sourceDataSize, stereo, trim, size, normalise);
        });
    }

    /** Schedules a background engine rebuild from an audio file. */
    void loadImpulseResponse (const File& fileImpulseResponse,
                              Convolution::Stereo stereo,
                              Convolution::Trim trim,
                              size_t size,
                              Convolution::Normalise normalise)
    {
        callLater ([fileImpulseResponse, stereo, trim, size, normalise] (ConvolutionEngineFactory& f) mutable
        {
            setImpulseResponse (f, fileImpulseResponse, stereo, trim, size, normalise);
        });
    }

    /** Updates the process spec synchronously (no background hop). */
    void prepare (const ProcessSpec& spec)
    {
        factory.setProcessSpec (spec);
    }

    // Call this regularly to try to resend any pending message.
    // This allows us to always apply the most recently requested
    // state (eventually), even if the message queue fills up.
    void postPendingCommand()
    {
        if (pendingCommand == nullptr)
            return;

        if (messageQueue.push (pendingCommand))
            pendingCommand = nullptr;
    }

    /** Claims the most recently built engine, or nullptr (see factory). */
    std::unique_ptr<MultichannelEngine> getEngine() { return factory.getEngine(); }

private:
    // Wraps `fn` so it only runs if this object is still alive when the
    // background thread gets to it (weak_ptr guards against deletion).
    template <typename Fn>
    void callLater (Fn&& fn)
    {
        // If there was already a pending command (because the queue was full) we'll end up deleting it here.
        // Not much we can do about that!
        pendingCommand = [weak = weakFromThis(), callback = std::forward<Fn> (fn)]() mutable
        {
            if (auto t = weak.lock())
                callback (t->factory);
        };

        postPendingCommand();
    }

    std::weak_ptr<ConvolutionEngineQueue> weakFromThis() { return shared_from_this(); }

    BackgroundMessageQueue& messageQueue;
    ConvolutionEngineFactory factory;
    BackgroundMessageQueue::IncomingCommand pendingCommand;
};
// Cross-fades between two convolution engines over a short ramp, so that
// swapping in a newly loaded impulse response doesn't click.
class CrossoverMixer
{
public:
    // Jumps straight to "fully faded in": no transition in progress.
    void reset()
    {
        smoother.setCurrentAndTargetValue (1.0f);
    }

    void prepare (const ProcessSpec& spec)
    {
        smoother.reset (spec.sampleRate, 0.05);  // 50 ms cross-fade
        smootherBuffer.setSize (1, static_cast<int> (spec.maximumBlockSize));
        mixBuffer.setSize (static_cast<int> (spec.numChannels), static_cast<int> (spec.maximumBlockSize));
        reset();
    }

    // While a transition is in progress, renders both engines and cross-fades
    // them: 'previous' is scaled by the falling ramp (1 -> 0) and 'current' by
    // its complement (0 -> 1). When the fade completes, notifyDone() is called
    // so the caller can discard the previous engine. Outside a transition only
    // 'current' runs.
    template <typename ProcessCurrent, typename ProcessPrevious, typename NotifyDone>
    void processSamples (const AudioBlock<const float>& input,
                         AudioBlock<float>& output,
                         ProcessCurrent&& current,
                         ProcessPrevious&& previous,
                         NotifyDone&& notifyDone)
    {
        if (smoother.isSmoothing())
        {
            const auto numSamples = static_cast<int> (input.getNumSamples());

            // Fill the mono scratch buffer with the falling gain ramp.
            for (auto sample = 0; sample != numSamples; ++sample)
                smootherBuffer.setSample (0, sample, smoother.getNextValue());

            AudioBlock<float> mixBlock (mixBuffer);
            mixBlock.clear();
            previous (input, mixBlock);

            // mixBlock := previous-engine output * ramp
            for (size_t channel = 0; channel != output.getNumChannels(); ++channel)
            {
                FloatVectorOperations::multiply (mixBlock.getChannelPointer (channel),
                                                 smootherBuffer.getReadPointer (0),
                                                 numSamples);
            }

            // Turn the ramp into its complement (1 - ramp) in place.
            FloatVectorOperations::multiply (smootherBuffer.getWritePointer (0), -1.0f, numSamples);
            FloatVectorOperations::add (smootherBuffer.getWritePointer (0), 1.0f, numSamples);

            current (input, output);

            // output := current-engine output * (1 - ramp) + mixBlock
            for (size_t channel = 0; channel != output.getNumChannels(); ++channel)
            {
                FloatVectorOperations::multiply (output.getChannelPointer (channel),
                                                 smootherBuffer.getReadPointer (0),
                                                 numSamples);
                FloatVectorOperations::add (output.getChannelPointer (channel),
                                            mixBlock.getChannelPointer (channel),
                                            numSamples);
            }

            if (! smoother.isSmoothing())
                notifyDone();
        }
        else
        {
            current (input, output);
        }
    }

    // Starts a new fade from the previous engine (gain 1) towards the current one.
    void beginTransition()
    {
        smoother.setCurrentAndTargetValue (1.0f);
        smoother.setTargetValue (0.0f);
    }

private:
    LinearSmoothedValue<float> smoother;
    AudioBuffer<float> smootherBuffer;   // mono gain ramp for the current block
    AudioBuffer<float> mixBuffer;        // scratch output of the fading-out engine
};
  786. using OptionalQueue = OptionalScopedPointer<ConvolutionMessageQueue>;
// The private implementation behind Convolution: owns the engine queue, the
// current/previous engines, and the cross-fade mixer that swaps between them
// without clicks. All processSamples work is intended to stay allocation-free
// by pushing engine destruction onto the background message queue.
class Convolution::Impl
{
public:
    Impl (Latency requiredLatency,
          NonUniform requiredHeadSize,
          OptionalQueue&& queue)
        : messageQueue (std::move (queue)),
          engineQueue (std::make_shared<ConvolutionEngineQueue> (*messageQueue->pimpl,
                                                                 requiredLatency,
                                                                 requiredHeadSize))
    {}

    void reset()
    {
        mixer.reset();

        if (currentEngine != nullptr)
            currentEngine->reset();

        destroyPreviousEngine();
    }

    void prepare (const ProcessSpec& spec)
    {
        mixer.prepare (spec);
        engineQueue->prepare (spec);
        // Grab an engine synchronously so processing can start immediately.
        currentEngine = engineQueue->getEngine();
        previousEngine = nullptr;
        jassert (currentEngine != nullptr);
    }

    void processSamples (const AudioBlock<const float>& input, AudioBlock<float>& output)
    {
        // Retry any state change that couldn't be posted earlier.
        engineQueue->postPendingCommand();

        // Only swap in a new engine once the previous cross-fade has finished.
        if (previousEngine == nullptr)
            installPendingEngine();

        mixer.processSamples (input,
                              output,
                              [this] (const AudioBlock<const float>& in, AudioBlock<float>& out)
                              {
                                  currentEngine->processSamples (in, out);
                              },
                              [this] (const AudioBlock<const float>& in, AudioBlock<float>& out)
                              {
                                  // Before the first engine swap there is no previous
                                  // engine, so pass the input through unprocessed.
                                  if (previousEngine != nullptr)
                                      previousEngine->processSamples (in, out);
                                  else
                                      out.copyFrom (in);
                              },
                              [this] { destroyPreviousEngine(); });
    }

    int getCurrentIRSize() const { return currentEngine != nullptr ? currentEngine->getIRSize() : 0; }

    int getLatency() const { return currentEngine != nullptr ? currentEngine->getLatency() : 0; }

    void loadImpulseResponse (AudioBuffer<float>&& buffer,
                              double originalSampleRate,
                              Stereo stereo,
                              Trim trim,
                              Normalise normalise)
    {
        engineQueue->loadImpulseResponse (std::move (buffer), originalSampleRate, stereo, trim, normalise);
    }

    void loadImpulseResponse (const void* sourceData,
                              size_t sourceDataSize,
                              Stereo stereo,
                              Trim trim,
                              size_t size,
                              Normalise normalise)
    {
        engineQueue->loadImpulseResponse (sourceData, sourceDataSize, stereo, trim, size, normalise);
    }

    void loadImpulseResponse (const File& fileImpulseResponse,
                              Stereo stereo,
                              Trim trim,
                              size_t size,
                              Normalise normalise)
    {
        engineQueue->loadImpulseResponse (fileImpulseResponse, stereo, trim, size, normalise);
    }

private:
    // Moves the previous engine into a command so it is deleted on the
    // background thread rather than the audio thread.
    void destroyPreviousEngine()
    {
        // If the queue is full, we'll destroy this straight away
        BackgroundMessageQueue::IncomingCommand command = [p = std::move (previousEngine)]() mutable { p = nullptr; };
        messageQueue->pimpl->push (command);
    }

    // Retires the current engine, promotes the new one, and kicks off the
    // cross-fade between them.
    void installNewEngine (std::unique_ptr<MultichannelEngine> newEngine)
    {
        destroyPreviousEngine();
        previousEngine = std::move (currentEngine);
        currentEngine = std::move (newEngine);
        mixer.beginTransition();
    }

    void installPendingEngine()
    {
        if (auto newEngine = engineQueue->getEngine())
            installNewEngine (std::move (newEngine));
    }

    OptionalQueue messageQueue;
    std::shared_ptr<ConvolutionEngineQueue> engineQueue;
    std::unique_ptr<MultichannelEngine> previousEngine, currentEngine;
    CrossoverMixer mixer;
};
  884. //==============================================================================
  885. void Convolution::Mixer::prepare (const ProcessSpec& spec)
  886. {
  887. for (auto& dry : volumeDry)
  888. dry.reset (spec.sampleRate, 0.05);
  889. for (auto& wet : volumeWet)
  890. wet.reset (spec.sampleRate, 0.05);
  891. sampleRate = spec.sampleRate;
  892. dryBlock = AudioBlock<float> (dryBlockStorage,
  893. jmin (spec.numChannels, 2u),
  894. spec.maximumBlockSize);
  895. }
// Mixes the dry (input) and wet (convolved) signals, handling smooth bypass
// transitions. processWet renders the convolution of 'input' into 'output'.
template <typename ProcessWet>
void Convolution::Mixer::processSamples (const AudioBlock<const float>& input,
                                         AudioBlock<float>& output,
                                         bool isBypassed,
                                         ProcessWet&& processWet) noexcept
{
    const auto numChannels = jmin (input.getNumChannels(), volumeDry.size());
    const auto numSamples = jmin (input.getNumSamples(), output.getNumSamples());

    auto dry = dryBlock.getSubsetChannelBlock (0, numChannels);

    if (volumeDry[0].isSmoothing())
    {
        // A bypass transition is in progress: render both paths and apply
        // complementary dry/wet gain ramps per channel, then sum them.
        dry.copyFrom (input);

        for (size_t channel = 0; channel < numChannels; ++channel)
            volumeDry[channel].applyGain (dry.getChannelPointer (channel), (int) numSamples);

        processWet (input, output);

        for (size_t channel = 0; channel < numChannels; ++channel)
            volumeWet[channel].applyGain (output.getChannelPointer (channel), (int) numSamples);

        output += dry;
    }
    else
    {
        // Steady state: only run the convolution when not bypassed.
        // NOTE(review): when bypassed nothing is written to 'output' here —
        // presumably the caller has already copied the input into the output
        // block; verify against the calling process context.
        if (! currentIsBypassed)
            processWet (input, output);

        if (isBypassed != currentIsBypassed)
        {
            currentIsBypassed = isBypassed;

            for (size_t channel = 0; channel < numChannels; ++channel)
            {
                // setTargetValue() followed by reset() snaps the smoother's
                // current value to that target; the second setTargetValue()
                // then starts the actual 50 ms fade from there.
                volumeDry[channel].setTargetValue (isBypassed ? 0.0f : 1.0f);
                volumeDry[channel].reset (sampleRate, 0.05);
                volumeDry[channel].setTargetValue (isBypassed ? 1.0f : 0.0f);

                volumeWet[channel].setTargetValue (isBypassed ? 1.0f : 0.0f);
                volumeWet[channel].reset (sampleRate, 0.05);
                volumeWet[channel].setTargetValue (isBypassed ? 0.0f : 1.0f);
            }
        }
    }
}
  934. void Convolution::Mixer::reset() { dryBlock.clear(); }
  935. //==============================================================================
// Default: zero required latency, internally-owned background queue.
Convolution::Convolution()
    : Convolution (Latency { 0 })
{}
// Zero required latency, sharing an externally-owned background queue.
Convolution::Convolution (ConvolutionMessageQueue& queue)
    : Convolution (Latency { 0 }, queue)
{}
// Requested latency with a default (empty) NonUniform setting; owns its queue.
Convolution::Convolution (const Latency& requiredLatency)
    : Convolution (requiredLatency,
                   {},
                   OptionalQueue { std::make_unique<ConvolutionMessageQueue>() })
{}
// Requested non-uniform head size with default latency; owns its queue.
Convolution::Convolution (const NonUniform& nonUniform)
    : Convolution ({},
                   nonUniform,
                   OptionalQueue { std::make_unique<ConvolutionMessageQueue>() })
{}
// Requested latency, sharing an externally-owned background queue.
Convolution::Convolution (const Latency& requiredLatency, ConvolutionMessageQueue& queue)
    : Convolution (requiredLatency, {}, OptionalQueue { queue })
{}
// Requested non-uniform head size, sharing an externally-owned background queue.
Convolution::Convolution (const NonUniform& nonUniform, ConvolutionMessageQueue& queue)
    : Convolution ({}, nonUniform, OptionalQueue { queue })
{}
// Designated constructor: all other constructors delegate here.
Convolution::Convolution (const Latency& latency,
                          const NonUniform& nonUniform,
                          OptionalQueue&& queue)
    : pimpl (std::make_unique<Impl> (latency, nonUniform, std::move (queue)))
{}
  963. Convolution::~Convolution() noexcept = default;
// Loads an impulse response from raw audio-file data in memory.
// The load happens asynchronously and only the pointer is forwarded, so the
// data must remain valid until it has been consumed.
void Convolution::loadImpulseResponse (const void* sourceData,
                                       size_t sourceDataSize,
                                       Stereo stereo,
                                       Trim trim,
                                       size_t size,
                                       Normalise normalise)
{
    pimpl->loadImpulseResponse (sourceData, sourceDataSize, stereo, trim, size, normalise);
}
// Loads an impulse response from an audio file (asynchronously, on the
// background message-queue thread).
void Convolution::loadImpulseResponse (const File& fileImpulseResponse,
                                       Stereo stereo,
                                       Trim trim,
                                       size_t size,
                                       Normalise normalise)
{
    pimpl->loadImpulseResponse (fileImpulseResponse, stereo, trim, size, normalise);
}
// Loads an impulse response from an in-memory buffer, taking ownership of it.
// originalSampleRate is the rate at which the buffer was recorded.
void Convolution::loadImpulseResponse (AudioBuffer<float>&& buffer,
                                       double originalSampleRate,
                                       Stereo stereo,
                                       Trim trim,
                                       Normalise normalise)
{
    pimpl->loadImpulseResponse (std::move (buffer), originalSampleRate, stereo, trim, normalise);
}
// Prepares the mixer and engine for the given spec and enables processing.
void Convolution::prepare (const ProcessSpec& spec)
{
    mixer.prepare (spec);
    pimpl->prepare (spec);
    isActive = true;   // processSamples() is a no-op until prepare() has run
}
// Resets the mixer state and the processing engine.
void Convolution::reset() noexcept
{
    mixer.reset();
    pimpl->reset();
}
// Renders one block of audio. Does nothing until prepare() has been called.
void Convolution::processSamples (const AudioBlock<const float>& input,
                                  AudioBlock<float>& output,
                                  bool isBypassed) noexcept
{
    if (! isActive)
        return;

    jassert (input.getNumChannels() == output.getNumChannels());
    jassert (isPositiveAndBelow (input.getNumChannels(), static_cast<size_t> (3))); // only mono and stereo is supported

    // The mixer handles dry/wet blending and bypass fades; the lambda supplies
    // the wet (convolved) signal.
    mixer.processSamples (input, output, isBypassed, [this] (const auto& in, auto& out)
    {
        pimpl->processSamples (in, out);
    });
}
  1013. int Convolution::getCurrentIRSize() const { return pimpl->getCurrentIRSize(); }
  1014. int Convolution::getLatency() const { return pimpl->getLatency(); }
  1015. } // namespace dsp
  1016. } // namespace juce