The JUCE cross-platform C++ framework, with DISTRHO/KXStudio specific changes
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1275 lines
48KB

  1. /*
  2. ==============================================================================
  3. This file is part of the JUCE library.
  4. Copyright (c) 2020 - Raw Material Software Limited
  5. JUCE is an open source library subject to commercial or open-source
  6. licensing.
  7. By using JUCE, you agree to the terms of both the JUCE 6 End-User License
  8. Agreement and JUCE Privacy Policy (both effective as of the 16th June 2020).
  9. End User License Agreement: www.juce.com/juce-6-licence
  10. Privacy Policy: www.juce.com/juce-privacy-policy
  11. Or: You may also use this code under the terms of the GPL v3 (see
  12. www.gnu.org/licenses).
  13. JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER
  14. EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE
  15. DISCLAIMED.
  16. ==============================================================================
  17. */
  18. namespace juce
  19. {
  20. namespace dsp
  21. {
// A fixed-capacity queue of Elements, built on top of AbstractFifo.
// push() is wait-free; the read/write index bookkeeping is delegated to
// AbstractFifo, while the payloads live in a plain std::vector.
// NOTE(review): AbstractFifo coordinates one reader and one writer thread;
// push() and pop()/popAll() are expected to be called from at most one
// thread each — confirm against callers.
template <typename Element>
class Queue
{
public:
    explicit Queue (int size)
        : fifo (size), storage (static_cast<size_t> (size)) {}

    // Moves `element` into the queue. Returns false (leaving `element`
    // untouched) if the queue is full. The ScopedWrite returned by
    // fifo.write() commits the write when it goes out of scope.
    bool push (Element& element) noexcept
    {
        if (fifo.getFreeSpace() == 0)
            return false;

        const auto writer = fifo.write (1);

        // Only one slot is requested, so exactly one of the two regions
        // will be non-empty.
        if (writer.blockSize1 != 0)
            storage[static_cast<size_t> (writer.startIndex1)] = std::move (element);
        else if (writer.blockSize2 != 0)
            storage[static_cast<size_t> (writer.startIndex2)] = std::move (element);

        return true;
    }

    // Pops a single element (if any) and invokes fn on it in place.
    template <typename Fn>
    void pop (Fn&& fn) { popN (1, std::forward<Fn> (fn)); }

    // Pops every currently-ready element, invoking fn on each in order.
    template <typename Fn>
    void popAll (Fn&& fn) { popN (fifo.getNumReady(), std::forward<Fn> (fn)); }

    bool hasPendingMessages() const noexcept { return fifo.getNumReady() > 0; }

private:
    // Pops up to n elements; the ScopedRead returned by fifo.read()
    // commits the read positions on destruction.
    template <typename Fn>
    void popN (int n, Fn&& fn)
    {
        fifo.read (n).forEach ([&] (int index)
        {
            fn (storage[static_cast<size_t> (index)]);
        });
    }

    AbstractFifo fifo;
    std::vector<Element> storage;
};
// Executes queued commands on a dedicated background thread, polling the
// queue every 10 ms while idle.
class BackgroundMessageQueue : private Thread
{
public:
    explicit BackgroundMessageQueue (int entries)
        : Thread ("Convolution background loader"), queue (entries)
    {}

    // 400 bytes of inline storage: commands are stored without heap allocation.
    using IncomingCommand = FixedSizeFunction<400, void()>;

    // Push functions here, and they'll be called later on a background thread.
    // This function is wait-free.
    // This function is only safe to call from a single thread at a time.
    bool push (IncomingCommand& command) { return queue.push (command); }

    using Thread::startThread;
    using Thread::stopThread;

private:
    // Thread body: drain one command at a time, resetting each command
    // after it runs so captured resources are released promptly.
    void run() override
    {
        while (! threadShouldExit())
        {
            if (queue.hasPendingMessages())
                queue.pop ([] (IncomingCommand& command) { command(); command = nullptr;});
            else
                sleep (10);
        }
    }

    Queue<IncomingCommand> queue;
};
// Pimpl for ConvolutionMessageQueue: simply re-exposes BackgroundMessageQueue
// (including its constructor) behind the public header's opaque pointer.
struct ConvolutionMessageQueue::Impl : public BackgroundMessageQueue
{
    using BackgroundMessageQueue::BackgroundMessageQueue;
};
// Default queue capacity: 1000 pending commands.
ConvolutionMessageQueue::ConvolutionMessageQueue()
    : ConvolutionMessageQueue (1000)
{}
// Creates the background queue with room for `entries` commands and starts
// its worker thread immediately.
ConvolutionMessageQueue::ConvolutionMessageQueue (int entries)
    : pimpl (std::make_unique<Impl> (entries))
{
    pimpl->startThread();
}
// Blocks until the worker thread has exited (-1 = wait with no timeout).
ConvolutionMessageQueue::~ConvolutionMessageQueue() noexcept
{
    pimpl->stopThread (-1);
}
// Movable but not copyable: moves just transfer ownership of the pimpl.
ConvolutionMessageQueue::ConvolutionMessageQueue (ConvolutionMessageQueue&&) noexcept = default;
ConvolutionMessageQueue& ConvolutionMessageQueue::operator= (ConvolutionMessageQueue&&) noexcept = default;
  100. //==============================================================================
// Single-channel, uniformly-partitioned FFT convolution engine.
// The impulse response is split into (fftSize - blockSize)-sample partitions,
// each pre-transformed to the frequency domain; input blocks are transformed,
// multiplied against the partitions, accumulated, and inverse-transformed
// with overlap-add.
struct ConvolutionEngine
{
    // samples:      impulse-response data for one channel
    // numSamples:   length of the impulse response
    // maxBlockSize: largest block the caller will pass to processSamples()
    //
    // blockSize is rounded up to a power of two; small blocks (<= 128) use a
    // 4x FFT with 3x as many input segments, larger blocks a 2x FFT.
    ConvolutionEngine (const float* samples,
                       size_t numSamples,
                       size_t maxBlockSize)
        : blockSize ((size_t) nextPowerOfTwo ((int) maxBlockSize)),
          fftSize (blockSize > 128 ? 2 * blockSize : 4 * blockSize),
          fftObject (std::make_unique<FFT> (roundToInt (std::log2 (fftSize)))),
          numSegments (numSamples / (fftSize - blockSize) + 1u),
          numInputSegments ((blockSize > 128 ? numSegments : 3 * numSegments)),
          bufferInput (1, static_cast<int> (fftSize)),
          bufferOutput (1, static_cast<int> (fftSize * 2)),
          bufferTempOutput (1, static_cast<int> (fftSize * 2)),
          bufferOverlap (1, static_cast<int> (fftSize))
    {
        bufferOutput.clear();

        // (Re)allocate a partition list only when its count or FFT size changed.
        auto updateSegmentsIfNecessary = [this] (size_t numSegmentsToUpdate,
                                                 std::vector<AudioBuffer<float>>& segments)
        {
            if (numSegmentsToUpdate == 0
                || numSegmentsToUpdate != (size_t) segments.size()
                || (size_t) segments[0].getNumSamples() != fftSize * 2)
            {
                segments.clear();

                for (size_t i = 0; i < numSegmentsToUpdate; ++i)
                    segments.push_back ({ 1, static_cast<int> (fftSize * 2) });
            }
        };

        updateSegmentsIfNecessary (numInputSegments, buffersInputSegments);
        updateSegmentsIfNecessary (numSegments, buffersImpulseSegments);

        // A temporary FFT is used here so the member fftObject stays untouched.
        auto FFTTempObject = std::make_unique<FFT> (roundToInt (std::log2 (fftSize)));
        size_t currentPtr = 0;

        // Pre-transform every impulse-response partition into the frequency domain.
        for (auto& buf : buffersImpulseSegments)
        {
            buf.clear();

            auto* impulseResponse = buf.getWritePointer (0);

            // Seed the first partition with a unit impulse; real IR data
            // copied below overwrites it when available.
            if (&buf == &buffersImpulseSegments.front())
                impulseResponse[0] = 1.0f;

            FloatVectorOperations::copy (impulseResponse,
                                         samples + currentPtr,
                                         static_cast<int> (jmin (fftSize - blockSize, numSamples - currentPtr)));

            FFTTempObject->performRealOnlyForwardTransform (impulseResponse);
            prepareForConvolution (impulseResponse);

            currentPtr += (fftSize - blockSize);
        }

        reset();
    }

    // Clears all running state so the next block starts from silence.
    void reset()
    {
        bufferInput.clear();
        bufferOverlap.clear();
        bufferTempOutput.clear();
        bufferOutput.clear();

        for (auto& buf : buffersInputSegments)
            buf.clear();

        currentSegment = 0;
        inputDataPos = 0;
    }

    // Convolves numSamples of input into output with no added latency:
    // partial input blocks are processed immediately, so the FFT workload
    // varies between calls.
    void processSamples (const float* input, float* output, size_t numSamples)
    {
        // Overlap-add, zero latency convolution algorithm with uniform partitioning
        size_t numSamplesProcessed = 0;

        auto indexStep = numInputSegments / numSegments;

        auto* inputData = bufferInput.getWritePointer (0);
        auto* outputTempData = bufferTempOutput.getWritePointer (0);
        auto* outputData = bufferOutput.getWritePointer (0);
        auto* overlapData = bufferOverlap.getWritePointer (0);

        while (numSamplesProcessed < numSamples)
        {
            const bool inputDataWasEmpty = (inputDataPos == 0);
            auto numSamplesToProcess = jmin (numSamples - numSamplesProcessed, blockSize - inputDataPos);

            // Accumulate incoming samples into the current input block.
            FloatVectorOperations::copy (inputData + inputDataPos, input + numSamplesProcessed, static_cast<int> (numSamplesToProcess));

            auto* inputSegmentData = buffersInputSegments[currentSegment].getWritePointer (0);
            FloatVectorOperations::copy (inputSegmentData, inputData, static_cast<int> (fftSize));

            fftObject->performRealOnlyForwardTransform (inputSegmentData);
            prepareForConvolution (inputSegmentData);

            // Complex multiplication
            if (inputDataWasEmpty)
            {
                // Contributions of all the *older* partitions only need to be
                // summed once per input block; cache them in outputTempData.
                FloatVectorOperations::fill (outputTempData, 0, static_cast<int> (fftSize + 1));

                auto index = currentSegment;

                for (size_t i = 1; i < numSegments; ++i)
                {
                    index += indexStep;

                    if (index >= numInputSegments)
                        index -= numInputSegments;

                    convolutionProcessingAndAccumulate (buffersInputSegments[index].getWritePointer (0),
                                                        buffersImpulseSegments[i].getWritePointer (0),
                                                        outputTempData);
                }
            }

            FloatVectorOperations::copy (outputData, outputTempData, static_cast<int> (fftSize + 1));

            // The newest partition is re-convolved on every pass, since the
            // current input block may still be filling up.
            convolutionProcessingAndAccumulate (inputSegmentData,
                                                buffersImpulseSegments.front().getWritePointer (0),
                                                outputData);

            updateSymmetricFrequencyDomainData (outputData);
            fftObject->performRealOnlyInverseTransform (outputData);

            // Add overlap
            FloatVectorOperations::add (&output[numSamplesProcessed], &outputData[inputDataPos], &overlapData[inputDataPos], (int) numSamplesToProcess);

            // Input buffer full => Next block
            inputDataPos += numSamplesToProcess;

            if (inputDataPos == blockSize)
            {
                // Input buffer is empty again now
                FloatVectorOperations::fill (inputData, 0.0f, static_cast<int> (fftSize));

                inputDataPos = 0;

                // Extra step for segSize > blockSize
                FloatVectorOperations::add (&(outputData[blockSize]), &(overlapData[blockSize]), static_cast<int> (fftSize - 2 * blockSize));

                // Save the overlap
                FloatVectorOperations::copy (overlapData, &(outputData[blockSize]), static_cast<int> (fftSize - blockSize));

                currentSegment = (currentSegment > 0) ? (currentSegment - 1) : (numInputSegments - 1);
            }

            numSamplesProcessed += numSamplesToProcess;
        }
    }

    // Same overlap-add algorithm, but output lags the input by blockSize
    // samples: samples are only convolved once a full block has accumulated,
    // which spreads the FFT work evenly across calls.
    void processSamplesWithAddedLatency (const float* input, float* output, size_t numSamples)
    {
        // Overlap-add convolution with uniform partitioning and blockSize samples of added latency
        size_t numSamplesProcessed = 0;

        auto indexStep = numInputSegments / numSegments;

        auto* inputData = bufferInput.getWritePointer (0);
        auto* outputTempData = bufferTempOutput.getWritePointer (0);
        auto* outputData = bufferOutput.getWritePointer (0);
        auto* overlapData = bufferOverlap.getWritePointer (0);

        while (numSamplesProcessed < numSamples)
        {
            auto numSamplesToProcess = jmin (numSamples - numSamplesProcessed, blockSize - inputDataPos);

            // Stash new input and emit the previously-computed output.
            FloatVectorOperations::copy (inputData + inputDataPos, input + numSamplesProcessed, static_cast<int> (numSamplesToProcess));

            FloatVectorOperations::copy (output + numSamplesProcessed, outputData + inputDataPos, static_cast<int> (numSamplesToProcess));

            numSamplesProcessed += numSamplesToProcess;
            inputDataPos += numSamplesToProcess;

            // processing itself when needed (with latency)
            if (inputDataPos == blockSize)
            {
                // Copy input data in input segment
                auto* inputSegmentData = buffersInputSegments[currentSegment].getWritePointer (0);
                FloatVectorOperations::copy (inputSegmentData, inputData, static_cast<int> (fftSize));

                fftObject->performRealOnlyForwardTransform (inputSegmentData);
                prepareForConvolution (inputSegmentData);

                // Complex multiplication
                FloatVectorOperations::fill (outputTempData, 0, static_cast<int> (fftSize + 1));

                auto index = currentSegment;

                for (size_t i = 1; i < numSegments; ++i)
                {
                    index += indexStep;

                    if (index >= numInputSegments)
                        index -= numInputSegments;

                    convolutionProcessingAndAccumulate (buffersInputSegments[index].getWritePointer (0),
                                                        buffersImpulseSegments[i].getWritePointer (0),
                                                        outputTempData);
                }

                FloatVectorOperations::copy (outputData, outputTempData, static_cast<int> (fftSize + 1));

                convolutionProcessingAndAccumulate (inputSegmentData,
                                                    buffersImpulseSegments.front().getWritePointer (0),
                                                    outputData);

                updateSymmetricFrequencyDomainData (outputData);
                fftObject->performRealOnlyInverseTransform (outputData);

                // Add overlap
                FloatVectorOperations::add (outputData, overlapData, static_cast<int> (blockSize));

                // Input buffer is empty again now
                FloatVectorOperations::fill (inputData, 0.0f, static_cast<int> (fftSize));

                // Extra step for segSize > blockSize
                FloatVectorOperations::add (&(outputData[blockSize]), &(overlapData[blockSize]), static_cast<int> (fftSize - 2 * blockSize));

                // Save the overlap
                FloatVectorOperations::copy (overlapData, &(outputData[blockSize]), static_cast<int> (fftSize - blockSize));

                currentSegment = (currentSegment > 0) ? (currentSegment - 1) : (numInputSegments - 1);

                inputDataPos = 0;
            }
        }
    }

    // After each FFT, this function is called to allow convolution to be performed with only 4 SIMD functions calls.
    // Repacks the interleaved (re, im) FFT output so that the real parts of
    // bins [0, N/2) occupy the first half of the array and the (sign-flipped
    // mirror) imaginary parts occupy the second half.
    void prepareForConvolution (float *samples) noexcept
    {
        auto FFTSizeDiv2 = fftSize / 2;

        for (size_t i = 0; i < FFTSizeDiv2; i++)
            samples[i] = samples[i << 1];

        samples[FFTSizeDiv2] = 0;

        for (size_t i = 1; i < FFTSizeDiv2; i++)
            samples[i + FFTSizeDiv2] = -samples[((fftSize - i) << 1) + 1];
    }

    // Does the convolution operation itself only on half of the frequency domain samples.
    // Complex multiply-accumulate on the packed layout produced by
    // prepareForConvolution; the Nyquist bin at index fftSize is handled
    // separately as a purely real product.
    void convolutionProcessingAndAccumulate (const float *input, const float *impulse, float *output)
    {
        auto FFTSizeDiv2 = fftSize / 2;

        FloatVectorOperations::addWithMultiply (output, input, impulse, static_cast<int> (FFTSizeDiv2));
        FloatVectorOperations::subtractWithMultiply (output, &(input[FFTSizeDiv2]), &(impulse[FFTSizeDiv2]), static_cast<int> (FFTSizeDiv2));

        FloatVectorOperations::addWithMultiply (&(output[FFTSizeDiv2]), input, &(impulse[FFTSizeDiv2]), static_cast<int> (FFTSizeDiv2));
        FloatVectorOperations::addWithMultiply (&(output[FFTSizeDiv2]), &(input[FFTSizeDiv2]), impulse, static_cast<int> (FFTSizeDiv2));

        output[fftSize] += input[fftSize] * impulse[fftSize];
    }

    // Undoes the re-organization of samples from the function prepareForConvolution.
    // Then takes the conjugate of the frequency domain first half of samples to fill the
    // second half, so that the inverse transform will return real samples in the time domain.
    void updateSymmetricFrequencyDomainData (float* samples) noexcept
    {
        auto FFTSizeDiv2 = fftSize / 2;

        for (size_t i = 1; i < FFTSizeDiv2; i++)
        {
            samples[(fftSize - i) << 1] = samples[i];
            samples[((fftSize - i) << 1) + 1] = -samples[FFTSizeDiv2 + i];
        }

        samples[1] = 0.f;

        for (size_t i = 1; i < FFTSizeDiv2; i++)
        {
            samples[i << 1] = samples[(fftSize - i) << 1];
            samples[(i << 1) + 1] = -samples[((fftSize - i) << 1) + 1];
        }
    }

    //==============================================================================
    const size_t blockSize;          // processing block length (power of two)
    const size_t fftSize;            // FFT length (2x or 4x blockSize)
    const std::unique_ptr<FFT> fftObject;
    const size_t numSegments;        // number of impulse-response partitions
    const size_t numInputSegments;   // ring of past input spectra
    size_t currentSegment = 0, inputDataPos = 0;

    AudioBuffer<float> bufferInput, bufferOutput, bufferTempOutput, bufferOverlap;
    std::vector<AudioBuffer<float>> buffersInputSegments, buffersImpulseSegments;
};
  319. //==============================================================================
// Owns per-channel ConvolutionEngines. With non-uniform partitioning the IR
// is split into a low-latency "head" and a larger-block "tail" per channel;
// the mono tailBuffer holds the tail result that is mixed into each output.
class MultichannelEngine
{
public:
    // buf:           impulse-response (1 or 2 channels)
    // maxBlockSize:  largest caller block size
    // maxBufferSize: internal head-engine block size (also the reported latency
    //                when not zero-delay)
    // headSizeIn:    non-uniform head length; 0 => uniform partitioning
    MultichannelEngine (const AudioBuffer<float>& buf,
                        int maxBlockSize,
                        int maxBufferSize,
                        Convolution::NonUniform headSizeIn,
                        bool isZeroDelayIn)
        : tailBuffer (1, maxBlockSize),
          latency (isZeroDelayIn ? 0 : maxBufferSize),
          irSize (buf.getNumSamples()),
          blockSize (maxBlockSize),
          isZeroDelay (isZeroDelayIn)
    {
        constexpr auto numChannels = 2;

        // Builds one engine over [offset, offset + length) of the requested
        // channel; a mono IR is reused for both channels via the jmin clamp.
        const auto makeEngine = [&] (int channel, int offset, int length, uint32 thisBlockSize)
        {
            return std::make_unique<ConvolutionEngine> (buf.getReadPointer (jmin (buf.getNumChannels() - 1, channel), offset),
                                                        length,
                                                        static_cast<size_t> (thisBlockSize));
        };

        if (headSizeIn.headSizeInSamples == 0)
        {
            // Uniform partitioning: one engine per channel over the whole IR.
            for (int i = 0; i < numChannels; ++i)
                head.emplace_back (makeEngine (i, 0, buf.getNumSamples(), static_cast<uint32> (maxBufferSize)));
        }
        else
        {
            // Non-uniform: head engines cover the first `size` samples, tail
            // engines (if the IR is longer) cover the rest with bigger blocks.
            const auto size = jmin (buf.getNumSamples(), headSizeIn.headSizeInSamples);

            for (int i = 0; i < numChannels; ++i)
                head.emplace_back (makeEngine (i, 0, size, static_cast<uint32> (maxBufferSize)));

            const auto tailBufferSize = static_cast<uint32> (headSizeIn.headSizeInSamples + (isZeroDelay ? 0 : maxBufferSize));

            if (size != buf.getNumSamples())
                for (int i = 0; i < numChannels; ++i)
                    tail.emplace_back (makeEngine (i, size, buf.getNumSamples() - size, tailBufferSize));
        }
    }

    // Resets every head and tail engine to silence.
    void reset()
    {
        for (const auto& e : head)
            e->reset();

        for (const auto& e : tail)
            e->reset();
    }

    // Convolves each channel of input into output. Tail engines always run
    // with added latency; head engines run latency-free only in zero-delay
    // mode. Extra output channels are filled with a copy of channel 0.
    void processSamples (const AudioBlock<const float>& input, AudioBlock<float>& output)
    {
        const auto numChannels = jmin (head.size(), input.getNumChannels(), output.getNumChannels());
        const auto numSamples = jmin (input.getNumSamples(), output.getNumSamples());

        const AudioBlock<float> fullTailBlock (tailBuffer);
        const auto tailBlock = fullTailBlock.getSubBlock (0, (size_t) numSamples);

        const auto isUniform = tail.empty();

        for (size_t channel = 0; channel < numChannels; ++channel)
        {
            if (! isUniform)
                tail[channel]->processSamplesWithAddedLatency (input.getChannelPointer (channel),
                                                               tailBlock.getChannelPointer (0),
                                                               numSamples);

            if (isZeroDelay)
                head[channel]->processSamples (input.getChannelPointer (channel),
                                               output.getChannelPointer (channel),
                                               numSamples);
            else
                head[channel]->processSamplesWithAddedLatency (input.getChannelPointer (channel),
                                                               output.getChannelPointer (channel),
                                                               numSamples);

            // Mix the tail contribution on top of the head output.
            if (! isUniform)
                output.getSingleChannelBlock (channel) += tailBlock;
        }

        const auto numOutputChannels = output.getNumChannels();

        for (auto i = numChannels; i < numOutputChannels; ++i)
            output.getSingleChannelBlock (i).copyFrom (output.getSingleChannelBlock (0));
    }

    int getIRSize() const noexcept { return irSize; }
    int getLatency() const noexcept { return latency; }
    int getBlockSize() const noexcept { return blockSize; }

private:
    std::vector<std::unique_ptr<ConvolutionEngine>> head, tail;
    AudioBuffer<float> tailBuffer;   // shared mono scratch for tail output

    const int latency;
    const int irSize;
    const int blockSize;
    const bool isZeroDelay;
};
  403. static AudioBuffer<float> fixNumChannels (const AudioBuffer<float>& buf, Convolution::Stereo stereo)
  404. {
  405. const auto numChannels = jmin (buf.getNumChannels(), stereo == Convolution::Stereo::yes ? 2 : 1);
  406. const auto numSamples = buf.getNumSamples();
  407. AudioBuffer<float> result (numChannels, buf.getNumSamples());
  408. for (auto channel = 0; channel != numChannels; ++channel)
  409. result.copyFrom (channel, 0, buf.getReadPointer (channel), numSamples);
  410. if (result.getNumSamples() == 0 || result.getNumChannels() == 0)
  411. {
  412. result.setSize (1, 1);
  413. result.setSample (0, 0, 1.0f);
  414. }
  415. return result;
  416. }
// Trims leading and trailing samples quieter than -80 dB from every channel,
// keeping the widest span needed by any channel. Returns a 1-sample silent
// buffer if the whole IR is below the threshold.
static AudioBuffer<float> trimImpulseResponse (const AudioBuffer<float>& buf)
{
    const auto thresholdTrim = Decibels::decibelsToGain (-80.0f);

    const auto numChannels = buf.getNumChannels();
    const auto numSamples = buf.getNumSamples();

    // Start with maximal trims; each channel can only shrink them (jmin below).
    std::ptrdiff_t offsetBegin = numSamples;
    std::ptrdiff_t offsetEnd = numSamples;

    for (auto channel = 0; channel < numChannels; ++channel)
    {
        // Distance from `begin` to the first sample at/above the threshold
        // (== full range length if none is found).
        const auto indexAboveThreshold = [&] (auto begin, auto end)
        {
            return std::distance (begin, std::find_if (begin, end, [&] (float sample)
            {
                return std::abs (sample) >= thresholdTrim;
            }));
        };

        const auto channelBegin = buf.getReadPointer (channel);
        const auto channelEnd = channelBegin + numSamples;
        const auto itStart = indexAboveThreshold (channelBegin, channelEnd);
        // Scan backwards with reverse iterators to find the trailing trim.
        const auto itEnd = indexAboveThreshold (std::make_reverse_iterator (channelEnd),
                                                std::make_reverse_iterator (channelBegin));

        offsetBegin = jmin (offsetBegin, itStart);
        offsetEnd = jmin (offsetEnd, itEnd);
    }

    // No channel had any sample above the threshold: return 1 sample of silence.
    if (offsetBegin == numSamples)
    {
        auto result = AudioBuffer<float> (numChannels, 1);
        result.clear();
        return result;
    }

    const auto newLength = jmax (1, numSamples - static_cast<int> (offsetBegin + offsetEnd));

    AudioBuffer<float> result (numChannels, newLength);

    for (auto channel = 0; channel < numChannels; ++channel)
    {
        result.copyFrom (channel,
                         0,
                         buf.getReadPointer (channel, static_cast<int> (offsetBegin)),
                         result.getNumSamples());
    }

    return result;
}
  458. static float calculateNormalisationFactor (float sumSquaredMagnitude)
  459. {
  460. if (sumSquaredMagnitude < 1e-8f)
  461. return 1.0f;
  462. return 0.125f / std::sqrt (sumSquaredMagnitude);
  463. }
  464. static void normaliseImpulseResponse (AudioBuffer<float>& buf)
  465. {
  466. const auto numChannels = buf.getNumChannels();
  467. const auto numSamples = buf.getNumSamples();
  468. const auto channelPtrs = buf.getArrayOfWritePointers();
  469. const auto maxSumSquaredMag = std::accumulate (channelPtrs, channelPtrs + numChannels, 0.0f, [numSamples] (auto max, auto* channel)
  470. {
  471. return jmax (max, std::accumulate (channel, channel + numSamples, 0.0f, [] (auto sum, auto samp)
  472. {
  473. return sum + (samp * samp);
  474. }));
  475. });
  476. const auto normalisationFactor = calculateNormalisationFactor (maxSumSquaredMag);
  477. std::for_each (channelPtrs, channelPtrs + numChannels, [normalisationFactor, numSamples] (auto* channel)
  478. {
  479. FloatVectorOperations::multiply (channel, normalisationFactor, numSamples);
  480. });
  481. }
// Resamples `buf` from srcSampleRate to destSampleRate, returning the input
// unchanged when the rates already match.
static AudioBuffer<float> resampleImpulseResponse (const AudioBuffer<float>& buf,
                                                   const double srcSampleRate,
                                                   const double destSampleRate)
{
    if (srcSampleRate == destSampleRate)
        return buf;

    // Ratio > 1 means downsampling (reading faster than writing).
    const auto factorReading = srcSampleRate / destSampleRate;

    // MemoryAudioSource needs a non-const buffer; neither source takes ownership.
    AudioBuffer<float> original = buf;
    MemoryAudioSource memorySource (original, false);
    ResamplingAudioSource resamplingSource (&memorySource, false, buf.getNumChannels());

    const auto finalSize = roundToInt (jmax (1.0, buf.getNumSamples() / factorReading));
    resamplingSource.setResamplingRatio (factorReading);
    resamplingSource.prepareToPlay (finalSize, srcSampleRate);

    AudioBuffer<float> result (buf.getNumChannels(), finalSize);
    resamplingSource.getNextAudioBlock ({ &result, 0, result.getNumSamples() });

    return result;
}
  499. //==============================================================================
  500. template <typename Element>
  501. class TryLockedPtr
  502. {
  503. public:
  504. void set (std::unique_ptr<Element> p)
  505. {
  506. const SpinLock::ScopedLockType lock (mutex);
  507. ptr = std::move (p);
  508. }
  509. std::unique_ptr<MultichannelEngine> get()
  510. {
  511. const SpinLock::ScopedTryLockType lock (mutex);
  512. return lock.isLocked() ? std::move (ptr) : nullptr;
  513. }
  514. private:
  515. std::unique_ptr<Element> ptr;
  516. SpinLock mutex;
  517. };
// An audio buffer paired with the sample rate it was recorded/loaded at.
struct BufferWithSampleRate
{
    BufferWithSampleRate() = default;

    BufferWithSampleRate (AudioBuffer<float>&& bufferIn, double sampleRateIn)
        : buffer (std::move (bufferIn)), sampleRate (sampleRateIn) {}

    AudioBuffer<float> buffer;
    double sampleRate = 0.0;   // 0.0 marks a default-constructed/empty result
};
// Decodes an audio stream into a BufferWithSampleRate, reading at most
// maxLength samples (0 => whole file) and clamping to 1 or 2 channels.
// Returns a default-constructed result if no reader can be created.
static BufferWithSampleRate loadStreamToBuffer (std::unique_ptr<InputStream> stream, size_t maxLength)
{
    AudioFormatManager manager;
    manager.registerBasicFormats();

    std::unique_ptr<AudioFormatReader> formatReader (manager.createReaderFor (std::move (stream)));

    if (formatReader == nullptr)
        return {};

    const auto fileLength = static_cast<size_t> (formatReader->lengthInSamples);
    const auto lengthToLoad = maxLength == 0 ? fileLength : jmin (maxLength, fileLength);

    BufferWithSampleRate result { { jlimit (1, 2, static_cast<int> (formatReader->numChannels)),
                                    static_cast<int> (lengthToLoad) },
                                  formatReader->sampleRate };

    formatReader->read (result.buffer.getArrayOfWritePointers(),
                        result.buffer.getNumChannels(),
                        0,
                        result.buffer.getNumSamples());

    return result;
}
// This class caches the data required to build a new convolution engine
// (in particular, impulse response data and a ProcessSpec).
// Calls to `setProcessSpec` and `setImpulseResponse` construct a
// new engine, which can be retrieved by calling `getEngine`.
class ConvolutionEngineFactory
{
public:
    // Negative/zero requested sizes disable the feature (value 0); positive
    // values are rounded up to a power of two, with a floor of 64 samples.
    ConvolutionEngineFactory (Convolution::Latency requiredLatency,
                              Convolution::NonUniform requiredHeadSize)
        : latency { (requiredLatency.latencyInSamples <= 0) ? 0 : jmax (64, nextPowerOfTwo (requiredLatency.latencyInSamples)) },
          headSize { (requiredHeadSize.headSizeInSamples <= 0) ? 0 : jmax (64, nextPowerOfTwo (requiredHeadSize.headSizeInSamples)) },
          shouldBeZeroLatency (requiredLatency.latencyInSamples == 0)
    {}

    // It is safe to call this method simultaneously with other public
    // member functions.
    void setProcessSpec (const ProcessSpec& spec)
    {
        const std::lock_guard<std::mutex> lock (mutex);
        processSpec = spec;

        engine.set (makeEngine());
    }

    // It is safe to call this method simultaneously with other public
    // member functions.
    void setImpulseResponse (BufferWithSampleRate&& buf,
                             Convolution::Stereo stereo,
                             Convolution::Trim trim,
                             Convolution::Normalise normalise)
    {
        const std::lock_guard<std::mutex> lock (mutex);
        wantsNormalise = normalise;
        originalSampleRate = buf.sampleRate;

        // Clamp channel count, then optionally trim silence from both ends.
        impulseResponse = [&]
        {
            auto corrected = fixNumChannels (buf.buffer, stereo);
            return trim == Convolution::Trim::yes ? trimImpulseResponse (corrected) : corrected;
        }();

        engine.set (makeEngine());
    }

    // Returns the most recently-created engine, or nullptr
    // if there is no pending engine, or if the engine is currently
    // being updated by one of the setter methods.
    // It is safe to call this simultaneously with other public
    // member functions.
    std::unique_ptr<MultichannelEngine> getEngine() { return engine.get(); }

private:
    // Builds a fresh engine from the cached IR and spec; callers hold `mutex`.
    std::unique_ptr<MultichannelEngine> makeEngine()
    {
        auto resampled = resampleImpulseResponse (impulseResponse, originalSampleRate, processSpec.sampleRate);

        if (wantsNormalise == Convolution::Normalise::yes)
            normaliseImpulseResponse (resampled);

        // The effective latency can never be shorter than one host block.
        const auto currentLatency = jmax (processSpec.maximumBlockSize, (uint32) latency.latencyInSamples);
        const auto maxBufferSize = shouldBeZeroLatency ? static_cast<int> (processSpec.maximumBlockSize)
                                                       : nextPowerOfTwo (static_cast<int> (currentLatency));

        return std::make_unique<MultichannelEngine> (resampled,
                                                     processSpec.maximumBlockSize,
                                                     maxBufferSize,
                                                     headSize,
                                                     shouldBeZeroLatency);
    }

    // 1-sample unit impulse used until a real IR is supplied.
    static AudioBuffer<float> makeImpulseBuffer()
    {
        AudioBuffer<float> result (1, 1);
        result.setSample (0, 0, 1.0f);
        return result;
    }

    ProcessSpec processSpec { 44100.0, 128, 2 };
    AudioBuffer<float> impulseResponse = makeImpulseBuffer();
    double originalSampleRate = processSpec.sampleRate;
    Convolution::Normalise wantsNormalise = Convolution::Normalise::no;

    const Convolution::Latency latency;
    const Convolution::NonUniform headSize;
    const bool shouldBeZeroLatency;

    TryLockedPtr<MultichannelEngine> engine;

    mutable std::mutex mutex;   // guards all cached state above
};
// Convenience overload: loads an IR from an in-memory block (not copied, so
// `sourceData` must outlive the load) and forwards it to the factory.
static void setImpulseResponse (ConvolutionEngineFactory& factory,
                                const void* sourceData,
                                size_t sourceDataSize,
                                Convolution::Stereo stereo,
                                Convolution::Trim trim,
                                size_t size,
                                Convolution::Normalise normalise)
{
    factory.setImpulseResponse (loadStreamToBuffer (std::make_unique<MemoryInputStream> (sourceData, sourceDataSize, false), size),
                                stereo, trim, normalise);
}
// Convenience overload: loads an IR from an audio file (reading at most
// `size` samples; 0 => whole file) and forwards it to the factory.
static void setImpulseResponse (ConvolutionEngineFactory& factory,
                                const File& fileImpulseResponse,
                                Convolution::Stereo stereo,
                                Convolution::Trim trim,
                                size_t size,
                                Convolution::Normalise normalise)
{
    factory.setImpulseResponse (loadStreamToBuffer (std::make_unique<FileInputStream> (fileImpulseResponse), size),
                                stereo, trim, normalise);
}
  640. // This class acts as a destination for convolution engines which are loaded on
  641. // a background thread.
  642. // Deriving from `enable_shared_from_this` allows us to capture a reference to
  643. // this object when adding commands to the background message queue.
  644. // That way, we can avoid dangling references in the background thread in the case
  645. // that a Convolution instance is deleted before the background message queue.
  646. class ConvolutionEngineQueue : public std::enable_shared_from_this<ConvolutionEngineQueue>
  647. {
  648. public:
  649. ConvolutionEngineQueue (BackgroundMessageQueue& queue,
  650. Convolution::Latency latencyIn,
  651. Convolution::NonUniform headSizeIn)
  652. : messageQueue (queue), factory (latencyIn, headSizeIn) {}
  653. void loadImpulseResponse (AudioBuffer<float>&& buffer,
  654. double sr,
  655. Convolution::Stereo stereo,
  656. Convolution::Trim trim,
  657. Convolution::Normalise normalise)
  658. {
  659. callLater ([b = std::move (buffer), sr, stereo, trim, normalise] (ConvolutionEngineFactory& f) mutable
  660. {
  661. f.setImpulseResponse ({ std::move (b), sr }, stereo, trim, normalise);
  662. });
  663. }
  664. void loadImpulseResponse (const void* sourceData,
  665. size_t sourceDataSize,
  666. Convolution::Stereo stereo,
  667. Convolution::Trim trim,
  668. size_t size,
  669. Convolution::Normalise normalise)
  670. {
  671. callLater ([sourceData, sourceDataSize, stereo, trim, size, normalise] (ConvolutionEngineFactory& f) mutable
  672. {
  673. setImpulseResponse (f, sourceData, sourceDataSize, stereo, trim, size, normalise);
  674. });
  675. }
  676. void loadImpulseResponse (const File& fileImpulseResponse,
  677. Convolution::Stereo stereo,
  678. Convolution::Trim trim,
  679. size_t size,
  680. Convolution::Normalise normalise)
  681. {
  682. callLater ([fileImpulseResponse, stereo, trim, size, normalise] (ConvolutionEngineFactory& f) mutable
  683. {
  684. setImpulseResponse (f, fileImpulseResponse, stereo, trim, size, normalise);
  685. });
  686. }
  687. void prepare (const ProcessSpec& spec)
  688. {
  689. factory.setProcessSpec (spec);
  690. }
  691. // Call this regularly to try to resend any pending message.
  692. // This allows us to always apply the most recently requested
  693. // state (eventually), even if the message queue fills up.
  694. void postPendingCommand()
  695. {
  696. if (pendingCommand == nullptr)
  697. return;
  698. if (messageQueue.push (pendingCommand))
  699. pendingCommand = nullptr;
  700. }
  701. std::unique_ptr<MultichannelEngine> getEngine() { return factory.getEngine(); }
  702. private:
  703. template <typename Fn>
  704. void callLater (Fn&& fn)
  705. {
  706. // If there was already a pending command (because the queue was full) we'll end up deleting it here.
  707. // Not much we can do about that!
  708. pendingCommand = [weak = weakFromThis(), callback = std::forward<Fn> (fn)]() mutable
  709. {
  710. if (auto t = weak.lock())
  711. callback (t->factory);
  712. };
  713. postPendingCommand();
  714. }
// NOTE: calls shared_from_this(), so instances must be owned by a shared_ptr.
std::weak_ptr<ConvolutionEngineQueue> weakFromThis() { return shared_from_this(); }

BackgroundMessageQueue& messageQueue;                   // queue shared with the background thread
ConvolutionEngineFactory factory;                       // builds engines from loaded impulse responses
BackgroundMessageQueue::IncomingCommand pendingCommand; // latest command that hasn't fitted in the queue yet
  719. };
// Crossfades between a "previous" and a "current" processor over a short
// (50 ms) ramp, so that swapping convolution engines doesn't click.
class CrossoverMixer
{
public:
    // Jumps the fade straight to "fully current engine" (no transition active).
    void reset()
    {
        smoother.setCurrentAndTargetValue (1.0f);
    }

    // Sizes the scratch buffers for the given spec and cancels any transition.
    void prepare (const ProcessSpec& spec)
    {
        smoother.reset (spec.sampleRate, 0.05);
        smootherBuffer.setSize (1, static_cast<int> (spec.maximumBlockSize));
        mixBuffer.setSize (static_cast<int> (spec.numChannels), static_cast<int> (spec.maximumBlockSize));
        reset();
    }

    // While a transition is running, renders both processors and mixes them
    // with complementary per-sample gains; when the ramp completes,
    // notifyDone() is invoked exactly once. Outside a transition only
    // 'current' is rendered.
    template <typename ProcessCurrent, typename ProcessPrevious, typename NotifyDone>
    void processSamples (const AudioBlock<const float>& input,
                         AudioBlock<float>& output,
                         ProcessCurrent&& current,
                         ProcessPrevious&& previous,
                         NotifyDone&& notifyDone)
    {
        if (smoother.isSmoothing())
        {
            const auto numSamples = static_cast<int> (input.getNumSamples());

            // Record the ramp (1 -> 0) so both signals use identical gain curves.
            for (auto sample = 0; sample != numSamples; ++sample)
                smootherBuffer.setSample (0, sample, smoother.getNextValue());

            AudioBlock<float> mixBlock (mixBuffer);
            mixBlock.clear();
            previous (input, mixBlock);

            // Previous engine fades out with the ramp...
            for (size_t channel = 0; channel != output.getNumChannels(); ++channel)
            {
                FloatVectorOperations::multiply (mixBlock.getChannelPointer (channel),
                                                 smootherBuffer.getReadPointer (0),
                                                 numSamples);
            }

            // ...then the ramp is inverted in place (1 - ramp) for the current engine.
            FloatVectorOperations::multiply (smootherBuffer.getWritePointer (0), -1.0f, numSamples);
            FloatVectorOperations::add (smootherBuffer.getWritePointer (0), 1.0f, numSamples);

            current (input, output);

            // Current engine fades in, then the faded-out previous signal is summed on top.
            for (size_t channel = 0; channel != output.getNumChannels(); ++channel)
            {
                FloatVectorOperations::multiply (output.getChannelPointer (channel),
                                                 smootherBuffer.getReadPointer (0),
                                                 numSamples);
                FloatVectorOperations::add (output.getChannelPointer (channel),
                                            mixBlock.getChannelPointer (channel),
                                            numSamples);
            }

            if (! smoother.isSmoothing())
                notifyDone();
        }
        else
        {
            current (input, output);
        }
    }

    // Begins a fresh fade: previous engine at full gain, ramping down to zero.
    void beginTransition()
    {
        smoother.setCurrentAndTargetValue (1.0f);
        smoother.setTargetValue (0.0f);
    }

private:
    LinearSmoothedValue<float> smoother; // ramps 1 -> 0 over 50 ms during a transition
    AudioBuffer<float> smootherBuffer;   // per-sample gain curve for the current block (mono)
    AudioBuffer<float> mixBuffer;        // scratch output for the previous engine
};
  785. using OptionalQueue = OptionalScopedPointer<ConvolutionMessageQueue>;
// Pimpl for Convolution. Owns the current/previous convolution engines and
// coordinates with the background thread (via ConvolutionEngineQueue) so that
// impulse-response loading never blocks the audio thread.
class Convolution::Impl
{
public:
    Impl (Latency requiredLatency,
          NonUniform requiredHeadSize,
          OptionalQueue&& queue)
        : messageQueue (std::move (queue)),
          engineQueue (std::make_shared<ConvolutionEngineQueue> (*messageQueue->pimpl,
                                                                 requiredLatency,
                                                                 requiredHeadSize))
    {}

    void reset()
    {
        mixer.reset();

        if (currentEngine != nullptr)
            currentEngine->reset();

        destroyPreviousEngine();
    }

    // Prepares all sub-objects and synchronously fetches an engine matching
    // the new spec, cancelling any in-progress crossfade.
    void prepare (const ProcessSpec& spec)
    {
        mixer.prepare (spec);
        engineQueue->prepare (spec);
        currentEngine = engineQueue->getEngine();
        previousEngine = nullptr;
        jassert (currentEngine != nullptr);
    }

    // Audio-thread processing: retries any queued command, swaps in a newly
    // built engine when possible, and crossfades between old and new engines.
    void processSamples (const AudioBlock<const float>& input, AudioBlock<float>& output)
    {
        engineQueue->postPendingCommand();

        // Only pick up a new engine once the previous crossfade has finished.
        if (previousEngine == nullptr)
            installPendingEngine();

        mixer.processSamples (input,
                              output,
                              [this] (const AudioBlock<const float>& in, AudioBlock<float>& out)
                              {
                                  currentEngine->processSamples (in, out);
                              },
                              [this] (const AudioBlock<const float>& in, AudioBlock<float>& out)
                              {
                                  // Before the first engine swap there's no previous
                                  // engine, so pass the input through unchanged.
                                  if (previousEngine != nullptr)
                                      previousEngine->processSamples (in, out);
                                  else
                                      out.copyFrom (in);
                              },
                              [this] { destroyPreviousEngine(); });
    }

    int getCurrentIRSize() const { return currentEngine != nullptr ? currentEngine->getIRSize() : 0; }

    int getLatency() const { return currentEngine != nullptr ? currentEngine->getLatency() : 0; }

    void loadImpulseResponse (AudioBuffer<float>&& buffer,
                              double originalSampleRate,
                              Stereo stereo,
                              Trim trim,
                              Normalise normalise)
    {
        engineQueue->loadImpulseResponse (std::move (buffer), originalSampleRate, stereo, trim, normalise);
    }

    void loadImpulseResponse (const void* sourceData,
                              size_t sourceDataSize,
                              Stereo stereo,
                              Trim trim,
                              size_t size,
                              Normalise normalise)
    {
        engineQueue->loadImpulseResponse (sourceData, sourceDataSize, stereo, trim, size, normalise);
    }

    void loadImpulseResponse (const File& fileImpulseResponse,
                              Stereo stereo,
                              Trim trim,
                              size_t size,
                              Normalise normalise)
    {
        engineQueue->loadImpulseResponse (fileImpulseResponse, stereo, trim, size, normalise);
    }

private:
    // Hands the old engine to the background thread for deletion.
    void destroyPreviousEngine()
    {
        // If the queue is full, we'll destroy this straight away
        BackgroundMessageQueue::IncomingCommand command = [p = std::move (previousEngine)]() mutable { p = nullptr; };
        messageQueue->pimpl->push (command);
    }

    // Demotes the current engine to 'previous' and starts a crossfade to the new one.
    void installNewEngine (std::unique_ptr<MultichannelEngine> newEngine)
    {
        destroyPreviousEngine();
        previousEngine = std::move (currentEngine);
        currentEngine = std::move (newEngine);
        mixer.beginTransition();
    }

    void installPendingEngine()
    {
        if (auto newEngine = engineQueue->getEngine())
            installNewEngine (std::move (newEngine));
    }

    OptionalQueue messageQueue;                                // owned or borrowed background queue
    std::shared_ptr<ConvolutionEngineQueue> engineQueue;       // builds engines off the audio thread
    std::unique_ptr<MultichannelEngine> previousEngine, currentEngine;
    CrossoverMixer mixer;                                      // fades between the two engines
};
  883. //==============================================================================
  884. void Convolution::Mixer::prepare (const ProcessSpec& spec)
  885. {
  886. for (auto& dry : volumeDry)
  887. dry.reset (spec.sampleRate, 0.05);
  888. for (auto& wet : volumeWet)
  889. wet.reset (spec.sampleRate, 0.05);
  890. sampleRate = spec.sampleRate;
  891. dryBlock = AudioBlock<float> (dryBlockStorage,
  892. jmin (spec.numChannels, 2u),
  893. spec.maximumBlockSize);
  894. }
// Renders the wet (convolved) signal and crossfades it against the dry input
// whenever the bypass state changes; once settled, either the wet path alone
// is processed, or (when bypassed) the output is left untouched.
template <typename ProcessWet>
void Convolution::Mixer::processSamples (const AudioBlock<const float>& input,
                                         AudioBlock<float>& output,
                                         bool isBypassed,
                                         ProcessWet&& processWet) noexcept
{
    const auto numChannels = jmin (input.getNumChannels(), volumeDry.size());
    const auto numSamples = jmin (input.getNumSamples(), output.getNumSamples());

    auto dry = dryBlock.getSubsetChannelBlock (0, numChannels);

    if (volumeDry[0].isSmoothing())
    {
        // Mid-transition: mix the gained dry copy with the gained wet output.
        dry.copyFrom (input);

        for (size_t channel = 0; channel < numChannels; ++channel)
            volumeDry[channel].applyGain (dry.getChannelPointer (channel), (int) numSamples);

        processWet (input, output);

        for (size_t channel = 0; channel < numChannels; ++channel)
            volumeWet[channel].applyGain (output.getChannelPointer (channel), (int) numSamples);

        output += dry;
    }
    else
    {
        if (! currentIsBypassed)
            processWet (input, output);

        if (isBypassed != currentIsBypassed)
        {
            currentIsBypassed = isBypassed;

            for (size_t channel = 0; channel < numChannels; ++channel)
            {
                // setTargetValue() + reset() pins the smoother's *current* value;
                // the second setTargetValue() then starts the 50 ms ramp towards
                // the opposite gain (dry and wet ramp in opposite directions).
                volumeDry[channel].setTargetValue (isBypassed ? 0.0f : 1.0f);
                volumeDry[channel].reset (sampleRate, 0.05);
                volumeDry[channel].setTargetValue (isBypassed ? 1.0f : 0.0f);

                volumeWet[channel].setTargetValue (isBypassed ? 1.0f : 0.0f);
                volumeWet[channel].reset (sampleRate, 0.05);
                volumeWet[channel].setTargetValue (isBypassed ? 0.0f : 1.0f);
            }
        }
    }
}
  933. void Convolution::Mixer::reset() { dryBlock.clear(); }
  934. //==============================================================================
// Default: zero extra latency, uniform partitioning, privately-owned queue.
Convolution::Convolution()
    : Convolution (Latency { 0 })
{}

Convolution::Convolution (ConvolutionMessageQueue& queue)
    : Convolution (Latency { 0 }, queue)
{}

// Overloads without a queue argument create and own a private message queue.
Convolution::Convolution (const Latency& requiredLatency)
    : Convolution (requiredLatency,
                   {},
                   OptionalQueue { std::make_unique<ConvolutionMessageQueue>() })
{}

Convolution::Convolution (const NonUniform& nonUniform)
    : Convolution ({},
                   nonUniform,
                   OptionalQueue { std::make_unique<ConvolutionMessageQueue>() })
{}

// Overloads taking a queue reference share that caller-owned queue instead.
Convolution::Convolution (const Latency& requiredLatency, ConvolutionMessageQueue& queue)
    : Convolution (requiredLatency, {}, OptionalQueue { queue })
{}

Convolution::Convolution (const NonUniform& nonUniform, ConvolutionMessageQueue& queue)
    : Convolution ({}, nonUniform, OptionalQueue { queue })
{}

// All public constructors funnel into this one, which builds the pimpl.
Convolution::Convolution (const Latency& latency,
                          const NonUniform& nonUniform,
                          OptionalQueue&& queue)
    : pimpl (std::make_unique<Impl> (latency, nonUniform, std::move (queue)))
{}

Convolution::~Convolution() noexcept = default;
// Loads an IR from in-memory audio-file data; the work is queued onto the
// background thread, so this is safe to call from any thread.
void Convolution::loadImpulseResponse (const void* sourceData,
                                       size_t sourceDataSize,
                                       Stereo stereo,
                                       Trim trim,
                                       size_t size,
                                       Normalise normalise)
{
    pimpl->loadImpulseResponse (sourceData, sourceDataSize, stereo, trim, size, normalise);
}

// Loads an IR from an audio file on disk (asynchronously, via the queue).
void Convolution::loadImpulseResponse (const File& fileImpulseResponse,
                                       Stereo stereo,
                                       Trim trim,
                                       size_t size,
                                       Normalise normalise)
{
    pimpl->loadImpulseResponse (fileImpulseResponse, stereo, trim, size, normalise);
}

// Loads an IR from an already-decoded buffer, taking ownership of its data.
void Convolution::loadImpulseResponse (AudioBuffer<float>&& buffer,
                                       double originalSampleRate,
                                       Stereo stereo,
                                       Trim trim,
                                       Normalise normalise)
{
    pimpl->loadImpulseResponse (std::move (buffer), originalSampleRate, stereo, trim, normalise);
}
// Must be called before processing; unlocks processSamples() via isActive.
void Convolution::prepare (const ProcessSpec& spec)
{
    mixer.prepare (spec);
    pimpl->prepare (spec);
    isActive = true;
}

void Convolution::reset() noexcept
{
    mixer.reset();
    pimpl->reset();
}
// Renders the convolution into 'output', honouring the bypass crossfade.
// Does nothing until prepare() has been called.
void Convolution::processSamples (const AudioBlock<const float>& input,
                                  AudioBlock<float>& output,
                                  bool isBypassed) noexcept
{
    if (! isActive)
        return;

    jassert (input.getNumChannels() == output.getNumChannels());
    jassert (isPositiveAndBelow (input.getNumChannels(), static_cast<size_t> (3))); // only mono and stereo is supported

    mixer.processSamples (input, output, isBypassed, [this] (const auto& in, auto& out)
    {
        pimpl->processSamples (in, out);
    });
}
// Simple accessors forwarded to the implementation (0 until an engine exists).
int Convolution::getCurrentIRSize() const { return pimpl->getCurrentIRSize(); }
int Convolution::getLatency() const { return pimpl->getLatency(); }
  1014. } // namespace dsp
  1015. } // namespace juce