The JUCE cross-platform C++ framework, with DISTRHO/KXStudio specific changes
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

1273 lines
48KB

  1. /*
  2. ==============================================================================
  3. This file is part of the JUCE library.
  4. Copyright (c) 2020 - Raw Material Software Limited
  5. JUCE is an open source library subject to commercial or open-source
  6. licensing.
  7. By using JUCE, you agree to the terms of both the JUCE 6 End-User License
  8. Agreement and JUCE Privacy Policy (both effective as of the 16th June 2020).
  9. End User License Agreement: www.juce.com/juce-6-licence
  10. Privacy Policy: www.juce.com/juce-privacy-policy
  11. Or: You may also use this code under the terms of the GPL v3 (see
  12. www.gnu.org/licenses).
  13. JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER
  14. EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE
  15. DISCLAIMED.
  16. ==============================================================================
  17. */
  18. namespace juce
  19. {
  20. namespace dsp
  21. {
  22. template <typename Element>
  23. class Queue
  24. {
  25. public:
  26. explicit Queue (int size)
  27. : fifo (size), storage (static_cast<size_t> (size)) {}
  28. bool push (Element& element) noexcept
  29. {
  30. if (fifo.getFreeSpace() == 0)
  31. return false;
  32. const auto writer = fifo.write (1);
  33. if (writer.blockSize1 != 0)
  34. storage[static_cast<size_t> (writer.startIndex1)] = std::move (element);
  35. else if (writer.blockSize2 != 0)
  36. storage[static_cast<size_t> (writer.startIndex2)] = std::move (element);
  37. return true;
  38. }
  39. template <typename Fn>
  40. void pop (Fn&& fn) { popN (1, std::forward<Fn> (fn)); }
  41. template <typename Fn>
  42. void popAll (Fn&& fn) { popN (fifo.getNumReady(), std::forward<Fn> (fn)); }
  43. bool hasPendingMessages() const noexcept { return fifo.getNumReady() > 0; }
  44. private:
  45. template <typename Fn>
  46. void popN (int n, Fn&& fn)
  47. {
  48. fifo.read (n).forEach ([&] (int index)
  49. {
  50. fn (storage[static_cast<size_t> (index)]);
  51. });
  52. }
  53. AbstractFifo fifo;
  54. std::vector<Element> storage;
  55. };
  56. class BackgroundMessageQueue : private Thread
  57. {
  58. public:
  59. explicit BackgroundMessageQueue (int entries)
  60. : Thread ("Convolution background loader"), queue (entries)
  61. {
  62. startThread();
  63. }
  64. ~BackgroundMessageQueue() override
  65. {
  66. stopThread (-1);
  67. }
  68. using IncomingCommand = FixedSizeFunction<400, void()>;
  69. // Push functions here, and they'll be called later on a background thread.
  70. // This function is wait-free.
  71. // This function is only safe to call from a single thread at a time.
  72. bool push (IncomingCommand& command) { return queue.push (command); }
  73. private:
  74. void run() override
  75. {
  76. while (! threadShouldExit())
  77. {
  78. if (queue.hasPendingMessages())
  79. queue.pop ([] (IncomingCommand& command) { command(); command = nullptr;});
  80. else
  81. sleep (10);
  82. }
  83. }
  84. Queue<IncomingCommand> queue;
  85. };
// Pimpl for ConvolutionMessageQueue: nothing more than a
// BackgroundMessageQueue, re-exporting its constructors.
struct ConvolutionMessageQueue::Impl : public BackgroundMessageQueue
{
    using BackgroundMessageQueue::BackgroundMessageQueue;
};
// Default construction delegates to a queue depth of 1000 pending commands.
ConvolutionMessageQueue::ConvolutionMessageQueue()
    : ConvolutionMessageQueue (1000)
{}

ConvolutionMessageQueue::ConvolutionMessageQueue (int entries)
    : pimpl (std::make_unique<Impl> (entries))
{}

// Defined here (not in the header) so that unique_ptr<Impl> sees the
// complete Impl type when destroying/moving it.
ConvolutionMessageQueue::~ConvolutionMessageQueue() noexcept = default;

ConvolutionMessageQueue::ConvolutionMessageQueue (ConvolutionMessageQueue&&) noexcept = default;
ConvolutionMessageQueue& ConvolutionMessageQueue::operator= (ConvolutionMessageQueue&&) noexcept = default;
  99. //==============================================================================
/** Single-channel FFT convolution engine using uniform partitioning and
    overlap-add.

    The impulse response is split into equally-sized segments whose spectra
    are precomputed in the constructor; processSamples() then convolves each
    incoming block against all segments in the frequency domain.
*/
struct ConvolutionEngine
{
    // samples:      pointer to the (single-channel) impulse response
    // numSamples:   length of the impulse response
    // maxBlockSize: largest host block that will be passed to process calls
    ConvolutionEngine (const float* samples,
                       size_t numSamples,
                       size_t maxBlockSize)
        : blockSize ((size_t) nextPowerOfTwo ((int) maxBlockSize)),
          // Smaller blocks get a relatively larger FFT (4x vs 2x) so each
          // segment still covers a useful stretch of the IR.
          fftSize (blockSize > 128 ? 2 * blockSize : 4 * blockSize),
          fftObject (std::make_unique<FFT> (roundToInt (std::log2 (fftSize)))),
          // Each segment holds (fftSize - blockSize) IR samples; +1 rounds up.
          numSegments (numSamples / (fftSize - blockSize) + 1u),
          numInputSegments ((blockSize > 128 ? numSegments : 3 * numSegments)),
          bufferInput (1, static_cast<int> (fftSize)),
          bufferOutput (1, static_cast<int> (fftSize * 2)),
          bufferTempOutput (1, static_cast<int> (fftSize * 2)),
          bufferOverlap (1, static_cast<int> (fftSize))
    {
        bufferOutput.clear();

        // (Re)allocate a segment list only when its count or FFT size changed.
        auto updateSegmentsIfNecessary = [this] (size_t numSegmentsToUpdate,
                                                 std::vector<AudioBuffer<float>>& segments)
        {
            if (numSegmentsToUpdate == 0
                || numSegmentsToUpdate != (size_t) segments.size()
                || (size_t) segments[0].getNumSamples() != fftSize * 2)
            {
                segments.clear();

                for (size_t i = 0; i < numSegmentsToUpdate; ++i)
                    segments.push_back ({ 1, static_cast<int> (fftSize * 2) });
            }
        };

        updateSegmentsIfNecessary (numInputSegments, buffersInputSegments);
        updateSegmentsIfNecessary (numSegments, buffersImpulseSegments);

        // Temporary FFT used only while pre-transforming the IR segments.
        auto FFTTempObject = std::make_unique<FFT> (roundToInt (std::log2 (fftSize)));
        size_t currentPtr = 0;

        // Copy each chunk of the IR into a segment, transform it, and repack
        // it into the layout expected by convolutionProcessingAndAccumulate().
        for (auto& buf : buffersImpulseSegments)
        {
            buf.clear();
            auto* impulseResponse = buf.getWritePointer (0);

            // NOTE(review): the first segment's sample 0 is primed with 1.0f
            // before being overwritten by the copy below; this only has an
            // effect when the IR is empty — presumably to fall back to a unit
            // impulse. TODO confirm against upstream intent.
            if (&buf == &buffersImpulseSegments.front())
                impulseResponse[0] = 1.0f;

            FloatVectorOperations::copy (impulseResponse,
                                         samples + currentPtr,
                                         static_cast<int> (jmin (fftSize - blockSize, numSamples - currentPtr)));

            FFTTempObject->performRealOnlyForwardTransform (impulseResponse);
            prepareForConvolution (impulseResponse);

            currentPtr += (fftSize - blockSize);
        }

        reset();
    }

    /** Clears all processing state (input staging, overlap, input-segment
        history) without touching the precomputed impulse spectra. */
    void reset()
    {
        bufferInput.clear();
        bufferOverlap.clear();
        bufferTempOutput.clear();
        bufferOutput.clear();

        for (auto& buf : buffersInputSegments)
            buf.clear();

        currentSegment = 0;
        inputDataPos = 0;
    }

    /** Convolves numSamples of input into output with no added latency:
        a (partial) FFT block is computed on every call. */
    void processSamples (const float* input, float* output, size_t numSamples)
    {
        // Overlap-add, zero latency convolution algorithm with uniform partitioning
        size_t numSamplesProcessed = 0;

        // Ratio of input-history segments to IR segments (1 or 3, see ctor).
        auto indexStep = numInputSegments / numSegments;

        auto* inputData = bufferInput.getWritePointer (0);
        auto* outputTempData = bufferTempOutput.getWritePointer (0);
        auto* outputData = bufferOutput.getWritePointer (0);
        auto* overlapData = bufferOverlap.getWritePointer (0);

        while (numSamplesProcessed < numSamples)
        {
            const bool inputDataWasEmpty = (inputDataPos == 0);

            // Process at most up to the end of the current FFT block.
            auto numSamplesToProcess = jmin (numSamples - numSamplesProcessed, blockSize - inputDataPos);

            FloatVectorOperations::copy (inputData + inputDataPos, input + numSamplesProcessed, static_cast<int> (numSamplesToProcess));

            // Transform the (partially filled) staging block in place in the
            // current input-history segment.
            auto* inputSegmentData = buffersInputSegments[currentSegment].getWritePointer (0);
            FloatVectorOperations::copy (inputSegmentData, inputData, static_cast<int> (fftSize));

            fftObject->performRealOnlyForwardTransform (inputSegmentData);
            prepareForConvolution (inputSegmentData);

            // Complex multiplication
            // The tail (segments 1..N-1) only changes when a new block starts,
            // so it is accumulated once per block into the temp buffer.
            if (inputDataWasEmpty)
            {
                FloatVectorOperations::fill (outputTempData, 0, static_cast<int> (fftSize + 1));

                auto index = currentSegment;

                for (size_t i = 1; i < numSegments; ++i)
                {
                    index += indexStep;

                    if (index >= numInputSegments)
                        index -= numInputSegments;

                    convolutionProcessingAndAccumulate (buffersInputSegments[index].getWritePointer (0),
                                                        buffersImpulseSegments[i].getWritePointer (0),
                                                        outputTempData);
                }
            }

            // Head segment uses the freshest input, so it is recomputed on
            // every call on top of the cached tail.
            FloatVectorOperations::copy (outputData, outputTempData, static_cast<int> (fftSize + 1));

            convolutionProcessingAndAccumulate (inputSegmentData,
                                                buffersImpulseSegments.front().getWritePointer (0),
                                                outputData);

            updateSymmetricFrequencyDomainData (outputData);
            fftObject->performRealOnlyInverseTransform (outputData);

            // Add overlap
            FloatVectorOperations::add (&output[numSamplesProcessed], &outputData[inputDataPos], &overlapData[inputDataPos], (int) numSamplesToProcess);

            // Input buffer full => Next block
            inputDataPos += numSamplesToProcess;

            if (inputDataPos == blockSize)
            {
                // Input buffer is empty again now
                FloatVectorOperations::fill (inputData, 0.0f, static_cast<int> (fftSize));

                inputDataPos = 0;

                // Extra step for segSize > blockSize
                FloatVectorOperations::add (&(outputData[blockSize]), &(overlapData[blockSize]), static_cast<int> (fftSize - 2 * blockSize));

                // Save the overlap
                FloatVectorOperations::copy (overlapData, &(outputData[blockSize]), static_cast<int> (fftSize - blockSize));

                // Input-history ring buffer advances backwards.
                currentSegment = (currentSegment > 0) ? (currentSegment - 1) : (numInputSegments - 1);
            }

            numSamplesProcessed += numSamplesToProcess;
        }
    }

    /** Convolves numSamples of input into output, with blockSize samples of
        latency: output is read from the previous block while the current
        block is only processed once the staging buffer fills up. */
    void processSamplesWithAddedLatency (const float* input, float* output, size_t numSamples)
    {
        // Overlap-add convolution with uniform partitioning, run at one FFT
        // per full block (hence the added latency).
        size_t numSamplesProcessed = 0;

        auto indexStep = numInputSegments / numSegments;

        auto* inputData = bufferInput.getWritePointer (0);
        auto* outputTempData = bufferTempOutput.getWritePointer (0);
        auto* outputData = bufferOutput.getWritePointer (0);
        auto* overlapData = bufferOverlap.getWritePointer (0);

        while (numSamplesProcessed < numSamples)
        {
            auto numSamplesToProcess = jmin (numSamples - numSamplesProcessed, blockSize - inputDataPos);

            // Stage new input, and emit the corresponding slice of the block
            // computed last time around.
            FloatVectorOperations::copy (inputData + inputDataPos, input + numSamplesProcessed, static_cast<int> (numSamplesToProcess));

            FloatVectorOperations::copy (output + numSamplesProcessed, outputData + inputDataPos, static_cast<int> (numSamplesToProcess));

            numSamplesProcessed += numSamplesToProcess;
            inputDataPos += numSamplesToProcess;

            // processing itself when needed (with latency)
            if (inputDataPos == blockSize)
            {
                // Copy input data in input segment
                auto* inputSegmentData = buffersInputSegments[currentSegment].getWritePointer (0);
                FloatVectorOperations::copy (inputSegmentData, inputData, static_cast<int> (fftSize));

                fftObject->performRealOnlyForwardTransform (inputSegmentData);
                prepareForConvolution (inputSegmentData);

                // Complex multiplication
                FloatVectorOperations::fill (outputTempData, 0, static_cast<int> (fftSize + 1));

                auto index = currentSegment;

                for (size_t i = 1; i < numSegments; ++i)
                {
                    index += indexStep;

                    if (index >= numInputSegments)
                        index -= numInputSegments;

                    convolutionProcessingAndAccumulate (buffersInputSegments[index].getWritePointer (0),
                                                        buffersImpulseSegments[i].getWritePointer (0),
                                                        outputTempData);
                }

                FloatVectorOperations::copy (outputData, outputTempData, static_cast<int> (fftSize + 1));

                convolutionProcessingAndAccumulate (inputSegmentData,
                                                    buffersImpulseSegments.front().getWritePointer (0),
                                                    outputData);

                updateSymmetricFrequencyDomainData (outputData);
                fftObject->performRealOnlyInverseTransform (outputData);

                // Add overlap
                FloatVectorOperations::add (outputData, overlapData, static_cast<int> (blockSize));

                // Input buffer is empty again now
                FloatVectorOperations::fill (inputData, 0.0f, static_cast<int> (fftSize));

                // Extra step for segSize > blockSize
                FloatVectorOperations::add (&(outputData[blockSize]), &(overlapData[blockSize]), static_cast<int> (fftSize - 2 * blockSize));

                // Save the overlap
                FloatVectorOperations::copy (overlapData, &(outputData[blockSize]), static_cast<int> (fftSize - blockSize));

                currentSegment = (currentSegment > 0) ? (currentSegment - 1) : (numInputSegments - 1);

                inputDataPos = 0;
            }
        }
    }

    // After each FFT, this function is called to allow convolution to be performed with only 4 SIMD functions calls.
    // Repacks the interleaved complex FFT output into split form: real parts
    // of the first fftSize/2 bins, then imaginary parts (recovered from the
    // conjugate-symmetric upper bins) in the second half.
    void prepareForConvolution (float *samples) noexcept
    {
        auto FFTSizeDiv2 = fftSize / 2;

        // First half: real parts of bins 0..N/2-1 (stored interleaved at 2i).
        for (size_t i = 0; i < FFTSizeDiv2; i++)
            samples[i] = samples[i << 1];

        samples[FFTSizeDiv2] = 0; // imaginary part of bin 0 is zero for real input

        // Second half: imaginary parts of bins 1..N/2-1, taken (negated) from
        // the mirrored upper bins.
        for (size_t i = 1; i < FFTSizeDiv2; i++)
            samples[i + FFTSizeDiv2] = -samples[((fftSize - i) << 1) + 1];
    }

    // Does the convolution operation itself only on half of the frequency domain samples.
    // In split-complex form: out.re += in.re*imp.re - in.im*imp.im,
    //                        out.im += in.re*imp.im + in.im*imp.re.
    void convolutionProcessingAndAccumulate (const float *input, const float *impulse, float *output)
    {
        auto FFTSizeDiv2 = fftSize / 2;

        FloatVectorOperations::addWithMultiply (output, input, impulse, static_cast<int> (FFTSizeDiv2));
        FloatVectorOperations::subtractWithMultiply (output, &(input[FFTSizeDiv2]), &(impulse[FFTSizeDiv2]), static_cast<int> (FFTSizeDiv2));

        FloatVectorOperations::addWithMultiply (&(output[FFTSizeDiv2]), input, &(impulse[FFTSizeDiv2]), static_cast<int> (FFTSizeDiv2));
        FloatVectorOperations::addWithMultiply (&(output[FFTSizeDiv2]), &(input[FFTSizeDiv2]), impulse, static_cast<int> (FFTSizeDiv2));

        // Index fftSize holds a value left in place by the forward transform
        // (untouched by prepareForConvolution) — presumably the Nyquist bin;
        // its product accumulates separately here. TODO confirm layout.
        output[fftSize] += input[fftSize] * impulse[fftSize];
    }

    // Undoes the re-organization of samples from the function prepareForConvolution.
    // Then takes the conjugate of the frequency domain first half of samples to fill the
    // second half, so that the inverse transform will return real samples in the time domain.
    void updateSymmetricFrequencyDomainData (float* samples) noexcept
    {
        auto FFTSizeDiv2 = fftSize / 2;

        // Rebuild the upper (mirrored) bins in interleaved form first…
        for (size_t i = 1; i < FFTSizeDiv2; i++)
        {
            samples[(fftSize - i) << 1] = samples[i];
            samples[((fftSize - i) << 1) + 1] = -samples[FFTSizeDiv2 + i];
        }

        samples[1] = 0.f; // imaginary part of bin 0

        // …then restore the lower bins by conjugating the freshly-written
        // upper half (safe because the sources were written above).
        for (size_t i = 1; i < FFTSizeDiv2; i++)
        {
            samples[i << 1] = samples[(fftSize - i) << 1];
            samples[(i << 1) + 1] = -samples[((fftSize - i) << 1) + 1];
        }
    }

    //==============================================================================
    const size_t blockSize;           // samples consumed per FFT block (power of two)
    const size_t fftSize;             // FFT length (2x or 4x blockSize)
    const std::unique_ptr<FFT> fftObject;
    const size_t numSegments;         // number of IR partitions
    const size_t numInputSegments;    // length of the input-history ring
    size_t currentSegment = 0, inputDataPos = 0;

    AudioBuffer<float> bufferInput, bufferOutput, bufferTempOutput, bufferOverlap;
    std::vector<AudioBuffer<float>> buffersInputSegments, buffersImpulseSegments;
};
  318. //==============================================================================
/** Wraps per-channel ConvolutionEngines (always two of them, mirroring the
    last input channel if the IR is mono), optionally split into a low-latency
    "head" and a longer-latency "tail" for non-uniform partitioning. */
class MultichannelEngine
{
public:
    MultichannelEngine (const AudioBuffer<float>& buf,
                        int maxBlockSize,
                        int maxBufferSize,
                        Convolution::NonUniform headSizeIn,
                        bool isZeroDelayIn)
        : tailBuffer (1, maxBlockSize),
          latency (isZeroDelayIn ? 0 : maxBufferSize),
          irSize (buf.getNumSamples()),
          blockSize (maxBlockSize),
          isZeroDelay (isZeroDelayIn)
    {
        constexpr auto numChannels = 2;

        // Builds an engine for one channel over [offset, offset + length) of
        // the IR; a mono IR is reused for both channels via the jmin clamp.
        const auto makeEngine = [&] (int channel, int offset, int length, uint32 thisBlockSize)
        {
            return std::make_unique<ConvolutionEngine> (buf.getReadPointer (jmin (buf.getNumChannels() - 1, channel), offset),
                                                        length,
                                                        static_cast<size_t> (thisBlockSize));
        };

        if (headSizeIn.headSizeInSamples == 0)
        {
            // Uniform partitioning: a single head engine covers the whole IR.
            for (int i = 0; i < numChannels; ++i)
                head.emplace_back (makeEngine (i, 0, buf.getNumSamples(), static_cast<uint32> (maxBufferSize)));
        }
        else
        {
            // Non-uniform: small blocks for the head, larger for the tail.
            const auto size = jmin (buf.getNumSamples(), headSizeIn.headSizeInSamples);

            for (int i = 0; i < numChannels; ++i)
                head.emplace_back (makeEngine (i, 0, size, static_cast<uint32> (maxBufferSize)));

            const auto tailBufferSize = static_cast<uint32> (headSizeIn.headSizeInSamples + (isZeroDelay ? 0 : maxBufferSize));

            // Only build tail engines if the IR extends past the head.
            if (size != buf.getNumSamples())
                for (int i = 0; i < numChannels; ++i)
                    tail.emplace_back (makeEngine (i, size, buf.getNumSamples() - size, tailBufferSize));
        }
    }

    void reset()
    {
        for (const auto& e : head)
            e->reset();

        for (const auto& e : tail)
            e->reset();
    }

    /** Convolves input into output channel by channel; extra output channels
        beyond the processed count are filled with a copy of channel 0. */
    void processSamples (const AudioBlock<const float>& input, AudioBlock<float>& output)
    {
        const auto numChannels = jmin (head.size(), input.getNumChannels(), output.getNumChannels());
        const auto numSamples = jmin (input.getNumSamples(), output.getNumSamples());

        const AudioBlock<float> fullTailBlock (tailBuffer);
        const auto tailBlock = fullTailBlock.getSubBlock (0, (size_t) numSamples);

        const auto isUniform = tail.empty();

        for (size_t channel = 0; channel < numChannels; ++channel)
        {
            // Tail runs into a scratch block, then is mixed onto the output
            // after the head has written its result.
            if (! isUniform)
                tail[channel]->processSamplesWithAddedLatency (input.getChannelPointer (channel),
                                                               tailBlock.getChannelPointer (0),
                                                               numSamples);

            if (isZeroDelay)
                head[channel]->processSamples (input.getChannelPointer (channel),
                                               output.getChannelPointer (channel),
                                               numSamples);
            else
                head[channel]->processSamplesWithAddedLatency (input.getChannelPointer (channel),
                                                               output.getChannelPointer (channel),
                                                               numSamples);

            if (! isUniform)
                output.getSingleChannelBlock (channel) += tailBlock;
        }

        const auto numOutputChannels = output.getNumChannels();

        for (auto i = numChannels; i < numOutputChannels; ++i)
            output.getSingleChannelBlock (i).copyFrom (output.getSingleChannelBlock (0));
    }

    int getIRSize() const noexcept { return irSize; }
    int getLatency() const noexcept { return latency; }
    int getBlockSize() const noexcept { return blockSize; }

private:
    std::vector<std::unique_ptr<ConvolutionEngine>> head, tail;
    AudioBuffer<float> tailBuffer; // single-channel scratch for the tail mix

    const int latency;
    const int irSize;
    const int blockSize;
    const bool isZeroDelay;
};
  402. static AudioBuffer<float> fixNumChannels (const AudioBuffer<float>& buf, Convolution::Stereo stereo)
  403. {
  404. const auto numChannels = jmin (buf.getNumChannels(), stereo == Convolution::Stereo::yes ? 2 : 1);
  405. const auto numSamples = buf.getNumSamples();
  406. AudioBuffer<float> result (numChannels, buf.getNumSamples());
  407. for (auto channel = 0; channel != numChannels; ++channel)
  408. result.copyFrom (channel, 0, buf.getReadPointer (channel), numSamples);
  409. if (result.getNumSamples() == 0 || result.getNumChannels() == 0)
  410. {
  411. result.setSize (1, 1);
  412. result.setSample (0, 0, 1.0f);
  413. }
  414. return result;
  415. }
// Returns a copy of the IR with leading and trailing samples below -80 dB
// removed. Trimming is limited by the quietest channel's content so that no
// channel loses audible samples; a fully-silent IR becomes one silent sample.
static AudioBuffer<float> trimImpulseResponse (const AudioBuffer<float>& buf)
{
    const auto thresholdTrim = Decibels::decibelsToGain (-80.0f);

    const auto numChannels = buf.getNumChannels();
    const auto numSamples = buf.getNumSamples();

    // Number of trimmable samples at the front/back, minimised over channels.
    std::ptrdiff_t offsetBegin = numSamples;
    std::ptrdiff_t offsetEnd = numSamples;

    for (auto channel = 0; channel < numChannels; ++channel)
    {
        // Distance from `begin` to the first sample at/above the threshold
        // (== the full range length if none is found).
        const auto indexAboveThreshold = [&] (auto begin, auto end)
        {
            return std::distance (begin, std::find_if (begin, end, [&] (float sample)
            {
                return std::abs (sample) >= thresholdTrim;
            }));
        };

        const auto channelBegin = buf.getReadPointer (channel);
        const auto channelEnd = channelBegin + numSamples;
        const auto itStart = indexAboveThreshold (channelBegin, channelEnd);
        // Scan backwards with reverse iterators to find trailing silence.
        const auto itEnd = indexAboveThreshold (std::make_reverse_iterator (channelEnd),
                                                std::make_reverse_iterator (channelBegin));

        offsetBegin = jmin (offsetBegin, itStart);
        offsetEnd = jmin (offsetEnd, itEnd);
    }

    if (offsetBegin == numSamples)
    {
        // Every channel was entirely below the threshold.
        auto result = AudioBuffer<float> (numChannels, 1);
        result.clear();
        return result;
    }

    const auto newLength = jmax (1, numSamples - static_cast<int> (offsetBegin + offsetEnd));

    AudioBuffer<float> result (numChannels, newLength);

    for (auto channel = 0; channel < numChannels; ++channel)
    {
        result.copyFrom (channel,
                         0,
                         buf.getReadPointer (channel, static_cast<int> (offsetBegin)),
                         result.getNumSamples());
    }

    return result;
}
  457. static float calculateNormalisationFactor (float sumSquaredMagnitude)
  458. {
  459. if (sumSquaredMagnitude < 1e-8f)
  460. return 1.0f;
  461. return 0.125f / std::sqrt (sumSquaredMagnitude);
  462. }
  463. static void normaliseImpulseResponse (AudioBuffer<float>& buf)
  464. {
  465. const auto numChannels = buf.getNumChannels();
  466. const auto numSamples = buf.getNumSamples();
  467. const auto channelPtrs = buf.getArrayOfWritePointers();
  468. const auto maxSumSquaredMag = std::accumulate (channelPtrs, channelPtrs + numChannels, 0.0f, [&] (auto max, auto* channel)
  469. {
  470. return jmax (max, std::accumulate (channel, channel + numSamples, 0.0f, [] (auto sum, auto samp)
  471. {
  472. return sum + (samp * samp);
  473. }));
  474. });
  475. const auto normalisationFactor = calculateNormalisationFactor (maxSumSquaredMag);
  476. std::for_each (channelPtrs, channelPtrs + numChannels, [&] (auto* channel)
  477. {
  478. FloatVectorOperations::multiply (channel, normalisationFactor, numSamples);
  479. });
  480. }
  481. static AudioBuffer<float> resampleImpulseResponse (const AudioBuffer<float>& buf,
  482. const double srcSampleRate,
  483. const double destSampleRate)
  484. {
  485. if (srcSampleRate == destSampleRate)
  486. return buf;
  487. const auto factorReading = srcSampleRate / destSampleRate;
  488. AudioBuffer<float> original = buf;
  489. MemoryAudioSource memorySource (original, false);
  490. ResamplingAudioSource resamplingSource (&memorySource, false, buf.getNumChannels());
  491. const auto finalSize = roundToInt (jmax (1.0, buf.getNumSamples() / factorReading));
  492. resamplingSource.setResamplingRatio (factorReading);
  493. resamplingSource.prepareToPlay (finalSize, srcSampleRate);
  494. AudioBuffer<float> result (buf.getNumChannels(), finalSize);
  495. resamplingSource.getNextAudioBlock ({ &result, 0, result.getNumSamples() });
  496. return result;
  497. }
  498. //==============================================================================
  499. template <typename Element>
  500. class TryLockedPtr
  501. {
  502. public:
  503. void set (std::unique_ptr<Element> p)
  504. {
  505. const SpinLock::ScopedLockType lock (mutex);
  506. ptr = std::move (p);
  507. }
  508. std::unique_ptr<MultichannelEngine> get()
  509. {
  510. const SpinLock::ScopedTryLockType lock (mutex);
  511. return lock.isLocked() ? std::move (ptr) : nullptr;
  512. }
  513. private:
  514. std::unique_ptr<Element> ptr;
  515. SpinLock mutex;
  516. };
// An audio buffer tagged with the sample rate it was recorded/decoded at.
// A default-constructed instance (sampleRate == 0.0) represents "no data".
struct BufferWithSampleRate
{
    BufferWithSampleRate() = default;

    BufferWithSampleRate (AudioBuffer<float>&& bufferIn, double sampleRateIn)
        : buffer (std::move (bufferIn)), sampleRate (sampleRateIn) {}

    AudioBuffer<float> buffer;
    double sampleRate = 0.0;
};
  525. static BufferWithSampleRate loadStreamToBuffer (std::unique_ptr<InputStream> stream, size_t maxLength)
  526. {
  527. AudioFormatManager manager;
  528. manager.registerBasicFormats();
  529. std::unique_ptr<AudioFormatReader> formatReader (manager.createReaderFor (std::move (stream)));
  530. if (formatReader == nullptr)
  531. return {};
  532. const auto fileLength = static_cast<size_t> (formatReader->lengthInSamples);
  533. const auto lengthToLoad = maxLength == 0 ? fileLength : jmin (maxLength, fileLength);
  534. BufferWithSampleRate result { { jlimit (1, 2, static_cast<int> (formatReader->numChannels)),
  535. static_cast<int> (lengthToLoad) },
  536. formatReader->sampleRate };
  537. formatReader->read (result.buffer.getArrayOfWritePointers(),
  538. result.buffer.getNumChannels(),
  539. 0,
  540. result.buffer.getNumSamples());
  541. return result;
  542. }
// This class caches the data required to build a new convolution engine
// (in particular, impulse response data and a ProcessSpec).
// Calls to `setProcessSpec` and `setImpulseResponse` construct a
// new engine, which can be retrieved by calling `getEngine`.
class ConvolutionEngineFactory
{
public:
    // Non-positive latency/head-size requests are treated as "none";
    // otherwise the value is rounded up to a power of two, with 64 as the
    // minimum.
    ConvolutionEngineFactory (Convolution::Latency requiredLatency,
                              Convolution::NonUniform requiredHeadSize)
        : latency { (requiredLatency.latencyInSamples <= 0) ? 0 : jmax (64, nextPowerOfTwo (requiredLatency.latencyInSamples)) },
          headSize { (requiredHeadSize.headSizeInSamples <= 0) ? 0 : jmax (64, nextPowerOfTwo (requiredHeadSize.headSizeInSamples)) },
          shouldBeZeroLatency (requiredLatency.latencyInSamples == 0)
    {}

    // It is safe to call this method simultaneously with other public
    // member functions.
    void setProcessSpec (const ProcessSpec& spec)
    {
        const std::lock_guard<std::mutex> lock (mutex);
        processSpec = spec;

        engine.set (makeEngine());
    }

    // It is safe to call this method simultaneously with other public
    // member functions.
    void setImpulseResponse (BufferWithSampleRate&& buf,
                             Convolution::Stereo stereo,
                             Convolution::Trim trim,
                             Convolution::Normalise normalise)
    {
        const std::lock_guard<std::mutex> lock (mutex);
        wantsNormalise = normalise;
        originalSampleRate = buf.sampleRate;

        // Channel-fix (and optionally trim) the IR before caching it.
        impulseResponse = [&]
        {
            auto corrected = fixNumChannels (buf.buffer, stereo);
            return trim == Convolution::Trim::yes ? trimImpulseResponse (corrected) : corrected;
        }();

        engine.set (makeEngine());
    }

    // Returns the most recently-created engine, or nullptr
    // if there is no pending engine, or if the engine is currently
    // being updated by one of the setter methods.
    // It is safe to call this simultaneously with other public
    // member functions.
    std::unique_ptr<MultichannelEngine> getEngine() { return engine.get(); }

private:
    // Builds a fresh engine from the cached IR and spec. Must be called with
    // `mutex` held.
    std::unique_ptr<MultichannelEngine> makeEngine()
    {
        auto resampled = resampleImpulseResponse (impulseResponse, originalSampleRate, processSpec.sampleRate);

        if (wantsNormalise == Convolution::Normalise::yes)
            normaliseImpulseResponse (resampled);

        // The engine can never run with less latency than one host block.
        const auto currentLatency = jmax (processSpec.maximumBlockSize, (uint32) latency.latencyInSamples);
        const auto maxBufferSize = shouldBeZeroLatency ? static_cast<int> (processSpec.maximumBlockSize)
                                                       : nextPowerOfTwo (static_cast<int> (currentLatency));

        return std::make_unique<MultichannelEngine> (resampled,
                                                     processSpec.maximumBlockSize,
                                                     maxBufferSize,
                                                     headSize,
                                                     shouldBeZeroLatency);
    }

    // Unit impulse used until a real IR is supplied, so the engine passes
    // audio through unchanged.
    static AudioBuffer<float> makeImpulseBuffer()
    {
        AudioBuffer<float> result (1, 1);
        result.setSample (0, 0, 1.0f);
        return result;
    }

    ProcessSpec processSpec { 44100.0, 128, 2 };
    AudioBuffer<float> impulseResponse = makeImpulseBuffer();
    double originalSampleRate = processSpec.sampleRate;
    Convolution::Normalise wantsNormalise = Convolution::Normalise::no;

    const Convolution::Latency latency;
    const Convolution::NonUniform headSize;
    const bool shouldBeZeroLatency;

    TryLockedPtr<MultichannelEngine> engine;

    mutable std::mutex mutex; // guards all cached state above
};
  618. static void setImpulseResponse (ConvolutionEngineFactory& factory,
  619. const void* sourceData,
  620. size_t sourceDataSize,
  621. Convolution::Stereo stereo,
  622. Convolution::Trim trim,
  623. size_t size,
  624. Convolution::Normalise normalise)
  625. {
  626. factory.setImpulseResponse (loadStreamToBuffer (std::make_unique<MemoryInputStream> (sourceData, sourceDataSize, false), size),
  627. stereo, trim, normalise);
  628. }
  629. static void setImpulseResponse (ConvolutionEngineFactory& factory,
  630. const File& fileImpulseResponse,
  631. Convolution::Stereo stereo,
  632. Convolution::Trim trim,
  633. size_t size,
  634. Convolution::Normalise normalise)
  635. {
  636. factory.setImpulseResponse (loadStreamToBuffer (std::make_unique<FileInputStream> (fileImpulseResponse), size),
  637. stereo, trim, normalise);
  638. }
  639. // This class acts as a destination for convolution engines which are loaded on
  640. // a background thread.
  641. // Deriving from `enable_shared_from_this` allows us to capture a reference to
  642. // this object when adding commands to the background message queue.
  643. // That way, we can avoid dangling references in the background thread in the case
  644. // that a Convolution instance is deleted before the background message queue.
  645. class ConvolutionEngineQueue : public std::enable_shared_from_this<ConvolutionEngineQueue>
  646. {
  647. public:
  648. ConvolutionEngineQueue (BackgroundMessageQueue& queue,
  649. Convolution::Latency latencyIn,
  650. Convolution::NonUniform headSizeIn)
  651. : messageQueue (queue), factory (latencyIn, headSizeIn) {}
  652. void loadImpulseResponse (AudioBuffer<float>&& buffer,
  653. double sr,
  654. Convolution::Stereo stereo,
  655. Convolution::Trim trim,
  656. Convolution::Normalise normalise)
  657. {
  658. callLater ([b = std::move (buffer), sr, stereo, trim, normalise] (ConvolutionEngineFactory& f) mutable
  659. {
  660. f.setImpulseResponse ({ std::move (b), sr }, stereo, trim, normalise);
  661. });
  662. }
  663. void loadImpulseResponse (const void* sourceData,
  664. size_t sourceDataSize,
  665. Convolution::Stereo stereo,
  666. Convolution::Trim trim,
  667. size_t size,
  668. Convolution::Normalise normalise)
  669. {
  670. callLater ([sourceData, sourceDataSize, stereo, trim, size, normalise] (ConvolutionEngineFactory& f) mutable
  671. {
  672. setImpulseResponse (f, sourceData, sourceDataSize, stereo, trim, size, normalise);
  673. });
  674. }
  675. void loadImpulseResponse (const File& fileImpulseResponse,
  676. Convolution::Stereo stereo,
  677. Convolution::Trim trim,
  678. size_t size,
  679. Convolution::Normalise normalise)
  680. {
  681. callLater ([fileImpulseResponse, stereo, trim, size, normalise] (ConvolutionEngineFactory& f) mutable
  682. {
  683. setImpulseResponse (f, fileImpulseResponse, stereo, trim, size, normalise);
  684. });
  685. }
  686. void prepare (const ProcessSpec& spec)
  687. {
  688. factory.setProcessSpec (spec);
  689. }
  690. // Call this regularly to try to resend any pending message.
  691. // This allows us to always apply the most recently requested
  692. // state (eventually), even if the message queue fills up.
  693. void postPendingCommand()
  694. {
  695. if (pendingCommand == nullptr)
  696. return;
  697. if (messageQueue.push (pendingCommand))
  698. pendingCommand = nullptr;
  699. }
  700. std::unique_ptr<MultichannelEngine> getEngine() { return factory.getEngine(); }
  701. private:
// Wraps fn in a command that checks (via the weak_ptr) that this queue is
// still alive before running, stores it as the pending command, and tries to
// post it to the background queue immediately.
template <typename Fn>
void callLater (Fn&& fn)
{
    // If there was already a pending command (because the queue was full) we'll end up deleting it here.
    // Not much we can do about that!
    pendingCommand = [weak = weakFromThis(), callback = std::forward<Fn> (fn)]() mutable
    {
        if (auto t = weak.lock())
            callback (t->factory);
    };

    postPendingCommand();
}
  714. std::weak_ptr<ConvolutionEngineQueue> weakFromThis() { return shared_from_this(); }
BackgroundMessageQueue& messageQueue;                    // background thread that executes queued commands (not owned)
ConvolutionEngineFactory factory;                        // builds engines from the most recently supplied IR and spec
BackgroundMessageQueue::IncomingCommand pendingCommand;  // command that couldn't be enqueued yet; retried by postPendingCommand()
};
/*  Crossfades between the outputs of two processors, so that an engine can be
    swapped for a new one without an audible discontinuity.

    While `smoother` ramps from 1 to 0 the output is
    previous * g + current * (1 - g); once the ramp completes, notifyDone()
    is invoked so the caller can dispose of the old processor.
*/
class CrossoverMixer
{
public:
    void reset()
    {
        smoother.setCurrentAndTargetValue (1.0f);
    }

    void prepare (const ProcessSpec& spec)
    {
        smoother.reset (spec.sampleRate, 0.05);  // 50 ms crossfade
        smootherBuffer.setSize (1, static_cast<int> (spec.maximumBlockSize));
        mixBuffer.setSize (static_cast<int> (spec.numChannels), static_cast<int> (spec.maximumBlockSize));
        reset();
    }

    // `current` and `previous` each render one block; notifyDone is called on
    // this (audio) thread when the crossfade has just finished.
    template <typename ProcessCurrent, typename ProcessPrevious, typename NotifyDone>
    void processSamples (const AudioBlock<const float>& input,
                         AudioBlock<float>& output,
                         ProcessCurrent&& current,
                         ProcessPrevious&& previous,
                         NotifyDone&& notifyDone)
    {
        if (smoother.isSmoothing())
        {
            const auto numSamples = static_cast<int> (input.getNumSamples());

            // Fill channel 0 with the per-sample fade-out gain for the old processor.
            for (auto sample = 0; sample != numSamples; ++sample)
                smootherBuffer.setSample (0, sample, smoother.getNextValue());

            AudioBlock<float> mixBlock (mixBuffer);
            mixBlock.clear();
            previous (input, mixBlock);

            // Scale the old processor's output by the fade-out gain.
            for (size_t channel = 0; channel != output.getNumChannels(); ++channel)
            {
                FloatVectorOperations::multiply (mixBlock.getChannelPointer (channel),
                                                 smootherBuffer.getReadPointer (0),
                                                 numSamples);
            }

            // Convert the ramp in-place to the complementary fade-in gain (1 - g).
            // Note: this must happen after the loop above and before the one below.
            FloatVectorOperations::multiply (smootherBuffer.getWritePointer (0), -1.0f, numSamples);
            FloatVectorOperations::add (smootherBuffer.getWritePointer (0), 1.0f, numSamples);

            current (input, output);

            // Scale the new processor's output by the fade-in gain, then sum
            // in the faded-out old output.
            for (size_t channel = 0; channel != output.getNumChannels(); ++channel)
            {
                FloatVectorOperations::multiply (output.getChannelPointer (channel),
                                                 smootherBuffer.getReadPointer (0),
                                                 numSamples);
                FloatVectorOperations::add (output.getChannelPointer (channel),
                                            mixBlock.getChannelPointer (channel),
                                            numSamples);
            }

            if (! smoother.isSmoothing())
                notifyDone();
        }
        else
        {
            // No fade in progress: the current processor renders directly.
            current (input, output);
        }
    }

    // Restarts the fade: old processor at full gain, ramping down to silence.
    void beginTransition()
    {
        smoother.setCurrentAndTargetValue (1.0f);
        smoother.setTargetValue (0.0f);
    }

private:
    LinearSmoothedValue<float> smoother;  // gain applied to the outgoing processor
    AudioBuffer<float> smootherBuffer;    // single-channel per-sample gain ramp
    AudioBuffer<float> mixBuffer;         // scratch space for the outgoing processor's output
};
  784. using OptionalQueue = OptionalScopedPointer<ConvolutionMessageQueue>;
/*  The private implementation behind Convolution.

    Owns the (possibly shared) background message queue, the queue used to
    request new engines, and up to two engines: the one currently rendering,
    and the previous one, which is kept alive while the CrossoverMixer fades
    between them.
*/
class Convolution::Impl
{
public:
    Impl (Latency requiredLatency,
          NonUniform requiredHeadSize,
          OptionalQueue&& queue)
        : messageQueue (std::move (queue)),
          engineQueue (std::make_shared<ConvolutionEngineQueue> (*messageQueue->pimpl,
                                                                requiredLatency,
                                                                requiredHeadSize))
    {}

    void reset()
    {
        mixer.reset();

        if (currentEngine != nullptr)
            currentEngine->reset();

        destroyPreviousEngine();
    }

    void prepare (const ProcessSpec& spec)
    {
        mixer.prepare (spec);
        engineQueue->prepare (spec);
        installPendingEngine();

        // prepare() should always leave us with a usable engine.
        jassert (currentEngine != nullptr);
    }

    void processSamples (const AudioBlock<const float>& input, AudioBlock<float>& output)
    {
        // Retry any load request that previously failed to enqueue.
        engineQueue->postPendingCommand();

        // Only install a new engine once the previous crossfade has finished
        // (previousEngine is cleared when the fade completes).
        if (previousEngine == nullptr)
            installPendingEngine();

        mixer.processSamples (input,
                              output,
                              [this] (const AudioBlock<const float>& in, AudioBlock<float>& out)
                              {
                                  currentEngine->processSamples (in, out);
                              },
                              [this] (const AudioBlock<const float>& in, AudioBlock<float>& out)
                              {
                                  // Before the first engine exists, the "previous"
                                  // signal is just a pass-through of the input.
                                  if (previousEngine != nullptr)
                                      previousEngine->processSamples (in, out);
                                  else
                                      out.copyFrom (in);
                              },
                              [this] { destroyPreviousEngine(); });
    }

    int getCurrentIRSize() const { return currentEngine != nullptr ? currentEngine->getIRSize() : 0; }

    int getLatency() const { return currentEngine != nullptr ? currentEngine->getLatency() : 0; }

    void loadImpulseResponse (AudioBuffer<float>&& buffer,
                              double originalSampleRate,
                              Stereo stereo,
                              Trim trim,
                              Normalise normalise)
    {
        engineQueue->loadImpulseResponse (std::move (buffer), originalSampleRate, stereo, trim, normalise);
    }

    void loadImpulseResponse (const void* sourceData,
                              size_t sourceDataSize,
                              Stereo stereo,
                              Trim trim,
                              size_t size,
                              Normalise normalise)
    {
        engineQueue->loadImpulseResponse (sourceData, sourceDataSize, stereo, trim, size, normalise);
    }

    void loadImpulseResponse (const File& fileImpulseResponse,
                              Stereo stereo,
                              Trim trim,
                              size_t size,
                              Normalise normalise)
    {
        engineQueue->loadImpulseResponse (fileImpulseResponse, stereo, trim, size, normalise);
    }

private:
    // Hands the old engine to the background thread for deletion.
    // If the queue is full, we'll destroy this straight away (the command,
    // holding the engine, is destroyed on this thread instead).
    void destroyPreviousEngine()
    {
        BackgroundMessageQueue::IncomingCommand command = [p = std::move (previousEngine)]() mutable { p = nullptr; };
        messageQueue->pimpl->push (command);
    }

    // Rotates engines: current becomes previous, newEngine becomes current,
    // and a crossfade between them is started.
    void installNewEngine (std::unique_ptr<MultichannelEngine> newEngine)
    {
        destroyPreviousEngine();
        previousEngine = std::move (currentEngine);
        currentEngine = std::move (newEngine);
        mixer.beginTransition();
    }

    // Installs a freshly-built engine, if the engine queue has one ready.
    void installPendingEngine()
    {
        if (auto newEngine = engineQueue->getEngine())
            installNewEngine (std::move (newEngine));
    }

    OptionalQueue messageQueue;                                // background thread (owned or user-supplied)
    std::shared_ptr<ConvolutionEngineQueue> engineQueue;       // builds engines off the audio thread
    std::unique_ptr<MultichannelEngine> previousEngine, currentEngine;
    CrossoverMixer mixer;                                      // fades previousEngine -> currentEngine
};
  881. //==============================================================================
  882. void Convolution::Mixer::prepare (const ProcessSpec& spec)
  883. {
  884. for (auto& dry : volumeDry)
  885. dry.reset (spec.sampleRate, 0.05);
  886. for (auto& wet : volumeWet)
  887. wet.reset (spec.sampleRate, 0.05);
  888. sampleRate = spec.sampleRate;
  889. dryBlock = AudioBlock<float> (dryBlockStorage,
  890. jmin (spec.numChannels, 2u),
  891. spec.maximumBlockSize);
  892. }
// Mixes the dry (input) and wet (convolved) signals, crossfading between them
// whenever the bypass state changes.
template <typename ProcessWet>
void Convolution::Mixer::processSamples (const AudioBlock<const float>& input,
                                         AudioBlock<float>& output,
                                         bool isBypassed,
                                         ProcessWet&& processWet) noexcept
{
    const auto numChannels = jmin (input.getNumChannels(), volumeDry.size());
    const auto numSamples = jmin (input.getNumSamples(), output.getNumSamples());

    auto dry = dryBlock.getSubsetChannelBlock (0, numChannels);

    // All channels' smoothers are driven in lockstep, so checking channel 0 suffices.
    if (volumeDry[0].isSmoothing())
    {
        // Mid-crossfade: output = wet * wetGain + dry * dryGain.
        dry.copyFrom (input);

        for (size_t channel = 0; channel < numChannels; ++channel)
            volumeDry[channel].applyGain (dry.getChannelPointer (channel), (int) numSamples);

        processWet (input, output);

        for (size_t channel = 0; channel < numChannels; ++channel)
            volumeWet[channel].applyGain (output.getChannelPointer (channel), (int) numSamples);

        output += dry;
    }
    else
    {
        // Steady state: render the wet signal only when not bypassed.
        if (! currentIsBypassed)
            processWet (input, output);

        // Kick off a new crossfade when the bypass state flips.
        if (isBypassed != currentIsBypassed)
        {
            currentIsBypassed = isBypassed;

            for (size_t channel = 0; channel < numChannels; ++channel)
            {
                // setTargetValue followed by reset() snaps the smoother's
                // current value to the *old* state; the second setTargetValue
                // then starts the ramp towards the new state.
                volumeDry[channel].setTargetValue (isBypassed ? 0.0f : 1.0f);
                volumeDry[channel].reset (sampleRate, 0.05);
                volumeDry[channel].setTargetValue (isBypassed ? 1.0f : 0.0f);

                volumeWet[channel].setTargetValue (isBypassed ? 1.0f : 0.0f);
                volumeWet[channel].reset (sampleRate, 0.05);
                volumeWet[channel].setTargetValue (isBypassed ? 0.0f : 1.0f);
            }
        }
    }
}
  931. void Convolution::Mixer::reset() { dryBlock.clear(); }
  932. //==============================================================================
// Every public constructor delegates to the private constructor below, which
// takes a Latency, a NonUniform head size, and an OptionalQueue. The queue is
// either freshly created (and owned), or user-supplied (and merely referenced).
Convolution::Convolution()
    : Convolution (Latency { 0 })
{}

Convolution::Convolution (ConvolutionMessageQueue& queue)
    : Convolution (Latency { 0 }, queue)
{}

Convolution::Convolution (const Latency& requiredLatency)
    : Convolution (requiredLatency,
                   {},
                   OptionalQueue { std::make_unique<ConvolutionMessageQueue>() })
{}

Convolution::Convolution (const NonUniform& nonUniform)
    : Convolution ({},
                   nonUniform,
                   OptionalQueue { std::make_unique<ConvolutionMessageQueue>() })
{}

Convolution::Convolution (const Latency& requiredLatency, ConvolutionMessageQueue& queue)
    : Convolution (requiredLatency, {}, OptionalQueue { queue })
{}

Convolution::Convolution (const NonUniform& nonUniform, ConvolutionMessageQueue& queue)
    : Convolution ({}, nonUniform, OptionalQueue { queue })
{}

Convolution::Convolution (const Latency& latency,
                          const NonUniform& nonUniform,
                          OptionalQueue&& queue)
    : pimpl (std::make_unique<Impl> (latency, nonUniform, std::move (queue)))
{}

// Defined here (not in the header) so that Impl is a complete type.
Convolution::~Convolution() noexcept = default;
// The three public loadImpulseResponse overloads forward to the impl, which
// enqueues the actual load to happen on the background thread.
void Convolution::loadImpulseResponse (const void* sourceData,
                                       size_t sourceDataSize,
                                       Stereo stereo,
                                       Trim trim,
                                       size_t size,
                                       Normalise normalise)
{
    pimpl->loadImpulseResponse (sourceData, sourceDataSize, stereo, trim, size, normalise);
}

void Convolution::loadImpulseResponse (const File& fileImpulseResponse,
                                       Stereo stereo,
                                       Trim trim,
                                       size_t size,
                                       Normalise normalise)
{
    pimpl->loadImpulseResponse (fileImpulseResponse, stereo, trim, size, normalise);
}

void Convolution::loadImpulseResponse (AudioBuffer<float>&& buffer,
                                       double originalSampleRate,
                                       Stereo stereo,
                                       Trim trim,
                                       Normalise normalise)
{
    pimpl->loadImpulseResponse (std::move (buffer), originalSampleRate, stereo, trim, normalise);
}
void Convolution::prepare (const ProcessSpec& spec)
{
    mixer.prepare (spec);   // the dry/wet bypass mixer owned by this facade
    pimpl->prepare (spec);

    // processSamples() is a no-op until prepare() has been called.
    isActive = true;
}
// Clears internal buffers and engine state; safe to call from the audio thread.
void Convolution::reset() noexcept
{
    mixer.reset();
    pimpl->reset();
}
// Renders one block of audio. Does nothing until prepare() has been called.
// The Mixer handles the bypass crossfade; the wet path is rendered by the impl.
void Convolution::processSamples (const AudioBlock<const float>& input,
                                  AudioBlock<float>& output,
                                  bool isBypassed) noexcept
{
    if (! isActive)
        return;

    jassert (input.getNumChannels() == output.getNumChannels());
    jassert (isPositiveAndBelow (input.getNumChannels(), static_cast<size_t> (3))); // only mono and stereo is supported

    mixer.processSamples (input, output, isBypassed, [this] (const auto& in, auto& out)
    {
        pimpl->processSamples (in, out);
    });
}
  1010. int Convolution::getCurrentIRSize() const { return pimpl->getCurrentIRSize(); }
  1011. int Convolution::getLatency() const { return pimpl->getLatency(); }
  1012. } // namespace dsp
  1013. } // namespace juce