The JUCE cross-platform C++ framework, with DISTRHO/KXStudio specific changes
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1243 lines
47KB

  1. /*
  2. ==============================================================================
  3. This file is part of the JUCE 6 technical preview.
  4. Copyright (c) 2020 - Raw Material Software Limited
  5. You may use this code under the terms of the GPL v3
  6. (see www.gnu.org/licenses).
  7. For this technical preview, this file is not subject to commercial licensing.
  8. JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER
  9. EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE
  10. DISCLAIMED.
  11. ==============================================================================
  12. */
  13. namespace juce
  14. {
  15. namespace dsp
  16. {
  17. /** This class is the convolution engine itself, processing only one channel at
  18. a time of input signal.
  19. */
  20. struct ConvolutionEngine
  21. {
  22. ConvolutionEngine() = default;
  23. //==============================================================================
  24. struct ProcessingInformation
  25. {
  26. enum class SourceType
  27. {
  28. sourceBinaryData,
  29. sourceAudioFile,
  30. sourceAudioBuffer,
  31. sourceNone
  32. };
  33. SourceType sourceType = SourceType::sourceNone;
  34. const void* sourceData;
  35. int sourceDataSize;
  36. File fileImpulseResponse;
  37. double originalSampleRate;
  38. int originalSize = 0;
  39. int originalNumChannels = 1;
  40. AudioBuffer<float>* buffer;
  41. bool wantsStereo = true;
  42. bool wantsTrimming = true;
  43. bool wantsNormalisation = true;
  44. int64 wantedSize = 0;
  45. int finalSize = 0;
  46. double sampleRate = 0;
  47. size_t maximumBufferSize = 0;
  48. };
  49. //==============================================================================
  50. void reset()
  51. {
  52. bufferInput.clear();
  53. bufferOverlap.clear();
  54. bufferTempOutput.clear();
  55. for (auto i = 0; i < buffersInputSegments.size(); ++i)
  56. buffersInputSegments.getReference (i).clear();
  57. currentSegment = 0;
  58. inputDataPos = 0;
  59. }
  60. /** Initalize all the states and objects to perform the convolution. */
  61. void initializeConvolutionEngine (ProcessingInformation& info, int channel)
  62. {
  63. blockSize = (size_t) nextPowerOfTwo ((int) info.maximumBufferSize);
  64. FFTSize = blockSize > 128 ? 2 * blockSize
  65. : 4 * blockSize;
  66. numSegments = ((size_t) info.finalSize) / (FFTSize - blockSize) + 1u;
  67. numInputSegments = (blockSize > 128 ? numSegments : 3 * numSegments);
  68. FFTobject = std::make_unique<FFT> (roundToInt (std::log2 (FFTSize)));
  69. bufferInput.setSize (1, static_cast<int> (FFTSize));
  70. bufferOutput.setSize (1, static_cast<int> (FFTSize * 2));
  71. bufferTempOutput.setSize (1, static_cast<int> (FFTSize * 2));
  72. bufferOverlap.setSize (1, static_cast<int> (FFTSize));
  73. buffersInputSegments.clear();
  74. buffersImpulseSegments.clear();
  75. bufferOutput.clear();
  76. for (size_t i = 0; i < numInputSegments; ++i)
  77. {
  78. AudioBuffer<float> newInputSegment;
  79. newInputSegment.setSize (1, static_cast<int> (FFTSize * 2));
  80. buffersInputSegments.add (newInputSegment);
  81. }
  82. for (auto i = 0u; i < numSegments; ++i)
  83. {
  84. AudioBuffer<float> newImpulseSegment;
  85. newImpulseSegment.setSize (1, static_cast<int> (FFTSize * 2));
  86. buffersImpulseSegments.add (newImpulseSegment);
  87. }
  88. std::unique_ptr<FFT> FFTTempObject (new FFT (roundToInt (std::log2 (FFTSize))));
  89. auto* channelData = info.buffer->getWritePointer (channel);
  90. for (size_t n = 0; n < numSegments; ++n)
  91. {
  92. buffersImpulseSegments.getReference (static_cast<int> (n)).clear();
  93. auto* impulseResponse = buffersImpulseSegments.getReference (static_cast<int> (n)).getWritePointer (0);
  94. if (n == 0)
  95. impulseResponse[0] = 1.0f;
  96. for (size_t i = 0; i < FFTSize - blockSize; ++i)
  97. if (i + n * (FFTSize - blockSize) < (size_t) info.finalSize)
  98. impulseResponse[i] = channelData[i + n * (FFTSize - blockSize)];
  99. FFTTempObject->performRealOnlyForwardTransform (impulseResponse);
  100. prepareForConvolution (impulseResponse);
  101. }
  102. reset();
  103. isReady = true;
  104. }
  105. /** Copy the states of another engine. */
  106. void copyStateFromOtherEngine (const ConvolutionEngine& other)
  107. {
  108. if (FFTSize != other.FFTSize)
  109. {
  110. FFTobject.reset (new FFT (roundToInt (std::log2 (other.FFTSize))));
  111. FFTSize = other.FFTSize;
  112. }
  113. currentSegment = other.currentSegment;
  114. numInputSegments = other.numInputSegments;
  115. numSegments = other.numSegments;
  116. blockSize = other.blockSize;
  117. inputDataPos = other.inputDataPos;
  118. bufferInput = other.bufferInput;
  119. bufferTempOutput = other.bufferTempOutput;
  120. bufferOutput = other.bufferOutput;
  121. buffersInputSegments = other.buffersInputSegments;
  122. buffersImpulseSegments = other.buffersImpulseSegments;
  123. bufferOverlap = other.bufferOverlap;
  124. isReady = true;
  125. }
  126. /** Performs the uniform partitioned convolution using FFT. */
  127. void processSamples (const float* input, float* output, size_t numSamples)
  128. {
  129. if (! isReady)
  130. return;
  131. // Overlap-add, zero latency convolution algorithm with uniform partitioning
  132. size_t numSamplesProcessed = 0;
  133. auto indexStep = numInputSegments / numSegments;
  134. auto* inputData = bufferInput.getWritePointer (0);
  135. auto* outputTempData = bufferTempOutput.getWritePointer (0);
  136. auto* outputData = bufferOutput.getWritePointer (0);
  137. auto* overlapData = bufferOverlap.getWritePointer (0);
  138. while (numSamplesProcessed < numSamples)
  139. {
  140. const bool inputDataWasEmpty = (inputDataPos == 0);
  141. auto numSamplesToProcess = jmin (numSamples - numSamplesProcessed, blockSize - inputDataPos);
  142. // copy the input samples
  143. FloatVectorOperations::copy (inputData + inputDataPos, input + numSamplesProcessed, static_cast<int> (numSamplesToProcess));
  144. auto* inputSegmentData = buffersInputSegments.getReference (static_cast<int> (currentSegment)).getWritePointer (0);
  145. FloatVectorOperations::copy (inputSegmentData, inputData, static_cast<int> (FFTSize));
  146. // Forward FFT
  147. FFTobject->performRealOnlyForwardTransform (inputSegmentData);
  148. prepareForConvolution (inputSegmentData);
  149. // Complex multiplication
  150. if (inputDataWasEmpty)
  151. {
  152. FloatVectorOperations::fill (outputTempData, 0, static_cast<int> (FFTSize + 1));
  153. auto index = currentSegment;
  154. for (size_t i = 1; i < numSegments; ++i)
  155. {
  156. index += indexStep;
  157. if (index >= numInputSegments)
  158. index -= numInputSegments;
  159. convolutionProcessingAndAccumulate (buffersInputSegments.getReference (static_cast<int> (index)).getWritePointer (0),
  160. buffersImpulseSegments.getReference (static_cast<int> (i)).getWritePointer (0),
  161. outputTempData);
  162. }
  163. }
  164. FloatVectorOperations::copy (outputData, outputTempData, static_cast<int> (FFTSize + 1));
  165. convolutionProcessingAndAccumulate (buffersInputSegments.getReference (static_cast<int> (currentSegment)).getWritePointer (0),
  166. buffersImpulseSegments.getReference (0).getWritePointer (0),
  167. outputData);
  168. // Inverse FFT
  169. updateSymmetricFrequencyDomainData (outputData);
  170. FFTobject->performRealOnlyInverseTransform (outputData);
  171. // Add overlap
  172. for (size_t i = 0; i < numSamplesToProcess; ++i)
  173. output[i + numSamplesProcessed] = outputData[inputDataPos + i] + overlapData[inputDataPos + i];
  174. // Input buffer full => Next block
  175. inputDataPos += numSamplesToProcess;
  176. if (inputDataPos == blockSize)
  177. {
  178. // Input buffer is empty again now
  179. FloatVectorOperations::fill (inputData, 0.0f, static_cast<int> (FFTSize));
  180. inputDataPos = 0;
  181. // Extra step for segSize > blockSize
  182. FloatVectorOperations::add (&(outputData[blockSize]), &(overlapData[blockSize]), static_cast<int> (FFTSize - 2 * blockSize));
  183. // Save the overlap
  184. FloatVectorOperations::copy (overlapData, &(outputData[blockSize]), static_cast<int> (FFTSize - blockSize));
  185. // Update current segment
  186. currentSegment = (currentSegment > 0) ? (currentSegment - 1) : (numInputSegments - 1);
  187. }
  188. numSamplesProcessed += numSamplesToProcess;
  189. }
  190. }
  191. /** After each FFT, this function is called to allow convolution to be performed with only 4 SIMD functions calls. */
  192. void prepareForConvolution (float *samples) noexcept
  193. {
  194. auto FFTSizeDiv2 = FFTSize / 2;
  195. for (size_t i = 0; i < FFTSizeDiv2; i++)
  196. samples[i] = samples[2 * i];
  197. samples[FFTSizeDiv2] = 0;
  198. for (size_t i = 1; i < FFTSizeDiv2; i++)
  199. samples[i + FFTSizeDiv2] = -samples[2 * (FFTSize - i) + 1];
  200. }
  201. /** Does the convolution operation itself only on half of the frequency domain samples. */
  202. void convolutionProcessingAndAccumulate (const float *input, const float *impulse, float *output)
  203. {
  204. auto FFTSizeDiv2 = FFTSize / 2;
  205. FloatVectorOperations::addWithMultiply (output, input, impulse, static_cast<int> (FFTSizeDiv2));
  206. FloatVectorOperations::subtractWithMultiply (output, &(input[FFTSizeDiv2]), &(impulse[FFTSizeDiv2]), static_cast<int> (FFTSizeDiv2));
  207. FloatVectorOperations::addWithMultiply (&(output[FFTSizeDiv2]), input, &(impulse[FFTSizeDiv2]), static_cast<int> (FFTSizeDiv2));
  208. FloatVectorOperations::addWithMultiply (&(output[FFTSizeDiv2]), &(input[FFTSizeDiv2]), impulse, static_cast<int> (FFTSizeDiv2));
  209. output[FFTSize] += input[FFTSize] * impulse[FFTSize];
  210. }
  211. /** Undo the re-organization of samples from the function prepareForConvolution.
  212. Then, takes the conjugate of the frequency domain first half of samples, to fill the
  213. second half, so that the inverse transform will return real samples in the time domain.
  214. */
  215. void updateSymmetricFrequencyDomainData (float* samples) noexcept
  216. {
  217. auto FFTSizeDiv2 = FFTSize / 2;
  218. for (size_t i = 1; i < FFTSizeDiv2; i++)
  219. {
  220. samples[2 * (FFTSize - i)] = samples[i];
  221. samples[2 * (FFTSize - i) + 1] = -samples[FFTSizeDiv2 + i];
  222. }
  223. samples[1] = 0.f;
  224. for (size_t i = 1; i < FFTSizeDiv2; i++)
  225. {
  226. samples[2 * i] = samples[2 * (FFTSize - i)];
  227. samples[2 * i + 1] = -samples[2 * (FFTSize - i) + 1];
  228. }
  229. }
  230. //==============================================================================
  231. std::unique_ptr<FFT> FFTobject;
  232. size_t FFTSize = 0;
  233. size_t currentSegment = 0, numInputSegments = 0, numSegments = 0, blockSize = 0, inputDataPos = 0;
  234. AudioBuffer<float> bufferInput, bufferOutput, bufferTempOutput, bufferOverlap;
  235. Array<AudioBuffer<float>> buffersInputSegments, buffersImpulseSegments;
  236. bool isReady = false;
  237. //==============================================================================
  238. JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (ConvolutionEngine)
  239. };
  240. //==============================================================================
  241. /** Manages all the changes requested by the main convolution engine, to minimize
  242. the number of calls of the convolution engine initialization, and the potential
  243. consequences of multiple quick calls to the function Convolution::loadImpulseResponse.
  244. */
  245. struct Convolution::Pimpl : private Thread
  246. {
/** The kinds of change that can be queued for the convolution engines.
    processFifo() maps each of these onto a "changeLevel" deciding how much
    reinitialisation work is needed.
*/
enum class ChangeRequest
{
    changeEngine = 0,            // full synchronous engine reinitialisation
    changeSampleRate,
    changeMaximumBufferSize,
    changeSource,                // new IR source (binary data, audio file or audio buffer)
    changeImpulseResponseSize,
    changeStereo,
    changeTrimming,
    changeNormalisation,
    changeIgnore,                // marks a superseded request inside processFifo()
    numChangeRequestTypes
};

using SourceType = ConvolutionEngine::ProcessingInformation::SourceType;
  261. //==============================================================================
Pimpl() : Thread ("Convolution"), abstractFifo (fifoSize)
{
    abstractFifo.reset();

    // Pre-allocate the fifo storage and the scratch request arrays
    fifoRequestsType.resize (fifoSize);
    fifoRequestsParameter.resize (fifoSize);

    requestsType.resize (fifoSize);
    requestsParameter.resize (fifoSize);

    // 4 engines: indices 0,1 process the current IR, 2,3 hold the updated
    // state while crossfading (see processSamples)
    for (auto i = 0; i < 4; ++i)
        engines.add (new ConvolutionEngine());

    currentInfo.maximumBufferSize = 0;
    currentInfo.buffer = &impulseResponse;

    // All IR buffers are allocated up-front at the maximum supported length,
    // so no allocation happens later on the audio or background thread
    temporaryBuffer.setSize (2, static_cast<int> (maximumTimeInSamples), false, false, true);
    impulseResponseOriginal.setSize (2, static_cast<int> (maximumTimeInSamples), false, false, true);
    impulseResponse.setSize (2, static_cast<int> (maximumTimeInSamples), false, false, true);
}
~Pimpl() override
{
    // Allow plenty of time for a pending background update to finish cleanly
    stopThread (10000);
}
//==============================================================================
/** Inits the size of the interpolation buffer. */
void initProcessing (int maximumBufferSize)
{
    // Stop any background update first: it could be using the old buffer size
    stopThread (1000);

    interpolationBuffer.setSize (1, maximumBufferSize, false, false, true);
    mustInterpolate = false;
}
  289. //==============================================================================
  290. /** Adds a new change request. */
  291. void addToFifo (ChangeRequest type, juce::var parameter)
  292. {
  293. int start1, size1, start2, size2;
  294. abstractFifo.prepareToWrite (1, start1, size1, start2, size2);
  295. // If you hit this assertion then you have requested more impulse response
  296. // changes than the Convolution class can handle.
  297. jassert (size1 + size2 > 0);
  298. if (size1 > 0)
  299. {
  300. fifoRequestsType.setUnchecked (start1, type);
  301. fifoRequestsParameter.setUnchecked (start1, parameter);
  302. }
  303. if (size2 > 0)
  304. {
  305. fifoRequestsType.setUnchecked (start2, type);
  306. fifoRequestsParameter.setUnchecked (start2, parameter);
  307. }
  308. abstractFifo.finishedWrite (size1 + size2);
  309. }
  310. /** Adds a new array of change requests. */
  311. void addToFifo (ChangeRequest* types, juce::var* parameters, int numEntries)
  312. {
  313. int start1, size1, start2, size2;
  314. abstractFifo.prepareToWrite (numEntries, start1, size1, start2, size2);
  315. // If you hit this assertion then you have requested more impulse response
  316. // changes than the Convolution class can handle.
  317. jassert (numEntries > 0 && size1 + size2 > 0);
  318. if (size1 > 0)
  319. {
  320. for (auto i = 0; i < size1; ++i)
  321. {
  322. fifoRequestsType.setUnchecked (start1 + i, types[i]);
  323. fifoRequestsParameter.setUnchecked (start1 + i, parameters[i]);
  324. }
  325. }
  326. if (size2 > 0)
  327. {
  328. for (auto i = 0; i < size2; ++i)
  329. {
  330. fifoRequestsType.setUnchecked (start2 + i, types[i + size1]);
  331. fifoRequestsParameter.setUnchecked (start2 + i, parameters[i + size1]);
  332. }
  333. }
  334. abstractFifo.finishedWrite (size1 + size2);
  335. }
  336. /** Reads requests from the fifo. */
  337. void readFromFifo (ChangeRequest& type, juce::var& parameter)
  338. {
  339. int start1, size1, start2, size2;
  340. abstractFifo.prepareToRead (1, start1, size1, start2, size2);
  341. if (size1 > 0)
  342. {
  343. type = fifoRequestsType[start1];
  344. parameter = fifoRequestsParameter[start1];
  345. }
  346. if (size2 > 0)
  347. {
  348. type = fifoRequestsType[start2];
  349. parameter = fifoRequestsParameter[start2];
  350. }
  351. abstractFifo.finishedRead (size1 + size2);
  352. }
/** Returns the number of requests that still need to be processed. */
int getNumRemainingEntries() const noexcept
{
    return abstractFifo.getNumReady();
}
  358. //==============================================================================
/** This function processes all the change requests to remove all the
    redundant ones, and to tell what kind of initialization must be done.
    Depending on the results, the convolution engines might be reset, or
    simply updated, or they might not need any change at all.

    changeLevel meaning (see the end of this function and run()):
    0 = nothing to do, 1/2 = re-process (and for 2, reload) the IR on the
    background thread, 3 = full synchronous reinitialisation.
*/
void processFifo()
{
    // Nothing queued, or a previous change is still being applied
    if (getNumRemainingEntries() == 0 || isThreadRunning() || mustInterpolate)
        return;

    auto numRequests = 0;

    // retrieve the information from the FIFO for processing
    while (getNumRemainingEntries() > 0 && numRequests < fifoSize)
    {
        ChangeRequest type = ChangeRequest::changeEngine;
        juce::var parameter;

        readFromFifo (type, parameter);

        requestsType.setUnchecked (numRequests, type);
        requestsParameter.setUnchecked (numRequests, parameter);

        numRequests++;
    }

    // remove any useless messages: only the most recent request of each type
    // is kept, older duplicates become changeIgnore
    for (auto i = 0; i < (int) ChangeRequest::numChangeRequestTypes; ++i)
    {
        bool exists = false;

        for (auto n = numRequests; --n >= 0;)
        {
            if (requestsType[n] == (ChangeRequest) i)
            {
                if (! exists)
                    exists = true;
                else
                    requestsType.setUnchecked (n, ChangeRequest::changeIgnore);
            }
        }
    }

    changeLevel = 0;

    // Apply each surviving request to currentInfo, raising changeLevel as needed
    for (auto n = 0; n < numRequests; ++n)
    {
        switch (requestsType[n])
        {
            case ChangeRequest::changeEngine:
                changeLevel = 3;
                break;

            case ChangeRequest::changeSampleRate:
            {
                double newSampleRate = requestsParameter[n];

                if (currentInfo.sampleRate != newSampleRate)
                    changeLevel = 3;

                currentInfo.sampleRate = newSampleRate;
            }
            break;

            case ChangeRequest::changeMaximumBufferSize:
            {
                int newMaximumBufferSize = requestsParameter[n];

                if (currentInfo.maximumBufferSize != (size_t) newMaximumBufferSize)
                    changeLevel = 3;

                currentInfo.maximumBufferSize = (size_t) newMaximumBufferSize;
            }
            break;

            case ChangeRequest::changeSource:
            {
                // parameter is an array: [0] = SourceType, [1] = type-specific payload
                auto* arrayParameters = requestsParameter[n].getArray();
                auto newSourceType = static_cast<SourceType> (static_cast<int> (arrayParameters->getUnchecked (0)));

                if (currentInfo.sourceType != newSourceType)
                    changeLevel = jmax (2, changeLevel);

                if (newSourceType == SourceType::sourceBinaryData)
                {
                    auto& prm = arrayParameters->getRawDataPointer()[1];
                    auto* newMemoryBlock = prm.getBinaryData();

                    auto* newPtr = newMemoryBlock->getData();
                    auto newSize = (int) newMemoryBlock->getSize();

                    if (currentInfo.sourceData != newPtr || currentInfo.sourceDataSize != newSize)
                        changeLevel = jmax (2, changeLevel);

                    currentInfo.sourceType = SourceType::sourceBinaryData;
                    currentInfo.sourceData = newPtr;
                    currentInfo.sourceDataSize = newSize;
                    currentInfo.fileImpulseResponse = File();
                }
                else if (newSourceType == SourceType::sourceAudioFile)
                {
                    File newFile (arrayParameters->getUnchecked (1).toString());

                    if (currentInfo.fileImpulseResponse != newFile)
                        changeLevel = jmax (2, changeLevel);

                    currentInfo.sourceType = SourceType::sourceAudioFile;
                    currentInfo.fileImpulseResponse = newFile;
                    currentInfo.sourceData = nullptr;
                    currentInfo.sourceDataSize = 0;
                }
                else if (newSourceType == SourceType::sourceAudioBuffer)
                {
                    double originalSampleRate (arrayParameters->getUnchecked (1));

                    // An audio buffer source always triggers a reload
                    changeLevel = jmax (2, changeLevel);

                    currentInfo.sourceType = SourceType::sourceAudioBuffer;
                    currentInfo.originalSampleRate = originalSampleRate;
                    currentInfo.fileImpulseResponse = File();
                    currentInfo.sourceData = nullptr;
                    currentInfo.sourceDataSize = 0;
                }
            }
            break;

            case ChangeRequest::changeImpulseResponseSize:
            {
                int64 newSize = requestsParameter[n];

                if (currentInfo.wantedSize != newSize)
                    changeLevel = jmax (1, changeLevel);

                currentInfo.wantedSize = newSize;
            }
            break;

            case ChangeRequest::changeStereo:
            {
                bool newWantsStereo = requestsParameter[n];

                // Stereo handling is done at processing time, so no engine
                // reinitialisation is requested here
                if (currentInfo.wantsStereo != newWantsStereo)
                    changeLevel = jmax (0, changeLevel);

                currentInfo.wantsStereo = newWantsStereo;
            }
            break;

            case ChangeRequest::changeTrimming:
            {
                bool newWantsTrimming = requestsParameter[n];

                if (currentInfo.wantsTrimming != newWantsTrimming)
                    changeLevel = jmax (1, changeLevel);

                currentInfo.wantsTrimming = newWantsTrimming;
            }
            break;

            case ChangeRequest::changeNormalisation:
            {
                bool newWantsNormalisation = requestsParameter[n];

                if (currentInfo.wantsNormalisation != newWantsNormalisation)
                    changeLevel = jmax (1, changeLevel);

                currentInfo.wantsNormalisation = newWantsNormalisation;
            }
            break;

            case ChangeRequest::numChangeRequestTypes:
            case ChangeRequest::changeIgnore:
                break;

            default:
                jassertfalse;
                break;
        }
    }

    // No source has ever been set: install a single-sample unit impulse as a
    // default IR with sensible defaults for the processing context
    if (currentInfo.sourceType == SourceType::sourceNone)
    {
        currentInfo.sourceType = SourceType::sourceAudioBuffer;

        if (currentInfo.sampleRate == 0)
            currentInfo.sampleRate = 44100;

        if (currentInfo.maximumBufferSize == 0)
            currentInfo.maximumBufferSize = 128;

        currentInfo.originalSampleRate = currentInfo.sampleRate;
        currentInfo.wantedSize = 1;
        currentInfo.fileImpulseResponse = File();
        currentInfo.sourceData = nullptr;
        currentInfo.sourceDataSize = 0;

        AudioBuffer<float> newBuffer;
        newBuffer.setSize (1, 1);
        newBuffer.setSample (0, 0, 1.f);

        copyBufferToTemporaryLocation (newBuffer);
    }

    // action depending on the change level
    if (changeLevel == 3)
    {
        // Full reinitialisation, done synchronously on the calling thread
        loadImpulseResponse();
        processImpulseResponse();
        initializeConvolutionEngines();
    }
    else if (changeLevel > 0)
    {
        // Lighter changes are prepared on the background thread (see run())
        startThread();
    }
}
//==============================================================================
/** This function copies a buffer to a temporary location, so that any external
    audio source can be processed then in the dedicated thread.
*/
void copyBufferToTemporaryLocation (AudioBlock<float> block)
{
    // The temporary buffer is shared with the background thread (see
    // copyBufferFromTemporaryLocation), hence the lock
    const SpinLock::ScopedLockType sl (processLock);

    currentInfo.originalNumChannels = (block.getNumChannels() > 1 ? 2 : 1);
    // Clamp the stored length to the maximum supported IR time
    currentInfo.originalSize = (int) jmin ((size_t) maximumTimeInSamples, block.getNumSamples());

    for (auto channel = 0; channel < currentInfo.originalNumChannels; ++channel)
        temporaryBuffer.copyFrom (channel, 0, block.getChannelPointer ((size_t) channel), (int) currentInfo.originalSize);
}
  540. //==============================================================================
  541. /** Resets the convolution engines states. */
  542. void reset()
  543. {
  544. for (auto* e : engines)
  545. e->reset();
  546. mustInterpolate = false;
  547. processFifo();
  548. }
/** Convolution processing handling interpolation between previous and new states
    of the convolution engines.
*/
void processSamples (const AudioBlock<const float>& input, AudioBlock<float>& output)
{
    processFifo();

    size_t numChannels = jmin (input.getNumChannels(), (size_t) (currentInfo.wantsStereo ? 2 : 1));
    size_t numSamples = jmin (input.getNumSamples(), output.getNumSamples());

    if (mustInterpolate == false)
    {
        // Steady state: engines 0,1 process the current IR directly
        for (size_t channel = 0; channel < numChannels; ++channel)
            engines[(int) channel]->processSamples (input.getChannelPointer (channel), output.getChannelPointer (channel), numSamples);
    }
    else
    {
        // Crossfade: engines 0,1 (old state, fading out via changeVolumes[0..1])
        // are mixed with engines 2,3 (new state) using the smoothed gains
        auto interpolated = AudioBlock<float> (interpolationBuffer).getSubBlock (0, numSamples);

        for (size_t channel = 0; channel < numChannels; ++channel)
        {
            auto&& buffer = output.getSingleChannelBlock (channel);

            interpolationBuffer.copyFrom (0, 0, input.getChannelPointer (channel), (int) numSamples);

            engines[(int) channel]->processSamples (input.getChannelPointer (channel), buffer.getChannelPointer (0), numSamples);
            changeVolumes[channel].applyGain (buffer.getChannelPointer (0), (int) numSamples);

            auto* interPtr = interpolationBuffer.getWritePointer (0);
            engines[(int) channel + 2]->processSamples (interPtr, interPtr, numSamples);
            changeVolumes[channel + 2].applyGain (interPtr, (int) numSamples);

            buffer += interpolated;
        }

        if (input.getNumChannels() > 1 && currentInfo.wantsStereo == false)
        {
            // Keep the second channel's gain ramps advancing even when unused
            auto&& buffer = output.getSingleChannelBlock (1);

            changeVolumes[1].applyGain (buffer.getChannelPointer (0), (int) numSamples);
            changeVolumes[3].applyGain (buffer.getChannelPointer (0), (int) numSamples);
        }

        // Once the crossfade has finished, the new engines become the current ones
        if (changeVolumes[0].isSmoothing() == false)
        {
            mustInterpolate = false;

            for (auto channel = 0; channel < 2; ++channel)
                engines[channel]->copyStateFromOtherEngine (*engines[channel + 2]);
        }
    }

    // Mono IR requested on stereo I/O: duplicate the first output channel
    if (input.getNumChannels() > 1 && currentInfo.wantsStereo == false)
        output.getSingleChannelBlock (1).copyFrom (output.getSingleChannelBlock (0));
}
  592. //==============================================================================
  593. const int64 maximumTimeInSamples = 10 * 96000;
  594. private:
  595. //==============================================================================
/** This is the thread run function which does the preparation of data depending
    on the requested change level.
*/
void run() override
{
    // Level 2 also needs the IR source to be reloaded; level 1 only re-processes
    if (changeLevel == 2)
    {
        loadImpulseResponse();

        // Bail out between the expensive stages if the thread is being stopped
        if (isThreadRunning() && threadShouldExit())
            return;
    }

    processImpulseResponse();

    if (isThreadRunning() && threadShouldExit())
        return;

    initializeConvolutionEngines();
}
  612. /** Loads the impulse response from the requested audio source. */
  613. void loadImpulseResponse()
  614. {
  615. if (currentInfo.sourceType == SourceType::sourceBinaryData)
  616. {
  617. copyAudioStreamInAudioBuffer (std::make_unique<MemoryInputStream> (currentInfo.sourceData, (size_t) currentInfo.sourceDataSize, false));
  618. }
  619. else if (currentInfo.sourceType == SourceType::sourceAudioFile)
  620. {
  621. copyAudioStreamInAudioBuffer (std::make_unique<FileInputStream> (currentInfo.fileImpulseResponse));
  622. }
  623. else if (currentInfo.sourceType == SourceType::sourceAudioBuffer)
  624. {
  625. copyBufferFromTemporaryLocation();
  626. }
  627. }
  628. /** Processes the impulse response data with the requested treatments
  629. and resampling if needed.
  630. */
  631. void processImpulseResponse()
  632. {
  633. trimAndResampleImpulseResponse (currentInfo.originalNumChannels, currentInfo.originalSampleRate, currentInfo.wantsTrimming);
  634. if (isThreadRunning() && threadShouldExit())
  635. return;
  636. if (currentInfo.wantsNormalisation)
  637. {
  638. if (currentInfo.originalNumChannels > 1)
  639. {
  640. normaliseImpulseResponse (currentInfo.buffer->getWritePointer (0), (int) currentInfo.finalSize, 1.0);
  641. normaliseImpulseResponse (currentInfo.buffer->getWritePointer (1), (int) currentInfo.finalSize, 1.0);
  642. }
  643. else
  644. {
  645. normaliseImpulseResponse (currentInfo.buffer->getWritePointer (0), (int) currentInfo.finalSize, 1.0);
  646. }
  647. }
  648. if (currentInfo.originalNumChannels == 1)
  649. currentInfo.buffer->copyFrom (1, 0, *currentInfo.buffer, 0, 0, (int) currentInfo.finalSize);
  650. }
/** Converts the data from an audio file into a stereo audio buffer of floats, and
    performs resampling if necessary.

    Returns true if a format reader could be created for the stream,
    false otherwise.
*/
bool copyAudioStreamInAudioBuffer (std::unique_ptr<InputStream> stream)
{
    AudioFormatManager manager;
    manager.registerBasicFormats();

    std::unique_ptr<AudioFormatReader> formatReader (manager.createReaderFor (std::move (stream)));

    if (formatReader != nullptr)
    {
        currentInfo.originalNumChannels = formatReader->numChannels > 1 ? 2 : 1;
        currentInfo.originalSampleRate = formatReader->sampleRate;
        // Clamp the IR length to the maximum supported time
        currentInfo.originalSize = static_cast<int> (jmin (maximumTimeInSamples, formatReader->lengthInSamples));

        impulseResponseOriginal.clear();
        formatReader->read (&(impulseResponseOriginal), 0, (int) currentInfo.originalSize, 0, true, currentInfo.originalNumChannels > 1);

        return true;
    }

    return false;
}
/** Copies a buffer from a temporary location to the impulseResponseOriginal
    buffer for the sourceAudioBuffer.
*/
void copyBufferFromTemporaryLocation()
{
    // Guard against a concurrent copyBufferToTemporaryLocation() call
    const SpinLock::ScopedLockType sl (processLock);

    for (auto channel = 0; channel < currentInfo.originalNumChannels; ++channel)
        impulseResponseOriginal.copyFrom (channel, 0, temporaryBuffer, channel, 0, (int) currentInfo.originalSize);
}
/** Trims and resamples the impulse response if needed.

    Trimming removes leading/trailing samples below a -80 dB threshold, then
    the kept region is copied (or resampled) from impulseResponseOriginal into
    impulseResponse, and currentInfo.finalSize is updated accordingly.
*/
void trimAndResampleImpulseResponse (int numChannels, double srcSampleRate, bool mustTrim)
{
    auto thresholdTrim = Decibels::decibelsToGain (-80.0f);

    auto indexStart = 0;
    auto indexEnd = currentInfo.originalSize - 1;

    if (mustTrim)
    {
        indexStart = currentInfo.originalSize - 1;
        indexEnd = 0;

        // Find the first and last above-threshold samples across all channels
        for (auto channel = 0; channel < numChannels; ++channel)
        {
            auto localIndexStart = 0;
            auto localIndexEnd = currentInfo.originalSize - 1;

            auto* channelData = impulseResponseOriginal.getReadPointer (channel);

            while (localIndexStart < currentInfo.originalSize - 1
                    && channelData[localIndexStart] <= thresholdTrim
                    && channelData[localIndexStart] >= -thresholdTrim)
                ++localIndexStart;

            while (localIndexEnd >= 0
                    && channelData[localIndexEnd] <= thresholdTrim
                    && channelData[localIndexEnd] >= -thresholdTrim)
                --localIndexEnd;

            indexStart = jmin (indexStart, localIndexStart);
            indexEnd = jmax (indexEnd, localIndexEnd);
        }

        if (indexStart > 0)
        {
            // Shift the kept region to the start of the buffer and zero the tail
            for (auto channel = 0; channel < numChannels; ++channel)
            {
                auto* channelData = impulseResponseOriginal.getWritePointer (channel);

                for (auto i = 0; i < indexEnd - indexStart + 1; ++i)
                    channelData[i] = channelData[i + indexStart];

                for (auto i = indexEnd - indexStart + 1; i < currentInfo.originalSize - 1; ++i)
                    channelData[i] = 0.0f;
            }
        }
    }

    if (currentInfo.sampleRate == srcSampleRate)
    {
        // No resampling
        currentInfo.finalSize = jmin (static_cast<int> (currentInfo.wantedSize), indexEnd - indexStart + 1);

        impulseResponse.clear();

        for (auto channel = 0; channel < numChannels; ++channel)
            impulseResponse.copyFrom (channel, 0, impulseResponseOriginal, channel, 0, (int) currentInfo.finalSize);
    }
    else
    {
        // Resampling from the source rate to the processing rate
        auto factorReading = srcSampleRate / currentInfo.sampleRate;
        currentInfo.finalSize = jmin (static_cast<int> (currentInfo.wantedSize), roundToInt ((indexEnd - indexStart + 1) / factorReading));

        impulseResponse.clear();

        MemoryAudioSource memorySource (impulseResponseOriginal, false);
        ResamplingAudioSource resamplingSource (&memorySource, false, (int) numChannels);

        resamplingSource.setResamplingRatio (factorReading);
        resamplingSource.prepareToPlay ((int) currentInfo.finalSize, currentInfo.sampleRate);

        AudioSourceChannelInfo info;
        info.startSample = 0;
        info.numSamples = (int) currentInfo.finalSize;
        info.buffer = &impulseResponse;

        resamplingSource.getNextAudioBlock (info);
    }

    // Filling the second channel with the first if necessary
    if (numChannels == 1)
        impulseResponse.copyFrom (1, 0, impulseResponse, 0, 0, (int) currentInfo.finalSize);
}
  745. /** Normalisation of the impulse response based on its energy. */
  746. void normaliseImpulseResponse (float* samples, int numSamples, double factorResampling) const
  747. {
  748. auto magnitude = 0.0f;
  749. for (auto i = 0; i < numSamples; ++i)
  750. magnitude += samples[i] * samples[i];
  751. auto magnitudeInv = 1.0f / (4.0f * std::sqrt (magnitude)) * 0.5f * static_cast <float> (factorResampling);
  752. for (auto i = 0; i < numSamples; ++i)
  753. samples[i] *= magnitudeInv;
  754. }
// ================================================================================================================
/** Initializes the convolution engines depending on the provided sizes
    and performs the FFT on the impulse responses.
*/
void initializeConvolutionEngines()
{
    // Nothing can be initialised until prepare() has supplied a block size.
    if (currentInfo.maximumBufferSize == 0)
        return;

    // changeLevel 3 appears to mean "full reload" (set by the request
    // handling, not visible in this chunk — TODO confirm): load the IR
    // straight into the two active engines, no crossfade needed.
    if (changeLevel == 3)
    {
        for (auto i = 0; i < 2; ++i)
            engines[i]->initializeConvolutionEngine (currentInfo, i);

        mustInterpolate = false;
    }
    else
    {
        // IR changed while running: prepare the spare engine pair (2 and 3)
        // so playback can crossfade from the old pair to the new one.
        for (auto i = 0; i < 2; ++i)
        {
            engines[i + 2]->initializeConvolutionEngine (currentInfo, i);
            engines[i + 2]->reset();

            // Bail out between engines if the background thread was asked to stop.
            if (isThreadRunning() && threadShouldExit())
                return;
        }

        // Set up 50 ms gain ramps: old engines (0-1) fade 1 -> 0, new
        // engines (2-3) fade 0 -> 1. The first setTargetValue before reset()
        // snaps the ramp to its starting value.
        for (auto i = 0; i < 2; ++i)
        {
            changeVolumes[i].setTargetValue (1.0f);
            changeVolumes[i].reset (currentInfo.sampleRate, 0.05);
            changeVolumes[i].setTargetValue (0.0f);

            changeVolumes[i + 2].setTargetValue (0.0f);
            changeVolumes[i + 2].reset (currentInfo.sampleRate, 0.05);
            changeVolumes[i + 2].setTargetValue (1.0f);
        }

        mustInterpolate = true;
    }
}
//==============================================================================
static constexpr int fifoSize = 1024; // the size of the fifo which handles all the change requests

AbstractFifo abstractFifo;             // bookkeeping for the two fifo arrays below
Array<ChangeRequest> fifoRequestsType; // fifo storage: the type of each pending change request
Array<juce::var> fifoRequestsParameter; // fifo storage: the parameter associated with each pending request

Array<ChangeRequest> requestsType;     // requests popped out of the fifo, awaiting processing
Array<juce::var> requestsParameter;    // parameters matching requestsType, one var per request

int changeLevel = 0;                   // the current level of requested change in the convolution engine

//==============================================================================
ConvolutionEngine::ProcessingInformation currentInfo; // the information about the impulse response to load

AudioBuffer<float> temporaryBuffer; // a temporary buffer that is used when the function copyAndLoadImpulseResponse is called in the main API
SpinLock processLock;               // guards access to that temporary buffer

AudioBuffer<float> impulseResponseOriginal; // a buffer with the original impulse response
AudioBuffer<float> impulseResponse;         // a buffer with the impulse response trimmed, resampled, resized and normalised

//==============================================================================
OwnedArray<ConvolutionEngine> engines;  // the 4 convolution engines being used (0-1 active, 2-3 staged for IR crossfades)
AudioBuffer<float> interpolationBuffer; // a buffer to do the interpolation between the convolution engines 0-1 and 2-3
LogRampedValue<float> changeVolumes[4]; // the volumes for each convolution engine during interpolation
bool mustInterpolate = false;           // tells if the convolution engines' outputs must currently be interpolated

//==============================================================================
JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (Pimpl)
};
  812. //==============================================================================
  813. Convolution::Convolution()
  814. {
  815. pimpl.reset (new Pimpl());
  816. pimpl->addToFifo (Convolution::Pimpl::ChangeRequest::changeEngine, juce::var (0));
  817. }
  818. Convolution::~Convolution()
  819. {
  820. }
  821. void Convolution::loadImpulseResponse (const void* sourceData, size_t sourceDataSize,
  822. bool wantsStereo, bool wantsTrimming, size_t size,
  823. bool wantsNormalisation)
  824. {
  825. if (sourceData == nullptr)
  826. return;
  827. auto maximumSamples = (size_t) pimpl->maximumTimeInSamples;
  828. auto wantedSize = (size == 0 ? maximumSamples : jmin (size, maximumSamples));
  829. Pimpl::ChangeRequest types[] = { Pimpl::ChangeRequest::changeSource,
  830. Pimpl::ChangeRequest::changeImpulseResponseSize,
  831. Pimpl::ChangeRequest::changeStereo,
  832. Pimpl::ChangeRequest::changeTrimming,
  833. Pimpl::ChangeRequest::changeNormalisation };
  834. Array<juce::var> sourceParameter;
  835. sourceParameter.add (juce::var ((int) ConvolutionEngine::ProcessingInformation::SourceType::sourceBinaryData));
  836. sourceParameter.add (juce::var (sourceData, sourceDataSize));
  837. juce::var parameters[] = { juce::var (sourceParameter),
  838. juce::var (static_cast<int64> (wantedSize)),
  839. juce::var (wantsStereo),
  840. juce::var (wantsTrimming),
  841. juce::var (wantsNormalisation) };
  842. pimpl->addToFifo (types, parameters, 5);
  843. }
  844. void Convolution::loadImpulseResponse (const File& fileImpulseResponse, bool wantsStereo,
  845. bool wantsTrimming, size_t size, bool wantsNormalisation)
  846. {
  847. if (! fileImpulseResponse.existsAsFile())
  848. return;
  849. auto maximumSamples = (size_t) pimpl->maximumTimeInSamples;
  850. auto wantedSize = (size == 0 ? maximumSamples : jmin (size, maximumSamples));
  851. Pimpl::ChangeRequest types[] = { Pimpl::ChangeRequest::changeSource,
  852. Pimpl::ChangeRequest::changeImpulseResponseSize,
  853. Pimpl::ChangeRequest::changeStereo,
  854. Pimpl::ChangeRequest::changeTrimming,
  855. Pimpl::ChangeRequest::changeNormalisation };
  856. Array<juce::var> sourceParameter;
  857. sourceParameter.add (juce::var ((int) ConvolutionEngine::ProcessingInformation::SourceType::sourceAudioFile));
  858. sourceParameter.add (juce::var (fileImpulseResponse.getFullPathName()));
  859. juce::var parameters[] = { juce::var (sourceParameter),
  860. juce::var (static_cast<int64> (wantedSize)),
  861. juce::var (wantsStereo),
  862. juce::var (wantsTrimming),
  863. juce::var (wantsNormalisation) };
  864. pimpl->addToFifo (types, parameters, 5);
  865. }
  866. void Convolution::copyAndLoadImpulseResponseFromBuffer (AudioBuffer<float>& buffer,
  867. double bufferSampleRate, bool wantsStereo, bool wantsTrimming, bool wantsNormalisation, size_t size)
  868. {
  869. copyAndLoadImpulseResponseFromBlock (AudioBlock<float> (buffer), bufferSampleRate,
  870. wantsStereo, wantsTrimming, wantsNormalisation, size);
  871. }
  872. void Convolution::copyAndLoadImpulseResponseFromBlock (AudioBlock<float> block, double bufferSampleRate,
  873. bool wantsStereo, bool wantsTrimming, bool wantsNormalisation, size_t size)
  874. {
  875. jassert (bufferSampleRate > 0);
  876. if (block.getNumSamples() == 0)
  877. return;
  878. auto maximumSamples = (size_t) pimpl->maximumTimeInSamples;
  879. auto wantedSize = (size == 0 ? maximumSamples : jmin (size, maximumSamples));
  880. pimpl->copyBufferToTemporaryLocation (block);
  881. Pimpl::ChangeRequest types[] = { Pimpl::ChangeRequest::changeSource,
  882. Pimpl::ChangeRequest::changeImpulseResponseSize,
  883. Pimpl::ChangeRequest::changeStereo,
  884. Pimpl::ChangeRequest::changeTrimming,
  885. Pimpl::ChangeRequest::changeNormalisation };
  886. Array<juce::var> sourceParameter;
  887. sourceParameter.add (juce::var ((int) ConvolutionEngine::ProcessingInformation::SourceType::sourceAudioBuffer));
  888. sourceParameter.add (juce::var (bufferSampleRate));
  889. juce::var parameters[] = { juce::var (sourceParameter),
  890. juce::var (static_cast<int64> (wantedSize)),
  891. juce::var (wantsStereo),
  892. juce::var (wantsTrimming),
  893. juce::var (wantsNormalisation) };
  894. pimpl->addToFifo (types, parameters, 5);
  895. }
  896. void Convolution::prepare (const ProcessSpec& spec)
  897. {
  898. jassert (isPositiveAndBelow (spec.numChannels, static_cast<uint32> (3))); // only mono and stereo is supported
  899. Pimpl::ChangeRequest types[] = { Pimpl::ChangeRequest::changeSampleRate,
  900. Pimpl::ChangeRequest::changeMaximumBufferSize };
  901. juce::var parameters[] = { juce::var (spec.sampleRate),
  902. juce::var (static_cast<int> (spec.maximumBlockSize)) };
  903. pimpl->addToFifo (types, parameters, 2);
  904. pimpl->initProcessing (static_cast<int> (spec.maximumBlockSize));
  905. for (size_t channel = 0; channel < spec.numChannels; ++channel)
  906. {
  907. volumeDry[channel].reset (spec.sampleRate, 0.05);
  908. volumeWet[channel].reset (spec.sampleRate, 0.05);
  909. }
  910. sampleRate = spec.sampleRate;
  911. dryBuffer = AudioBlock<float> (dryBufferStorage,
  912. jmin (spec.numChannels, 2u),
  913. spec.maximumBlockSize);
  914. isActive = true;
  915. }
  916. void Convolution::reset() noexcept
  917. {
  918. dryBuffer.clear();
  919. pimpl->reset();
  920. }
/** Runs the convolution on a block of samples, handling smooth bypass
    crossfades between the dry (input) and wet (convolved) signals.
*/
void Convolution::processSamples (const AudioBlock<const float>& input, AudioBlock<float>& output, bool isBypassed) noexcept
{
    // Inactive until prepare() has been called.
    if (! isActive)
        return;

    jassert (input.getNumChannels() == output.getNumChannels());
    jassert (isPositiveAndBelow (input.getNumChannels(), static_cast<size_t> (3))); // only mono and stereo is supported

    auto numChannels = jmin (input.getNumChannels(), (size_t) 2);
    auto numSamples = jmin (input.getNumSamples(), output.getNumSamples());

    auto dry = dryBuffer.getSubsetChannelBlock (0, numChannels);

    if (volumeDry[0].isSmoothing())
    {
        // A bypass transition is in progress: keep a gain-ramped copy of the
        // dry input, run the convolution, ramp the wet output the opposite
        // way, and sum the two.
        dry.copyFrom (input);

        for (size_t channel = 0; channel < numChannels; ++channel)
            volumeDry[channel].applyGain (dry.getChannelPointer (channel), (int) numSamples);

        pimpl->processSamples (input, output);

        for (size_t channel = 0; channel < numChannels; ++channel)
            volumeWet[channel].applyGain (output.getChannelPointer (channel), (int) numSamples);

        output += dry;
    }
    else
    {
        // Steady state: convolve only when not bypassed.
        // NOTE(review): when bypassed, output is left untouched here, which
        // assumes output already contains the input (a replacing context) —
        // confirm against the callers of processSamples.
        if (! currentIsBypassed)
            pimpl->processSamples (input, output);

        if (isBypassed != currentIsBypassed)
        {
            // Bypass state just flipped: start 50 ms crossfade ramps. The
            // first setTargetValue before reset() snaps each ramp to its
            // starting value; the second sets the actual target.
            currentIsBypassed = isBypassed;

            for (size_t channel = 0; channel < numChannels; ++channel)
            {
                volumeDry[channel].setTargetValue (isBypassed ? 0.0f : 1.0f);
                volumeDry[channel].reset (sampleRate, 0.05);
                volumeDry[channel].setTargetValue (isBypassed ? 1.0f : 0.0f);

                volumeWet[channel].setTargetValue (isBypassed ? 1.0f : 0.0f);
                volumeWet[channel].reset (sampleRate, 0.05);
                volumeWet[channel].setTargetValue (isBypassed ? 0.0f : 1.0f);
            }
        }
    }
}
  959. } // namespace dsp
  960. } // namespace juce