The JUCE cross-platform C++ framework, with DISTRHO/KXStudio specific changes
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1182 lines
44KB

  1. /*
  2. ==============================================================================
  3. This file is part of the JUCE library.
  4. Copyright (c) 2017 - ROLI Ltd.
  5. JUCE is an open source library subject to commercial or open-source
  6. licensing.
  7. By using JUCE, you agree to the terms of both the JUCE 5 End-User License
  8. Agreement and JUCE 5 Privacy Policy (both updated and effective as of the
  9. 27th April 2017).
  10. End User License Agreement: www.juce.com/juce-5-licence
  11. Privacy Policy: www.juce.com/juce-5-privacy-policy
  12. Or: You may also use this code under the terms of the GPL v3 (see
  13. www.gnu.org/licenses).
  14. JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER
  15. EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE
  16. DISCLAIMED.
  17. ==============================================================================
  18. */
  19. namespace juce
  20. {
  21. namespace dsp
  22. {
  23. /** This class is the convolution engine itself, processing only one channel at
  24. a time of input signal.
  25. */
  26. struct ConvolutionEngine
  27. {
  28. ConvolutionEngine() = default;
  29. //==============================================================================
  30. struct ProcessingInformation
  31. {
  32. enum class SourceType
  33. {
  34. sourceBinaryData,
  35. sourceAudioFile,
  36. sourceAudioBuffer,
  37. sourceNone
  38. };
  39. SourceType sourceType = SourceType::sourceNone;
  40. const void* sourceData;
  41. size_t sourceDataSize;
  42. File fileImpulseResponse;
  43. double bufferSampleRate;
  44. AudioBuffer<float>* buffer;
  45. double sampleRate = 0;
  46. bool wantsStereo = true;
  47. bool wantsTrimming = true;
  48. bool wantsNormalization = true;
  49. size_t impulseResponseSize = 0;
  50. size_t maximumBufferSize = 0;
  51. };
  52. //==============================================================================
  53. void reset()
  54. {
  55. bufferInput.clear();
  56. bufferOverlap.clear();
  57. bufferTempOutput.clear();
  58. for (auto i = 0; i < buffersInputSegments.size(); ++i)
  59. buffersInputSegments.getReference (i).clear();
  60. currentSegment = 0;
  61. inputDataPos = 0;
  62. }
  63. /** Initalize all the states and objects to perform the convolution. */
  64. void initializeConvolutionEngine (ProcessingInformation& info, int channel)
  65. {
  66. blockSize = (size_t) nextPowerOfTwo ((int) info.maximumBufferSize);
  67. FFTSize = blockSize > 128 ? 2 * blockSize
  68. : 4 * blockSize;
  69. numSegments = ((size_t) info.buffer->getNumSamples()) / (FFTSize - blockSize) + 1u;
  70. numInputSegments = (blockSize > 128 ? numSegments : 3 * numSegments);
  71. FFTobject = new FFT (roundDoubleToInt (log2 (FFTSize)));
  72. bufferInput.setSize (1, static_cast<int> (FFTSize));
  73. bufferOutput.setSize (1, static_cast<int> (FFTSize * 2));
  74. bufferTempOutput.setSize (1, static_cast<int> (FFTSize * 2));
  75. bufferOverlap.setSize (1, static_cast<int> (FFTSize));
  76. buffersInputSegments.clear();
  77. buffersImpulseSegments.clear();
  78. for (size_t i = 0; i < numInputSegments; ++i)
  79. {
  80. AudioBuffer<float> newInputSegment;
  81. newInputSegment.setSize (1, static_cast<int> (FFTSize * 2));
  82. buffersInputSegments.add (newInputSegment);
  83. }
  84. for (auto i = 0u; i < numSegments; ++i)
  85. {
  86. AudioBuffer<float> newImpulseSegment;
  87. newImpulseSegment.setSize (1, static_cast<int> (FFTSize * 2));
  88. buffersImpulseSegments.add (newImpulseSegment);
  89. }
  90. ScopedPointer<FFT> FFTTempObject = new FFT (roundDoubleToInt (log2 (FFTSize)));
  91. auto numChannels = (info.wantsStereo && info.buffer->getNumChannels() >= 2 ? 2 : 1);
  92. if (channel < numChannels)
  93. {
  94. auto* channelData = info.buffer->getWritePointer (channel);
  95. for (size_t n = 0; n < numSegments; ++n)
  96. {
  97. buffersImpulseSegments.getReference (static_cast<int> (n)).clear();
  98. auto* impulseResponse = buffersImpulseSegments.getReference (static_cast<int> (n)).getWritePointer (0);
  99. if (n == 0)
  100. impulseResponse[0] = 1.0f;
  101. for (size_t i = 0; i < FFTSize - blockSize; ++i)
  102. if (i + n * (FFTSize - blockSize) < (size_t) info.buffer->getNumSamples())
  103. impulseResponse[i] = channelData[i + n * (FFTSize - blockSize)];
  104. FFTTempObject->performRealOnlyForwardTransform (impulseResponse);
  105. prepareForConvolution (impulseResponse);
  106. }
  107. }
  108. reset();
  109. isReady = true;
  110. }
  111. /** Copy the states of another engine. */
  112. void copyStateFromOtherEngine (const ConvolutionEngine& other)
  113. {
  114. if (FFTSize != other.FFTSize)
  115. {
  116. FFTobject = new FFT (roundDoubleToInt (log2 (other.FFTSize)));
  117. FFTSize = other.FFTSize;
  118. }
  119. currentSegment = other.currentSegment;
  120. numInputSegments = other.numInputSegments;
  121. numSegments = other.numSegments;
  122. blockSize = other.blockSize;
  123. inputDataPos = other.inputDataPos;
  124. bufferInput = other.bufferInput;
  125. bufferTempOutput = other.bufferTempOutput;
  126. bufferOutput = other.bufferOutput;
  127. buffersInputSegments = other.buffersInputSegments;
  128. buffersImpulseSegments = other.buffersImpulseSegments;
  129. bufferOverlap = other.bufferOverlap;
  130. isReady = true;
  131. }
  132. /** Performs the uniform partitioned convolution using FFT. */
  133. void processSamples (const float* input, float* output, size_t numSamples)
  134. {
  135. if (! isReady)
  136. return;
  137. // Overlap-add, zero latency convolution algorithm with uniform partitioning
  138. size_t numSamplesProcessed = 0;
  139. auto indexStep = numInputSegments / numSegments;
  140. auto* inputData = bufferInput.getWritePointer (0);
  141. auto* outputTempData = bufferTempOutput.getWritePointer (0);
  142. auto* outputData = bufferOutput.getWritePointer (0);
  143. auto* overlapData = bufferOverlap.getWritePointer (0);
  144. while (numSamplesProcessed < numSamples)
  145. {
  146. const bool inputDataWasEmpty = (inputDataPos == 0);
  147. auto numSamplesToProcess = jmin (numSamples - numSamplesProcessed, blockSize - inputDataPos);
  148. // copy the input samples
  149. FloatVectorOperations::copy (inputData + inputDataPos, input + numSamplesProcessed, static_cast<int> (numSamplesToProcess));
  150. auto* inputSegmentData = buffersInputSegments.getReference (static_cast<int> (currentSegment)).getWritePointer (0);
  151. FloatVectorOperations::copy (inputSegmentData, inputData, static_cast<int> (FFTSize));
  152. // Forward FFT
  153. FFTobject->performRealOnlyForwardTransform (inputSegmentData);
  154. prepareForConvolution (inputSegmentData);
  155. // Complex multiplication
  156. if (inputDataWasEmpty)
  157. {
  158. FloatVectorOperations::fill (outputTempData, 0, static_cast<int> (FFTSize + 1));
  159. auto index = currentSegment;
  160. for (size_t i = 1; i < numSegments; ++i)
  161. {
  162. index += indexStep;
  163. if (index >= numInputSegments)
  164. index -= numInputSegments;
  165. convolutionProcessingAndAccumulate (buffersInputSegments.getReference (static_cast<int> (index)).getWritePointer (0),
  166. buffersImpulseSegments.getReference (static_cast<int> (i)).getWritePointer (0),
  167. outputTempData);
  168. }
  169. }
  170. FloatVectorOperations::copy (outputData, outputTempData, static_cast<int> (FFTSize + 1));
  171. convolutionProcessingAndAccumulate (buffersInputSegments.getReference (static_cast<int> (currentSegment)).getWritePointer (0),
  172. buffersImpulseSegments.getReference (0).getWritePointer (0),
  173. outputData);
  174. // Inverse FFT
  175. updateSymmetricFrequencyDomainData (outputData);
  176. FFTobject->performRealOnlyInverseTransform (outputData);
  177. // Add overlap
  178. for (size_t i = 0; i < numSamplesToProcess; ++i)
  179. output[i + numSamplesProcessed] = outputData[inputDataPos + i] + overlapData[inputDataPos + i];
  180. // Input buffer full => Next block
  181. inputDataPos += numSamplesToProcess;
  182. if (inputDataPos == blockSize)
  183. {
  184. // Input buffer is empty again now
  185. FloatVectorOperations::fill (inputData, 0.0f, static_cast<int> (FFTSize));
  186. inputDataPos = 0;
  187. // Extra step for segSize > blockSize
  188. FloatVectorOperations::add (&(outputData[blockSize]), &(overlapData[blockSize]), static_cast<int> (FFTSize - 2 * blockSize));
  189. // Save the overlap
  190. FloatVectorOperations::copy (overlapData, &(outputData[blockSize]), static_cast<int> (FFTSize - blockSize));
  191. // Update current segment
  192. currentSegment = (currentSegment > 0) ? (currentSegment - 1) : (numInputSegments - 1);
  193. }
  194. numSamplesProcessed += numSamplesToProcess;
  195. }
  196. }
  197. /** After each FFT, this function is called to allow convolution to be performed with only 4 SIMD functions calls. */
  198. void prepareForConvolution (float *samples) noexcept
  199. {
  200. auto FFTSizeDiv2 = FFTSize / 2;
  201. for (size_t i = 0; i < FFTSizeDiv2; i++)
  202. samples[i] = samples[2 * i];
  203. samples[FFTSizeDiv2] = 0;
  204. for (size_t i = 1; i < FFTSizeDiv2; i++)
  205. samples[i + FFTSizeDiv2] = -samples[2 * (FFTSize - i) + 1];
  206. }
  207. /** Does the convolution operation itself only on half of the frequency domain samples. */
  208. void convolutionProcessingAndAccumulate (const float *input, const float *impulse, float *output)
  209. {
  210. auto FFTSizeDiv2 = FFTSize / 2;
  211. FloatVectorOperations::addWithMultiply (output, input, impulse, static_cast<int> (FFTSizeDiv2));
  212. FloatVectorOperations::subtractWithMultiply (output, &(input[FFTSizeDiv2]), &(impulse[FFTSizeDiv2]), static_cast<int> (FFTSizeDiv2));
  213. FloatVectorOperations::addWithMultiply (&(output[FFTSizeDiv2]), input, &(impulse[FFTSizeDiv2]), static_cast<int> (FFTSizeDiv2));
  214. FloatVectorOperations::addWithMultiply (&(output[FFTSizeDiv2]), &(input[FFTSizeDiv2]), impulse, static_cast<int> (FFTSizeDiv2));
  215. }
  216. /** Undo the re-organization of samples from the function prepareForConvolution.
  217. Then, takes the conjugate of the frequency domain first half of samples, to fill the
  218. second half, so that the inverse transform will return real samples in the time domain.
  219. */
  220. void updateSymmetricFrequencyDomainData (float* samples) noexcept
  221. {
  222. auto FFTSizeDiv2 = FFTSize / 2;
  223. for (size_t i = 1; i < FFTSizeDiv2; i++)
  224. {
  225. samples[2 * (FFTSize - i)] = samples[i];
  226. samples[2 * (FFTSize - i) + 1] = -samples[FFTSizeDiv2 + i];
  227. }
  228. samples[1] = 0.f;
  229. for (size_t i = 1; i < FFTSizeDiv2; i++)
  230. {
  231. samples[2 * i] = samples[2 * (FFTSize - i)];
  232. samples[2 * i + 1] = -samples[2 * (FFTSize - i) + 1];
  233. }
  234. }
  235. //==============================================================================
  236. ScopedPointer<FFT> FFTobject;
  237. size_t FFTSize = 0;
  238. size_t currentSegment = 0, numInputSegments = 0, numSegments = 0, blockSize = 0, inputDataPos = 0;
  239. AudioBuffer<float> bufferInput, bufferOutput, bufferTempOutput, bufferOverlap;
  240. Array<AudioBuffer<float>> buffersInputSegments, buffersImpulseSegments;
  241. bool isReady = false;
  242. //==============================================================================
  243. JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (ConvolutionEngine)
  244. };
  245. //==============================================================================
  246. /** Manages all the changes requested by the main convolution engine, to minimize
  247. the number of calls of the convolution engine initialization, and the potential
  248. consequences of multiple quick calls to the function Convolution::loadImpulseResponse.
  249. */
  250. struct Convolution::Pimpl : private Thread
  251. {
  252. public:
  253. enum class ChangeRequest
  254. {
  255. changeEngine = 0,
  256. changeSampleRate,
  257. changeMaximumBufferSize,
  258. changeSource,
  259. changeImpulseResponseSize,
  260. changeStereo,
  261. changeTrimming,
  262. changeNormalization,
  263. numChangeRequestTypes
  264. };
  265. using SourceType = ConvolutionEngine::ProcessingInformation::SourceType;
  266. //==============================================================================
  267. Pimpl() : Thread ("Convolution"), abstractFifo (fifoSize)
  268. {
  269. abstractFifo.reset();
  270. requestsType.resize (fifoSize);
  271. requestsParameter.resize (fifoSize);
  272. for (auto i = 0u; i < 4; ++i)
  273. engines.add (new ConvolutionEngine());
  274. currentInfo.maximumBufferSize = 0;
  275. currentInfo.buffer = &impulseResponse;
  276. }
  277. ~Pimpl()
  278. {
  279. stopThread (10000);
  280. }
  281. //==============================================================================
  282. /** Adds a new change request. */
  283. void addToFifo (ChangeRequest type, juce::var parameter)
  284. {
  285. int start1, size1, start2, size2;
  286. abstractFifo.prepareToWrite (1, start1, size1, start2, size2);
  287. if (size1 > 0)
  288. {
  289. requestsType.setUnchecked (start1, type);
  290. requestsParameter.setUnchecked (start1, parameter);
  291. }
  292. if (size2 > 0)
  293. {
  294. requestsType.setUnchecked (start2, type);
  295. requestsParameter.setUnchecked (start2, parameter);
  296. }
  297. abstractFifo.finishedWrite (size1 + size2);
  298. }
  299. /** Adds a new array of change requests. */
  300. void addToFifo (ChangeRequest* types, juce::var* parameters, int numEntries)
  301. {
  302. int start1, size1, start2, size2;
  303. abstractFifo.prepareToWrite (numEntries, start1, size1, start2, size2);
  304. if (size1 > 0)
  305. {
  306. for (int i = 0; i < size1; ++i)
  307. {
  308. requestsType.setUnchecked (start1 + i, types[i]);
  309. requestsParameter.setUnchecked (start1 + i, parameters[i]);
  310. }
  311. }
  312. if (size2 > 0)
  313. {
  314. for (int i = 0; i < size2; ++i)
  315. {
  316. requestsType.setUnchecked (start2 + i, types[i + size1]);
  317. requestsParameter.setUnchecked (start2 + i, parameters[i + size1]);
  318. }
  319. }
  320. abstractFifo.finishedWrite (size1 + size2);
  321. }
  322. /** Reads requests from the fifo */
  323. void readFromFifo (ChangeRequest& type, juce::var& parameter)
  324. {
  325. int start1, size1, start2, size2;
  326. abstractFifo.prepareToRead (1, start1, size1, start2, size2);
  327. if (size1 > 0)
  328. {
  329. type = requestsType[start1];
  330. parameter = requestsParameter[start1];
  331. }
  332. if (size2 > 0)
  333. {
  334. type = requestsType[start2];
  335. parameter = requestsParameter[start2];
  336. }
  337. abstractFifo.finishedRead (size1 + size2);
  338. }
  339. /** Returns the number of requests that still need to be processed */
  340. int getNumRemainingEntries() const noexcept
  341. {
  342. return abstractFifo.getNumReady();
  343. }
  344. //==============================================================================
  345. /** This function processes all the change requests to remove all the the
  346. redundant ones, and to tell what kind of initialization must be done.
  347. Depending on the results, the convolution engines might be reset, or
  348. simply updated, or they might not need any change at all.
  349. */
  350. void processFifo()
  351. {
  352. if (getNumRemainingEntries() == 0 || isThreadRunning() || mustInterpolate)
  353. return;
  354. // retrieve the information from the FIFO for processing
  355. Array<ChangeRequest> requests;
  356. Array<juce::var> requestParameters;
  357. while (getNumRemainingEntries() > 0)
  358. {
  359. ChangeRequest type = ChangeRequest::changeEngine;
  360. juce::var parameter;
  361. readFromFifo (type, parameter);
  362. requests.add (type);
  363. requestParameters.add (parameter);
  364. }
  365. // remove any useless messages
  366. for (int i = 0; i < (int) ChangeRequest::numChangeRequestTypes; ++i)
  367. {
  368. bool exists = false;
  369. for (int n = requests.size(); --n >= 0;)
  370. {
  371. if (requests[n] == (ChangeRequest) i)
  372. {
  373. if (! exists)
  374. {
  375. exists = true;
  376. }
  377. else
  378. {
  379. requests.remove (n);
  380. requestParameters.remove (n);
  381. }
  382. }
  383. }
  384. }
  385. changeLevel = 0;
  386. for (int n = 0; n < requests.size(); ++n)
  387. {
  388. switch (requests[n])
  389. {
  390. case ChangeRequest::changeEngine:
  391. changeLevel = 3;
  392. break;
  393. case ChangeRequest::changeSampleRate:
  394. {
  395. double newSampleRate = requestParameters[n];
  396. if (currentInfo.sampleRate != newSampleRate)
  397. changeLevel = 3;
  398. currentInfo.sampleRate = newSampleRate;
  399. }
  400. break;
  401. case ChangeRequest::changeMaximumBufferSize:
  402. {
  403. int newMaximumBufferSize = requestParameters[n];
  404. if (currentInfo.maximumBufferSize != (size_t) newMaximumBufferSize)
  405. changeLevel = 3;
  406. currentInfo.maximumBufferSize = (size_t) newMaximumBufferSize;
  407. }
  408. break;
  409. case ChangeRequest::changeSource:
  410. {
  411. auto* arrayParameters = requestParameters[n].getArray();
  412. auto newSourceType = static_cast<SourceType> (static_cast<int> (arrayParameters->getUnchecked (0)));
  413. if (currentInfo.sourceType != newSourceType)
  414. changeLevel = jmax (2, changeLevel);
  415. if (newSourceType == SourceType::sourceBinaryData)
  416. {
  417. auto& prm = arrayParameters->getRawDataPointer()[1];
  418. auto* newMemoryBlock = prm.getBinaryData();
  419. auto* newPtr = newMemoryBlock->getData();
  420. auto newSize = newMemoryBlock->getSize();
  421. if (currentInfo.sourceData != newPtr || currentInfo.sourceDataSize != newSize)
  422. changeLevel = jmax (2, changeLevel);
  423. currentInfo.sourceType = SourceType::sourceBinaryData;
  424. currentInfo.sourceData = newPtr;
  425. currentInfo.sourceDataSize = newSize;
  426. currentInfo.fileImpulseResponse = File();
  427. }
  428. else if (newSourceType == SourceType::sourceAudioFile)
  429. {
  430. File newFile (arrayParameters->getUnchecked (1).toString());
  431. if (currentInfo.fileImpulseResponse != newFile)
  432. changeLevel = jmax (2, changeLevel);
  433. currentInfo.sourceType = SourceType::sourceAudioFile;
  434. currentInfo.fileImpulseResponse = newFile;
  435. currentInfo.sourceData = nullptr;
  436. currentInfo.sourceDataSize = 0;
  437. }
  438. else if (newSourceType == SourceType::sourceAudioBuffer)
  439. {
  440. double bufferSampleRate (arrayParameters->getUnchecked (1));
  441. changeLevel = jmax (2, changeLevel);
  442. currentInfo.sourceType = SourceType::sourceAudioBuffer;
  443. currentInfo.bufferSampleRate = bufferSampleRate;
  444. currentInfo.fileImpulseResponse = File();
  445. currentInfo.sourceData = nullptr;
  446. currentInfo.sourceDataSize = 0;
  447. }
  448. }
  449. break;
  450. case ChangeRequest::changeImpulseResponseSize:
  451. {
  452. int64 newSize = requestParameters[n];
  453. if (currentInfo.impulseResponseSize != (size_t) newSize)
  454. changeLevel = jmax (1, changeLevel);
  455. currentInfo.impulseResponseSize = (size_t) newSize;
  456. }
  457. break;
  458. case ChangeRequest::changeStereo:
  459. {
  460. bool newWantsStereo = requestParameters[n];
  461. if (currentInfo.wantsStereo != newWantsStereo)
  462. changeLevel = jmax (1, changeLevel);
  463. currentInfo.wantsStereo = newWantsStereo;
  464. }
  465. break;
  466. case ChangeRequest::changeTrimming:
  467. {
  468. bool newWantsTrimming = requestParameters[n];
  469. if (currentInfo.wantsTrimming != newWantsTrimming)
  470. changeLevel = jmax (1, changeLevel);
  471. currentInfo.wantsTrimming = newWantsTrimming;
  472. }
  473. break;
  474. case ChangeRequest::changeNormalization:
  475. {
  476. bool newWantsNormalization = requestParameters[n];
  477. if (currentInfo.wantsNormalization != newWantsNormalization)
  478. changeLevel = jmax (1, changeLevel);
  479. currentInfo.wantsNormalization = newWantsNormalization;
  480. }
  481. break;
  482. default:
  483. jassertfalse;
  484. break;
  485. }
  486. }
  487. if (currentInfo.sourceType == SourceType::sourceNone)
  488. {
  489. currentInfo.sourceType = SourceType::sourceAudioBuffer;
  490. if (currentInfo.sampleRate == 0)
  491. currentInfo.sampleRate = 44100;
  492. if (currentInfo.maximumBufferSize == 0)
  493. currentInfo.maximumBufferSize = 128;
  494. currentInfo.bufferSampleRate = currentInfo.sampleRate;
  495. currentInfo.impulseResponseSize = 1;
  496. currentInfo.fileImpulseResponse = File();
  497. currentInfo.sourceData = nullptr;
  498. currentInfo.sourceDataSize = 0;
  499. AudioBuffer<float> newBuffer;
  500. newBuffer.setSize (1, 1);
  501. newBuffer.setSample (0, 0, 1.f);
  502. copyBufferToTemporaryLocation (newBuffer);
  503. }
  504. // action depending on the change level
  505. if (changeLevel == 3)
  506. {
  507. interpolationBuffer.setSize (2, static_cast<int> (currentInfo.maximumBufferSize));
  508. processImpulseResponse();
  509. initializeConvolutionEngines();
  510. }
  511. else if (changeLevel == 2)
  512. {
  513. startThread();
  514. }
  515. else if (changeLevel == 1)
  516. {
  517. startThread();
  518. }
  519. }
  520. //==============================================================================
  521. void copyBufferToTemporaryLocation (const AudioBuffer<float>& buffer)
  522. {
  523. const SpinLock::ScopedLockType sl (processLock);
  524. auto numChannels = buffer.getNumChannels() > 1 ? 2 : 1;
  525. temporaryBuffer.setSize (numChannels, buffer.getNumSamples(), false, false, true);
  526. for (auto channel = 0; channel < numChannels; ++channel)
  527. temporaryBuffer.copyFrom (channel, 0, buffer, channel, 0, buffer.getNumSamples());
  528. }
  529. /** Copies a buffer from a temporary location to the impulseResponseOriginal
  530. buffer for the sourceAudioBuffer. */
  531. void copyBufferFromTemporaryLocation()
  532. {
  533. const SpinLock::ScopedLockType sl (processLock);
  534. impulseResponseOriginal.setSize (2, temporaryBuffer.getNumSamples(), false, false, true);
  535. for (auto channel = 0; channel < temporaryBuffer.getNumChannels(); ++channel)
  536. impulseResponseOriginal.copyFrom (channel, 0, temporaryBuffer, channel, 0, temporaryBuffer.getNumSamples());
  537. }
  538. //==============================================================================
  539. void reset()
  540. {
  541. for (auto* e : engines)
  542. e->reset();
  543. }
  544. /** Convolution processing handling interpolation between previous and new states
  545. of the convolution engines.
  546. */
  547. void processSamples (const AudioBlock<float>& input, AudioBlock<float>& output)
  548. {
  549. processFifo();
  550. size_t numChannels = input.getNumChannels();
  551. size_t numSamples = jmin (input.getNumSamples(), output.getNumSamples());
  552. if (mustInterpolate == false)
  553. {
  554. for (size_t channel = 0; channel < numChannels; ++channel)
  555. engines[(int) channel]->processSamples (input.getChannelPointer (channel), output.getChannelPointer (channel), numSamples);
  556. }
  557. else
  558. {
  559. auto interpolated = AudioBlock<float> (interpolationBuffer).getSubBlock (0, numSamples);
  560. for (size_t channel = 0; channel < numChannels; ++channel)
  561. {
  562. auto&& buffer = output.getSingleChannelBlock (channel);
  563. interpolationBuffer.copyFrom ((int) channel, 0, input.getChannelPointer (channel), (int) numSamples);
  564. engines[(int) channel]->processSamples (input.getChannelPointer (channel), buffer.getChannelPointer (0), numSamples);
  565. changeVolumes[channel].applyGain (buffer.getChannelPointer (0), (int) numSamples);
  566. auto* interPtr = interpolationBuffer.getWritePointer ((int) channel);
  567. engines[(int) channel + 2]->processSamples (interPtr, interPtr, numSamples);
  568. changeVolumes[channel + 2].applyGain (interPtr, (int) numSamples);
  569. buffer += interpolated.getSingleChannelBlock (channel);
  570. }
  571. if (changeVolumes[0].isSmoothing() == false)
  572. {
  573. mustInterpolate = false;
  574. for (auto channel = 0; channel < 2; ++channel)
  575. engines[channel]->copyStateFromOtherEngine (*engines[channel + 2]);
  576. }
  577. }
  578. }
  579. private:
  580. //==============================================================================
  581. void run() override
  582. {
  583. if (changeLevel == 2)
  584. {
  585. processImpulseResponse();
  586. if (isThreadRunning() && threadShouldExit())
  587. return;
  588. initializeConvolutionEngines();
  589. }
  590. else if (changeLevel == 1)
  591. {
  592. initializeConvolutionEngines();
  593. }
  594. }
  595. void processImpulseResponse()
  596. {
  597. if (currentInfo.sourceType == SourceType::sourceBinaryData)
  598. {
  599. copyAudioStreamInAudioBuffer (new MemoryInputStream (currentInfo.sourceData, currentInfo.sourceDataSize, false));
  600. }
  601. else if (currentInfo.sourceType == SourceType::sourceAudioFile)
  602. {
  603. copyAudioStreamInAudioBuffer (new FileInputStream (currentInfo.fileImpulseResponse));
  604. }
  605. else if (currentInfo.sourceType == SourceType::sourceAudioBuffer)
  606. {
  607. copyBufferFromTemporaryLocation();
  608. trimAndResampleImpulseResponse (temporaryBuffer.getNumChannels(), currentInfo.bufferSampleRate, currentInfo.wantsTrimming);
  609. }
  610. if (isThreadRunning() && threadShouldExit())
  611. return;
  612. if (currentInfo.wantsNormalization)
  613. {
  614. if (currentInfo.wantsStereo)
  615. {
  616. normalizeImpulseResponse (currentInfo.buffer->getWritePointer(0), currentInfo.buffer->getNumSamples(), 1.0);
  617. normalizeImpulseResponse (currentInfo.buffer->getWritePointer(1), currentInfo.buffer->getNumSamples(), 1.0);
  618. }
  619. else
  620. {
  621. normalizeImpulseResponse (currentInfo.buffer->getWritePointer (0), currentInfo.buffer->getNumSamples(), 1.0);
  622. }
  623. }
  624. }
  625. /** Converts the data from an audio file into a stereo audio buffer of floats, and
  626. performs resampling if necessary.
  627. */
  628. double copyAudioStreamInAudioBuffer (InputStream* stream)
  629. {
  630. AudioFormatManager manager;
  631. manager.registerBasicFormats();
  632. if (ScopedPointer<AudioFormatReader> formatReader = manager.createReaderFor (stream))
  633. {
  634. auto maximumTimeInSeconds = 10.0;
  635. int64 maximumLength = static_cast<int64> (roundDoubleToInt (maximumTimeInSeconds * formatReader->sampleRate));
  636. auto numChannels = formatReader->numChannels > 1 ? 2 : 1;
  637. impulseResponseOriginal.setSize (2, static_cast<int> (jmin (maximumLength, formatReader->lengthInSamples)), false, false, true);
  638. impulseResponseOriginal.clear();
  639. formatReader->read (&(impulseResponseOriginal), 0, impulseResponseOriginal.getNumSamples(), 0, true, numChannels > 1);
  640. return trimAndResampleImpulseResponse (numChannels, formatReader->sampleRate, currentInfo.wantsTrimming);
  641. }
  642. else
  643. return 0.0;
  644. }
  645. double trimAndResampleImpulseResponse (int numChannels, double bufferSampleRate, bool mustTrim)
  646. {
  647. auto thresholdTrim = Decibels::decibelsToGain (-80.0f);
  648. auto indexStart = 0;
  649. auto indexEnd = impulseResponseOriginal.getNumSamples() - 1;
  650. if (mustTrim)
  651. {
  652. indexStart = impulseResponseOriginal.getNumSamples() - 1;
  653. indexEnd = 0;
  654. for (auto channel = 0; channel < numChannels; ++channel)
  655. {
  656. auto localIndexStart = 0;
  657. auto localIndexEnd = impulseResponseOriginal.getNumSamples() - 1;
  658. auto* channelData = impulseResponseOriginal.getReadPointer (channel);
  659. while (localIndexStart < impulseResponseOriginal.getNumSamples() - 1
  660. && channelData[localIndexStart] <= thresholdTrim
  661. && channelData[localIndexStart] >= -thresholdTrim)
  662. ++localIndexStart;
  663. while (localIndexEnd >= 0
  664. && channelData[localIndexEnd] <= thresholdTrim
  665. && channelData[localIndexEnd] >= -thresholdTrim)
  666. --localIndexEnd;
  667. indexStart = jmin (indexStart, localIndexStart);
  668. indexEnd = jmax (indexEnd, localIndexEnd);
  669. }
  670. if (indexStart > 0)
  671. {
  672. for (auto channel = 0; channel < numChannels; ++channel)
  673. {
  674. auto* channelData = impulseResponseOriginal.getWritePointer (channel);
  675. for (auto i = 0; i < indexEnd - indexStart + 1; ++i)
  676. channelData[i] = channelData[i + indexStart];
  677. for (auto i = indexEnd - indexStart + 1; i < impulseResponseOriginal.getNumSamples() - 1; ++i)
  678. channelData[i] = 0.0f;
  679. }
  680. }
  681. }
  682. double factorReading;
  683. if (currentInfo.sampleRate == bufferSampleRate)
  684. {
  685. // No resampling
  686. factorReading = 1.0;
  687. auto impulseSize = jmin (static_cast<int> (currentInfo.impulseResponseSize), indexEnd - indexStart + 1);
  688. impulseResponse.setSize (2, impulseSize);
  689. impulseResponse.clear();
  690. for (auto channel = 0; channel < numChannels; ++channel)
  691. impulseResponse.copyFrom (channel, 0, impulseResponseOriginal, channel, 0, impulseSize);
  692. }
  693. else
  694. {
  695. // Resampling
  696. factorReading = bufferSampleRate / currentInfo.sampleRate;
  697. auto impulseSize = jmin (static_cast<int> (currentInfo.impulseResponseSize), roundDoubleToInt ((indexEnd - indexStart + 1) / factorReading));
  698. impulseResponse.setSize (2, impulseSize);
  699. impulseResponse.clear();
  700. MemoryAudioSource memorySource (impulseResponseOriginal, false);
  701. ResamplingAudioSource resamplingSource (&memorySource, false, numChannels);
  702. resamplingSource.setResamplingRatio (factorReading);
  703. resamplingSource.prepareToPlay (impulseSize, currentInfo.sampleRate);
  704. AudioSourceChannelInfo info;
  705. info.startSample = 0;
  706. info.numSamples = impulseSize;
  707. info.buffer = &impulseResponse;
  708. resamplingSource.getNextAudioBlock (info);
  709. }
  710. // Filling the second channel with the first if necessary
  711. if (numChannels == 1)
  712. impulseResponse.copyFrom (1, 0, impulseResponse, 0, 0, impulseResponse.getNumSamples());
  713. return factorReading;
  714. }
  715. void normalizeImpulseResponse (float* samples, int numSamples, double factorResampling) const
  716. {
  717. auto magnitude = 0.0f;
  718. for (int i = 0; i < numSamples; ++i)
  719. magnitude += samples[i] * samples[i];
  720. auto magnitudeInv = 1.0f / (4.0f * std::sqrt (magnitude)) * 0.5f * static_cast <float> (factorResampling);
  721. for (int i = 0; i < numSamples; ++i)
  722. samples[i] *= magnitudeInv;
  723. }
/** (Re)initializes the convolution engines for the current impulse response.

    changeLevel == 3 performs a full reload directly into the active engines
    0/1; any other level prepares the spare engines 2/3 instead and sets up a
    50 ms crossfade from the old engines to the new ones (mustInterpolate).
*/
void initializeConvolutionEngines()
{
    // Nothing to do until prepare() has provided a maximum buffer size.
    if (currentInfo.maximumBufferSize == 0)
        return;

    auto numChannels = (currentInfo.wantsStereo ? 2 : 1);

    if (changeLevel == 3)
    {
        // Full reload: initialize the active engines in place, no crossfade.
        for (int i = 0; i < numChannels; ++i)
            engines[i]->initializeConvolutionEngine (currentInfo, i);

        // Mono source: mirror channel 0's state into the second engine.
        if (numChannels == 1)
            engines[1]->copyStateFromOtherEngine (*engines[0]);

        mustInterpolate = false;
    }
    else
    {
        // Prepare the spare engines 2/3 in the background.
        for (int i = 0; i < numChannels; ++i)
        {
            engines[i + 2]->initializeConvolutionEngine (currentInfo, i);
            engines[i + 2]->reset();

            // Abort early if the background thread has been asked to stop.
            if (isThreadRunning() && threadShouldExit())
                return;
        }

        // Mono source: mirror channel 0's state into the second spare engine.
        if (numChannels == 1)
            engines[3]->copyStateFromOtherEngine (*engines[2]);

        for (size_t i = 0; i < 2; ++i)
        {
            // setValue + reset snaps the smoother to that level, then the
            // second setValue starts a 50 ms ramp towards the new target:
            // engines 0/1 fade out while engines 2/3 fade in.
            changeVolumes[i].setValue (1.0f);
            changeVolumes[i].reset (currentInfo.sampleRate, 0.05);
            changeVolumes[i].setValue (0.0f);

            changeVolumes[i + 2].setValue (0.0f);
            changeVolumes[i + 2].reset (currentInfo.sampleRate, 0.05);
            changeVolumes[i + 2].setValue (1.0f);
        }

        mustInterpolate = true;
    }
}
//==============================================================================
static constexpr int fifoSize = 256;          // the size of the fifo which handles all the change requests
AbstractFifo abstractFifo;                    // fifo managing read/write positions in the two request arrays below
Array<ChangeRequest> requestsType;            // the pending ChangeRequest types
Array<juce::var> requestsParameter;           // the parameter associated with each pending request
int changeLevel = 0;                          // the current level of requested change in the convolution engine

//==============================================================================
ConvolutionEngine::ProcessingInformation currentInfo;  // the information about the impulse response to load
AudioBuffer<float> temporaryBuffer;           // a temporary buffer that is used when the function copyAndLoadImpulseResponse is called in the main API
SpinLock processLock;                         // lock guarding access to that temporary buffer
AudioBuffer<float> impulseResponseOriginal;   // a buffer with the original impulse response
AudioBuffer<float> impulseResponse;           // a buffer with the impulse response trimmed, resampled, resized and normalized

//==============================================================================
OwnedArray<ConvolutionEngine> engines;        // the 4 convolution engines being used (0/1 active, 2/3 spare for crossfading)
AudioBuffer<float> interpolationBuffer;       // a buffer to do the interpolation between the convolution engines 0-1 and 2-3
LinearSmoothedValue<float> changeVolumes[4];  // the volumes for each convolution engine during interpolation
bool mustInterpolate = false;                 // tells if the convolution engines outputs must be currently interpolated

//==============================================================================
JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (Pimpl)
  779. };
  780. //==============================================================================
  781. Convolution::Convolution()
  782. {
  783. pimpl = new Pimpl();
  784. pimpl->addToFifo (Convolution::Pimpl::ChangeRequest::changeEngine, juce::var (0));
  785. }
// Out-of-line empty destructor: presumably required so the smart-pointer
// pimpl member sees the complete Pimpl type at the point of destruction —
// TODO confirm against the class declaration in the header.
Convolution::~Convolution()
{
}
  789. void Convolution::loadImpulseResponse (const void* sourceData, size_t sourceDataSize,
  790. bool wantsStereo, bool wantsTrimming, size_t size,
  791. bool wantsNormalization)
  792. {
  793. if (sourceData == nullptr)
  794. return;
  795. Pimpl::ChangeRequest types[] = { Pimpl::ChangeRequest::changeSource,
  796. Pimpl::ChangeRequest::changeImpulseResponseSize,
  797. Pimpl::ChangeRequest::changeStereo,
  798. Pimpl::ChangeRequest::changeTrimming,
  799. Pimpl::ChangeRequest::changeNormalization };
  800. Array<juce::var> sourceParameter;
  801. sourceParameter.add (juce::var ((int) ConvolutionEngine::ProcessingInformation::SourceType::sourceBinaryData));
  802. sourceParameter.add (juce::var (sourceData, sourceDataSize));
  803. juce::var parameters[] = { juce::var (sourceParameter),
  804. juce::var (static_cast<int64> (size)),
  805. juce::var (wantsStereo),
  806. juce::var (wantsTrimming),
  807. juce::var (wantsNormalization) };
  808. pimpl->addToFifo (types, parameters, 5);
  809. }
  810. void Convolution::loadImpulseResponse (const File& fileImpulseResponse, bool wantsStereo,
  811. bool wantsTrimming, size_t size, bool wantsNormalization)
  812. {
  813. if (! fileImpulseResponse.existsAsFile())
  814. return;
  815. Pimpl::ChangeRequest types[] = { Pimpl::ChangeRequest::changeSource,
  816. Pimpl::ChangeRequest::changeImpulseResponseSize,
  817. Pimpl::ChangeRequest::changeStereo,
  818. Pimpl::ChangeRequest::changeTrimming,
  819. Pimpl::ChangeRequest::changeNormalization };
  820. Array<juce::var> sourceParameter;
  821. sourceParameter.add (juce::var ((int) ConvolutionEngine::ProcessingInformation::SourceType::sourceAudioFile));
  822. sourceParameter.add (juce::var (fileImpulseResponse.getFullPathName()));
  823. juce::var parameters[] = { juce::var (sourceParameter),
  824. juce::var (static_cast<int64> (size)),
  825. juce::var (wantsStereo),
  826. juce::var (wantsTrimming),
  827. juce::var (wantsNormalization) };
  828. pimpl->addToFifo (types, parameters, 5);
  829. }
  830. void Convolution::copyAndLoadImpulseResponseFromBuffer (const AudioBuffer<float>& buffer,
  831. double bufferSampleRate, bool wantsStereo, bool wantsTrimming, bool wantsNormalization, size_t size)
  832. {
  833. jassert (bufferSampleRate > 0);
  834. if (buffer.getNumSamples() == 0)
  835. return;
  836. pimpl->copyBufferToTemporaryLocation (buffer);
  837. Pimpl::ChangeRequest types[] = { Pimpl::ChangeRequest::changeSource,
  838. Pimpl::ChangeRequest::changeImpulseResponseSize,
  839. Pimpl::ChangeRequest::changeStereo,
  840. Pimpl::ChangeRequest::changeTrimming,
  841. Pimpl::ChangeRequest::changeNormalization };
  842. Array<juce::var> sourceParameter;
  843. sourceParameter.add (juce::var ((int) ConvolutionEngine::ProcessingInformation::SourceType::sourceAudioBuffer));
  844. sourceParameter.add (juce::var (bufferSampleRate));
  845. juce::var parameters[] = { juce::var (sourceParameter),
  846. juce::var (static_cast<int64> (size)),
  847. juce::var (wantsStereo),
  848. juce::var (wantsTrimming),
  849. juce::var (wantsNormalization) };
  850. pimpl->addToFifo (types, parameters, 5);
  851. }
  852. void Convolution::prepare (const ProcessSpec& spec)
  853. {
  854. jassert (isPositiveAndBelow (spec.numChannels, static_cast<uint32> (3))); // only mono and stereo is supported
  855. Pimpl::ChangeRequest types[] = { Pimpl::ChangeRequest::changeSampleRate,
  856. Pimpl::ChangeRequest::changeMaximumBufferSize };
  857. juce::var parameters[] = { juce::var (spec.sampleRate),
  858. juce::var (static_cast<int> (spec.maximumBlockSize)) };
  859. pimpl->addToFifo (types, parameters, 2);
  860. for (size_t channel = 0; channel < spec.numChannels; ++channel)
  861. {
  862. volumeDry[channel].reset (spec.sampleRate, 0.05);
  863. volumeWet[channel].reset (spec.sampleRate, 0.05);
  864. }
  865. sampleRate = spec.sampleRate;
  866. dryBuffer = AudioBlock<float> (dryBufferStorage,
  867. jmin (spec.numChannels, 2u),
  868. spec.maximumBlockSize);
  869. isActive = true;
  870. }
  871. void Convolution::reset() noexcept
  872. {
  873. dryBuffer.clear();
  874. pimpl->reset();
  875. }
  876. void Convolution::processSamples (const AudioBlock<float>& input, AudioBlock<float>& output, bool isBypassed) noexcept
  877. {
  878. if (! isActive)
  879. return;
  880. jassert (input.getNumChannels() == output.getNumChannels());
  881. jassert (isPositiveAndBelow (input.getNumChannels(), static_cast<size_t> (3))); // only mono and stereo is supported
  882. auto numChannels = input.getNumChannels();
  883. auto numSamples = jmin (input.getNumSamples(), output.getNumSamples());
  884. auto dry = dryBuffer.getSubsetChannelBlock (0, numChannels);
  885. if (volumeDry[0].isSmoothing())
  886. {
  887. dry.copy (input);
  888. for (size_t channel = 0; channel < numChannels; ++channel)
  889. volumeDry[channel].applyGain (dry.getChannelPointer (channel), (int) numSamples);
  890. pimpl->processSamples (input, output);
  891. for (size_t channel = 0; channel < numChannels; ++channel)
  892. volumeWet[channel].applyGain (output.getChannelPointer (channel), (int) numSamples);
  893. output += dry;
  894. }
  895. else
  896. {
  897. if (! currentIsBypassed)
  898. pimpl->processSamples (input, output);
  899. if (isBypassed != currentIsBypassed)
  900. {
  901. currentIsBypassed = isBypassed;
  902. for (size_t channel = 0; channel < numChannels; ++channel)
  903. {
  904. volumeDry[channel].setValue (isBypassed ? 0.0f : 1.0f);
  905. volumeDry[channel].reset (sampleRate, 0.05);
  906. volumeDry[channel].setValue (isBypassed ? 1.0f : 0.0f);
  907. volumeWet[channel].setValue (isBypassed ? 1.0f : 0.0f);
  908. volumeWet[channel].reset (sampleRate, 0.05);
  909. volumeWet[channel].setValue (isBypassed ? 0.0f : 1.0f);
  910. }
  911. }
  912. }
  913. }
  914. } // namespace dsp
  915. } // namespace juce