Audio plugin host https://kx.studio/carla
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1872 lines
70KB

  1. /*
  2. ==============================================================================
  3. This file is part of the Water library.
  4. Copyright (c) 2015 ROLI Ltd.
  5. Copyright (C) 2017-2020 Filipe Coelho <falktx@falktx.com>
  6. Permission is granted to use this software under the terms of the GNU
  7. General Public License as published by the Free Software Foundation;
  8. either version 2 of the License, or any later version.
  9. This program is distributed in the hope that it will be useful, but WITHOUT
  10. ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
  11. FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
  12. For a full copy of the GNU General Public License see the doc/GPL.txt file.
  13. ==============================================================================
  14. */
  15. #include "AudioProcessorGraph.h"
  16. #include "../containers/SortedSet.h"
  17. namespace water {
  18. //==============================================================================
  19. namespace GraphRenderingOps
  20. {
/** Abstract base class for one step of the graph's pre-computed rendering sequence.
    Each op reads and/or writes channels of the shared audio/CV buffer pools and
    the shared MIDI buffer list when perform() is invoked once per audio block. */
struct AudioGraphRenderingOpBase
{
    AudioGraphRenderingOpBase() noexcept {}
    virtual ~AudioGraphRenderingOpBase() {}

    /** Executes this rendering step.
        @param sharedAudioBufferChans  pool of audio channels shared by all ops
        @param sharedCVBufferChans     pool of CV channels shared by all ops
        @param sharedMidiBuffers       pool of MIDI buffers shared by all ops
        @param numSamples              number of samples to process in this block */
    virtual void perform (AudioSampleBuffer& sharedAudioBufferChans,
                          AudioSampleBuffer& sharedCVBufferChans,
                          const OwnedArray<MidiBuffer>& sharedMidiBuffers,
                          const int numSamples) = 0;
};
// use CRTP
// This intermediate template implements the virtual perform() once, forwarding
// to the derived class's (non-virtual) perform() via a static_cast. The derived
// op's body can therefore be inlined into this single thunk instead of each op
// re-declaring its own virtual override.
template <class Child>
struct AudioGraphRenderingOp : public AudioGraphRenderingOpBase
{
    void perform (AudioSampleBuffer& sharedAudioBufferChans,
                  AudioSampleBuffer& sharedCVBufferChans,
                  const OwnedArray<MidiBuffer>& sharedMidiBuffers,
                  const int numSamples) override
    {
        // Safe downcast: Child derives from AudioGraphRenderingOp<Child> by construction.
        static_cast<Child*> (this)->perform (sharedAudioBufferChans,
                                             sharedCVBufferChans,
                                             sharedMidiBuffers,
                                             numSamples);
    }
};
  45. //==============================================================================
  46. struct ClearChannelOp : public AudioGraphRenderingOp<ClearChannelOp>
  47. {
  48. ClearChannelOp (const int channel, const bool cv) noexcept
  49. : channelNum (channel), isCV (cv) {}
  50. void perform (AudioSampleBuffer& sharedAudioBufferChans,
  51. AudioSampleBuffer& sharedCVBufferChans,
  52. const OwnedArray<MidiBuffer>&,
  53. const int numSamples)
  54. {
  55. if (isCV)
  56. sharedCVBufferChans.clear (channelNum, 0, numSamples);
  57. else
  58. sharedAudioBufferChans.clear (channelNum, 0, numSamples);
  59. }
  60. const int channelNum;
  61. const bool isCV;
  62. CARLA_DECLARE_NON_COPY_CLASS (ClearChannelOp)
  63. };
  64. //==============================================================================
  65. struct CopyChannelOp : public AudioGraphRenderingOp<CopyChannelOp>
  66. {
  67. CopyChannelOp (const int srcChan, const int dstChan, const bool cv) noexcept
  68. : srcChannelNum (srcChan), dstChannelNum (dstChan), isCV (cv) {}
  69. void perform (AudioSampleBuffer& sharedAudioBufferChans,
  70. AudioSampleBuffer& sharedCVBufferChans,
  71. const OwnedArray<MidiBuffer>&,
  72. const int numSamples)
  73. {
  74. if (isCV)
  75. sharedCVBufferChans.copyFrom (dstChannelNum, 0, sharedCVBufferChans, srcChannelNum, 0, numSamples);
  76. else
  77. sharedAudioBufferChans.copyFrom (dstChannelNum, 0, sharedAudioBufferChans, srcChannelNum, 0, numSamples);
  78. }
  79. const int srcChannelNum, dstChannelNum;
  80. const bool isCV;
  81. CARLA_DECLARE_NON_COPY_CLASS (CopyChannelOp)
  82. };
  83. //==============================================================================
  84. struct AddChannelOp : public AudioGraphRenderingOp<AddChannelOp>
  85. {
  86. AddChannelOp (const int srcChan, const int dstChan, const bool cv) noexcept
  87. : srcChannelNum (srcChan), dstChannelNum (dstChan), isCV (cv) {}
  88. void perform (AudioSampleBuffer& sharedAudioBufferChans,
  89. AudioSampleBuffer& sharedCVBufferChans,
  90. const OwnedArray<MidiBuffer>&,
  91. const int numSamples)
  92. {
  93. if (isCV)
  94. sharedCVBufferChans.addFrom (dstChannelNum, 0, sharedCVBufferChans, srcChannelNum, 0, numSamples);
  95. else
  96. sharedAudioBufferChans.addFrom (dstChannelNum, 0, sharedAudioBufferChans, srcChannelNum, 0, numSamples);
  97. }
  98. const int srcChannelNum, dstChannelNum;
  99. const bool isCV;
  100. CARLA_DECLARE_NON_COPY_CLASS (AddChannelOp)
  101. };
  102. //==============================================================================
/** Rendering op that empties one of the shared MIDI buffers. */
struct ClearMidiBufferOp : public AudioGraphRenderingOp<ClearMidiBufferOp>
{
    ClearMidiBufferOp (const int buffer) noexcept : bufferNum (buffer) {}

    void perform (AudioSampleBuffer&, AudioSampleBuffer&,
                  const OwnedArray<MidiBuffer>& sharedMidiBuffers,
                  const int)
    {
        // bufferNum was validated at sequence-build time, so unchecked access is safe.
        sharedMidiBuffers.getUnchecked (bufferNum)->clear();
    }

    const int bufferNum;

    CARLA_DECLARE_NON_COPY_CLASS (ClearMidiBufferOp)
};
  115. //==============================================================================
/** Rendering op that overwrites one shared MIDI buffer with the contents of another. */
struct CopyMidiBufferOp : public AudioGraphRenderingOp<CopyMidiBufferOp>
{
    CopyMidiBufferOp (const int srcBuffer, const int dstBuffer) noexcept
        : srcBufferNum (srcBuffer), dstBufferNum (dstBuffer)
    {}

    void perform (AudioSampleBuffer&, AudioSampleBuffer&,
                  const OwnedArray<MidiBuffer>& sharedMidiBuffers,
                  const int)
    {
        // Full replacement via MidiBuffer's copy-assignment (not a merge).
        *sharedMidiBuffers.getUnchecked (dstBufferNum) = *sharedMidiBuffers.getUnchecked (srcBufferNum);
    }

    const int srcBufferNum, dstBufferNum;

    CARLA_DECLARE_NON_COPY_CLASS (CopyMidiBufferOp)
};
  130. //==============================================================================
  131. struct AddMidiBufferOp : public AudioGraphRenderingOp<AddMidiBufferOp>
  132. {
  133. AddMidiBufferOp (const int srcBuffer, const int dstBuffer)
  134. : srcBufferNum (srcBuffer), dstBufferNum (dstBuffer)
  135. {}
  136. void perform (AudioSampleBuffer&, AudioSampleBuffer&,
  137. const OwnedArray<MidiBuffer>& sharedMidiBuffers,
  138. const int numSamples)
  139. {
  140. sharedMidiBuffers.getUnchecked (dstBufferNum)
  141. ->addEvents (*sharedMidiBuffers.getUnchecked (srcBufferNum), 0, numSamples, 0);
  142. }
  143. const int srcBufferNum, dstBufferNum;
  144. CARLA_DECLARE_NON_COPY_CLASS (AddMidiBufferOp)
  145. };
  146. //==============================================================================
  147. struct DelayChannelOp : public AudioGraphRenderingOp<DelayChannelOp>
  148. {
  149. DelayChannelOp (const int chan, const int delaySize, const bool cv)
  150. : channel (chan),
  151. bufferSize (delaySize + 1),
  152. readIndex (0), writeIndex (delaySize),
  153. isCV (cv)
  154. {
  155. buffer.calloc ((size_t) bufferSize);
  156. }
  157. void perform (AudioSampleBuffer& sharedAudioBufferChans,
  158. AudioSampleBuffer& sharedCVBufferChans,
  159. const OwnedArray<MidiBuffer>&,
  160. const int numSamples)
  161. {
  162. float* data = isCV
  163. ? sharedCVBufferChans.getWritePointer (channel, 0)
  164. : sharedAudioBufferChans.getWritePointer (channel, 0);
  165. HeapBlock<float>& block = buffer;
  166. for (int i = numSamples; --i >= 0;)
  167. {
  168. block [writeIndex] = *data;
  169. *data++ = block [readIndex];
  170. if (++readIndex >= bufferSize) readIndex = 0;
  171. if (++writeIndex >= bufferSize) writeIndex = 0;
  172. }
  173. }
  174. private:
  175. HeapBlock<float> buffer;
  176. const int channel, bufferSize;
  177. int readIndex, writeIndex;
  178. const bool isCV;
  179. CARLA_DECLARE_NON_COPY_CLASS (DelayChannelOp)
  180. };
  181. //==============================================================================
/** Rendering op that runs one graph node's processor over its assigned shared
    buffer channels and MIDI buffer. The channel-index arrays map the node's
    logical channels onto slots of the shared audio/CV pools. */
struct ProcessBufferOp : public AudioGraphRenderingOp<ProcessBufferOp>
{
    ProcessBufferOp (const AudioProcessorGraph::Node::Ptr& n,
                     const Array<uint>& audioChannelsUsed,
                     const uint totalNumChans,
                     const Array<uint>& cvInChannelsUsed,
                     const Array<uint>& cvOutChannelsUsed,
                     const int midiBuffer)
        : node (n),
          processor (n->getProcessor()),
          audioChannelsToUse (audioChannelsUsed),
          cvInChannelsToUse (cvInChannelsUsed),
          cvOutChannelsToUse (cvOutChannelsUsed),
          totalAudioChans (jmax (1U, totalNumChans)),   // always at least one channel
          totalCVIns (cvInChannelsUsed.size()),
          totalCVOuts (cvOutChannelsUsed.size()),
          midiBufferToUse (midiBuffer)
    {
        // Pointer tables filled in per-block by perform().
        audioChannels.calloc (totalAudioChans);
        cvInChannels.calloc (totalCVIns);
        cvOutChannels.calloc (totalCVOuts);

        // Pad the channel map with slot 0 (the read-only zero buffer) so it
        // always covers totalAudioChans entries.
        while (audioChannelsToUse.size() < static_cast<int>(totalAudioChans))
            audioChannelsToUse.add (0);
    }

    void perform (AudioSampleBuffer& sharedAudioBufferChans,
                  AudioSampleBuffer& sharedCVBufferChans,
                  const OwnedArray<MidiBuffer>& sharedMidiBuffers,
                  const int numSamples)
    {
        HeapBlock<float*>& audioChannelsCopy = audioChannels;
        HeapBlock<float*>& cvInChannelsCopy = cvInChannels;
        HeapBlock<float*>& cvOutChannelsCopy = cvOutChannels;

        // Resolve the mapped pool slots into raw channel pointers for this block.
        for (uint i = 0; i < totalAudioChans; ++i)
            audioChannelsCopy[i] = sharedAudioBufferChans.getWritePointer (audioChannelsToUse.getUnchecked (i), 0);
        for (uint i = 0; i < totalCVIns; ++i)
            cvInChannels[i] = sharedCVBufferChans.getWritePointer (cvInChannelsToUse.getUnchecked (i), 0);
        for (uint i = 0; i < totalCVOuts; ++i)
            cvOutChannels[i] = sharedCVBufferChans.getWritePointer (cvOutChannelsToUse.getUnchecked (i), 0);

        // Wrap the pointer tables as non-owning buffers viewing the shared pools.
        AudioSampleBuffer audioBuffer (audioChannelsCopy, totalAudioChans, numSamples);
        AudioSampleBuffer cvInBuffer (cvInChannelsCopy, totalCVIns, numSamples);
        AudioSampleBuffer cvOutBuffer (cvOutChannelsCopy, totalCVOuts, numSamples);

        if (processor->isSuspended())
        {
            // Suspended node: emit silence instead of calling into the processor.
            audioBuffer.clear();
            cvOutBuffer.clear();
        }
        else
        {
            // Hold the processor's callback lock for the duration of the call.
            const CarlaRecursiveMutexLocker cml (processor->getCallbackLock());

            callProcess (audioBuffer, cvInBuffer, cvOutBuffer, *sharedMidiBuffers.getUnchecked (midiBufferToUse));
        }
    }

    // Thin wrapper so subclasses/tests could intercept the actual process call.
    void callProcess (AudioSampleBuffer& audioBuffer,
                      AudioSampleBuffer& cvInBuffer,
                      AudioSampleBuffer& cvOutBuffer,
                      MidiBuffer& midiMessages)
    {
        processor->processBlockWithCV (audioBuffer, cvInBuffer, cvOutBuffer, midiMessages);
    }

    const AudioProcessorGraph::Node::Ptr node;   // keeps the node alive while ops exist
    AudioProcessor* const processor;

private:
    Array<uint> audioChannelsToUse;
    Array<uint> cvInChannelsToUse;
    Array<uint> cvOutChannelsToUse;
    HeapBlock<float*> audioChannels;
    HeapBlock<float*> cvInChannels;
    HeapBlock<float*> cvOutChannels;
    AudioSampleBuffer tempBuffer;
    const uint totalAudioChans;
    const uint totalCVIns;
    const uint totalCVOuts;
    const int midiBufferToUse;

    CARLA_DECLARE_NON_COPY_CLASS (ProcessBufferOp)
};
  257. //==============================================================================
/** Used to calculate the correct sequence of rendering ops needed, based on
    the best re-use of shared buffers at each stage.
*/
struct RenderingOpSequenceCalculator
{
    /** Walks the (already ordered) node list, emitting rendering ops for each node
        into renderingOps, reclaiming shared buffers as they become unused, and
        finally publishing the accumulated latency on the graph. The op pointers
        are stored as void* and owned by the caller. */
    RenderingOpSequenceCalculator (AudioProcessorGraph& g,
                                   const Array<AudioProcessorGraph::Node*>& nodes,
                                   Array<void*>& renderingOps)
        : graph (g),
          orderedNodes (nodes),
          totalLatency (0)
    {
        // Slot 0 of each pool is reserved as a read-only buffer of zeros.
        audioNodeIds.add ((uint32) zeroNodeID); // first buffer is read-only zeros
        audioChannels.add (0);

        cvNodeIds.add ((uint32) zeroNodeID);
        cvChannels.add (0);

        midiNodeIds.add ((uint32) zeroNodeID);

        for (int i = 0; i < orderedNodes.size(); ++i)
        {
            createRenderingOpsForNode (*orderedNodes.getUnchecked(i), renderingOps, i);
            markAnyUnusedBuffersAsFree (i);
        }

        graph.setLatencySamples (totalLatency);
    }

    // How many shared buffers of each kind the renderer must allocate.
    int getNumAudioBuffersNeeded() const noexcept { return audioNodeIds.size(); }
    int getNumCVBuffersNeeded() const noexcept { return cvNodeIds.size(); }
    int getNumMidiBuffersNeeded() const noexcept { return midiNodeIds.size(); }

private:
    //==============================================================================
    AudioProcessorGraph& graph;
    const Array<AudioProcessorGraph::Node*>& orderedNodes;

    // Buffer-ownership tables: for slot i, *NodeIds[i] holds the id of the node
    // whose output currently occupies it, and *Channels[i] which output channel
    // of that node it is (parallel arrays; MIDI has no channel array).
    Array<uint> audioChannels, cvChannels;
    Array<uint32> audioNodeIds, cvNodeIds, midiNodeIds;

    // Sentinel "node ids" used in the ownership tables.
    enum { freeNodeID = 0xffffffff, zeroNodeID = 0xfffffffe, anonymousNodeID = 0xfffffffd };

    static bool isNodeBusy (uint32 nodeID) noexcept { return nodeID != freeNodeID; }

    // Per-node accumulated latency, keyed by node id (parallel arrays).
    Array<uint32> nodeDelayIDs;
    Array<int> nodeDelays;
    int totalLatency;

    // NOTE(review): for an unknown id, indexOf() returns -1 and this relies on
    // Array::operator[] returning a default (0) for out-of-range indices —
    // confirm water::Array's range-checked access semantics.
    int getNodeDelay (const uint32 nodeID) const { return nodeDelays [nodeDelayIDs.indexOf (nodeID)]; }

    /** Records (or updates) the total output latency of a node. */
    void setNodeDelay (const uint32 nodeID, const int latency)
    {
        const int index = nodeDelayIDs.indexOf (nodeID);

        if (index >= 0)
        {
            nodeDelays.set (index, latency);
        }
        else
        {
            nodeDelayIDs.add (nodeID);
            nodeDelays.add (latency);
        }
    }

    /** Highest accumulated latency among all nodes feeding into nodeID. */
    int getInputLatencyForNode (const uint32 nodeID) const
    {
        int maxLatency = 0;

        for (int i = graph.getNumConnections(); --i >= 0;)
        {
            const AudioProcessorGraph::Connection* const c = graph.getConnection (i);

            if (c->destNodeId == nodeID)
                maxLatency = jmax (maxLatency, getNodeDelay (c->sourceNodeId));
        }

        return maxLatency;
    }

    //==============================================================================
    /** Emits all ops needed to prepare the inputs for one node (clears, copies,
        mixes and latency-compensating delays), then the ProcessBufferOp itself.
        ourRenderingIndex is this node's position in orderedNodes, used to decide
        whether a buffer's contents are still needed by later nodes. */
    void createRenderingOpsForNode (AudioProcessorGraph::Node& node,
                                    Array<void*>& renderingOps,
                                    const int ourRenderingIndex)
    {
        AudioProcessor& processor = *node.getProcessor();

        const uint numAudioIns = processor.getTotalNumInputChannels(AudioProcessor::ChannelTypeAudio);
        const uint numAudioOuts = processor.getTotalNumOutputChannels(AudioProcessor::ChannelTypeAudio);
        const uint numCVIns = processor.getTotalNumInputChannels(AudioProcessor::ChannelTypeCV);
        const uint numCVOuts = processor.getTotalNumOutputChannels(AudioProcessor::ChannelTypeCV);
        const uint totalAudioChans = jmax (numAudioIns, numAudioOuts);

        Array<uint> audioChannelsToUse, cvInChannelsToUse, cvOutChannelsToUse;
        int midiBufferToUse = -1;

        // All inputs must be delayed up to the slowest source's latency.
        int maxLatency = getInputLatencyForNode (node.nodeId);

        //---- Audio inputs -----------------------------------------------------
        for (uint inputChan = 0; inputChan < numAudioIns; ++inputChan)
        {
            // get a list of all the inputs to this node
            Array<uint32> sourceNodes;
            Array<uint> sourceOutputChans;

            for (int i = graph.getNumConnections(); --i >= 0;)
            {
                const AudioProcessorGraph::Connection* const c = graph.getConnection (i);

                if (c->destNodeId == node.nodeId
                     && c->destChannelIndex == inputChan
                     && c->channelType == AudioProcessor::ChannelTypeAudio)
                {
                    sourceNodes.add (c->sourceNodeId);
                    sourceOutputChans.add (c->sourceChannelIndex);
                }
            }

            int bufIndex = -1;

            if (sourceNodes.size() == 0)
            {
                // unconnected input channel
                bufIndex = getFreeBuffer (AudioProcessor::ChannelTypeAudio);
                renderingOps.add (new ClearChannelOp (bufIndex, false));
            }
            else if (sourceNodes.size() == 1)
            {
                // channel with a straightforward single input..
                const uint32 srcNode = sourceNodes.getUnchecked(0);
                const uint srcChan = sourceOutputChans.getUnchecked(0);

                bufIndex = getBufferContaining (AudioProcessor::ChannelTypeAudio, srcNode, srcChan);

                if (bufIndex < 0)
                {
                    // if not found, this is probably a feedback loop
                    bufIndex = getReadOnlyEmptyBuffer();
                    wassert (bufIndex >= 0);
                }

                if (inputChan < numAudioOuts
                     && isBufferNeededLater (AudioProcessor::ChannelTypeAudio,
                                             ourRenderingIndex,
                                             inputChan,
                                             srcNode, srcChan))
                {
                    // can't mess up this channel because it's needed later by another node, so we
                    // need to use a copy of it..
                    const int newFreeBuffer = getFreeBuffer (AudioProcessor::ChannelTypeAudio);

                    renderingOps.add (new CopyChannelOp (bufIndex, newFreeBuffer, false));

                    bufIndex = newFreeBuffer;
                }

                const int nodeDelay = getNodeDelay (srcNode);

                // Delay-compensate this source up to the slowest input's latency.
                if (nodeDelay < maxLatency)
                    renderingOps.add (new DelayChannelOp (bufIndex, maxLatency - nodeDelay, false));
            }
            else
            {
                // channel with a mix of several inputs..

                // try to find a re-usable channel from our inputs..
                int reusableInputIndex = -1;

                for (int i = 0; i < sourceNodes.size(); ++i)
                {
                    const int sourceBufIndex = getBufferContaining (AudioProcessor::ChannelTypeAudio,
                                                                    sourceNodes.getUnchecked(i),
                                                                    sourceOutputChans.getUnchecked(i));

                    if (sourceBufIndex >= 0
                         && ! isBufferNeededLater (AudioProcessor::ChannelTypeAudio,
                                                   ourRenderingIndex,
                                                   inputChan,
                                                   sourceNodes.getUnchecked(i),
                                                   sourceOutputChans.getUnchecked(i)))
                    {
                        // we've found one of our input chans that can be re-used..
                        reusableInputIndex = i;
                        bufIndex = sourceBufIndex;

                        const int nodeDelay = getNodeDelay (sourceNodes.getUnchecked (i));
                        if (nodeDelay < maxLatency)
                            renderingOps.add (new DelayChannelOp (sourceBufIndex, maxLatency - nodeDelay, false));

                        break;
                    }
                }

                if (reusableInputIndex < 0)
                {
                    // can't re-use any of our input chans, so get a new one and copy everything into it..
                    bufIndex = getFreeBuffer (AudioProcessor::ChannelTypeAudio);
                    wassert (bufIndex != 0);

                    // Claim it anonymously so no later lookup matches it as a node output.
                    markBufferAsContaining (AudioProcessor::ChannelTypeAudio,
                                            bufIndex, static_cast<uint32> (anonymousNodeID), 0);

                    const int srcIndex = getBufferContaining (AudioProcessor::ChannelTypeAudio,
                                                              sourceNodes.getUnchecked (0),
                                                              sourceOutputChans.getUnchecked (0));

                    if (srcIndex < 0)
                    {
                        // if not found, this is probably a feedback loop
                        renderingOps.add (new ClearChannelOp (bufIndex, false));
                    }
                    else
                    {
                        renderingOps.add (new CopyChannelOp (srcIndex, bufIndex, false));
                    }

                    reusableInputIndex = 0;

                    const int nodeDelay = getNodeDelay (sourceNodes.getFirst());

                    if (nodeDelay < maxLatency)
                        renderingOps.add (new DelayChannelOp (bufIndex, maxLatency - nodeDelay, false));
                }

                // Mix the remaining sources into the chosen accumulator buffer.
                for (int j = 0; j < sourceNodes.size(); ++j)
                {
                    if (j != reusableInputIndex)
                    {
                        int srcIndex = getBufferContaining (AudioProcessor::ChannelTypeAudio,
                                                            sourceNodes.getUnchecked(j),
                                                            sourceOutputChans.getUnchecked(j));

                        if (srcIndex >= 0)
                        {
                            const int nodeDelay = getNodeDelay (sourceNodes.getUnchecked (j));

                            if (nodeDelay < maxLatency)
                            {
                                if (! isBufferNeededLater (AudioProcessor::ChannelTypeAudio,
                                                           ourRenderingIndex, inputChan,
                                                           sourceNodes.getUnchecked(j),
                                                           sourceOutputChans.getUnchecked(j)))
                                {
                                    renderingOps.add (new DelayChannelOp (srcIndex, maxLatency - nodeDelay, false));
                                }
                                else // buffer is reused elsewhere, can't be delayed
                                {
                                    const int bufferToDelay = getFreeBuffer (AudioProcessor::ChannelTypeAudio);
                                    renderingOps.add (new CopyChannelOp (srcIndex, bufferToDelay, false));
                                    renderingOps.add (new DelayChannelOp (bufferToDelay, maxLatency - nodeDelay, false));
                                    srcIndex = bufferToDelay;
                                }
                            }

                            renderingOps.add (new AddChannelOp (srcIndex, bufIndex, false));
                        }
                    }
                }
            }

            CARLA_SAFE_ASSERT_CONTINUE (bufIndex >= 0);
            audioChannelsToUse.add (bufIndex);

            // Input channels that double as outputs will hold this node's output.
            if (inputChan < numAudioOuts)
                markBufferAsContaining (AudioProcessor::ChannelTypeAudio, bufIndex, node.nodeId, inputChan);
        }

        //---- Extra audio outputs (beyond the input count) ---------------------
        for (uint outputChan = numAudioIns; outputChan < numAudioOuts; ++outputChan)
        {
            const int bufIndex = getFreeBuffer (AudioProcessor::ChannelTypeAudio);
            CARLA_SAFE_ASSERT_CONTINUE (bufIndex > 0);

            audioChannelsToUse.add (bufIndex);

            markBufferAsContaining (AudioProcessor::ChannelTypeAudio, bufIndex, node.nodeId, outputChan);
        }

        //---- CV inputs --------------------------------------------------------
        // Unlike audio, CV inputs are always copied into a fresh buffer (see also
        // markAnyUnusedBuffersAsFree, which deliberately never frees CV buffers).
        for (uint inputChan = 0; inputChan < numCVIns; ++inputChan)
        {
            // get a list of all the inputs to this node
            Array<uint32> sourceNodes;
            Array<uint> sourceOutputChans;

            for (int i = graph.getNumConnections(); --i >= 0;)
            {
                const AudioProcessorGraph::Connection* const c = graph.getConnection (i);

                if (c->destNodeId == node.nodeId
                     && c->destChannelIndex == inputChan
                     && c->channelType == AudioProcessor::ChannelTypeCV)
                {
                    sourceNodes.add (c->sourceNodeId);
                    sourceOutputChans.add (c->sourceChannelIndex);
                }
            }

            int bufIndex = -1;

            if (sourceNodes.size() == 0)
            {
                // unconnected input channel
                bufIndex = getFreeBuffer (AudioProcessor::ChannelTypeCV);
                renderingOps.add (new ClearChannelOp (bufIndex, true));
            }
            else if (sourceNodes.size() == 1)
            {
                // channel with a straightforward single input..
                const uint32 srcNode = sourceNodes.getUnchecked(0);
                const uint srcChan = sourceOutputChans.getUnchecked(0);

                bufIndex = getBufferContaining (AudioProcessor::ChannelTypeCV, srcNode, srcChan);

                if (bufIndex < 0)
                {
                    // if not found, this is probably a feedback loop
                    bufIndex = getReadOnlyEmptyBuffer();
                    wassert (bufIndex >= 0);
                }

                const int newFreeBuffer = getFreeBuffer (AudioProcessor::ChannelTypeCV);

                renderingOps.add (new CopyChannelOp (bufIndex, newFreeBuffer, true));

                bufIndex = newFreeBuffer;

                const int nodeDelay = getNodeDelay (srcNode);

                if (nodeDelay < maxLatency)
                    renderingOps.add (new DelayChannelOp (bufIndex, maxLatency - nodeDelay, true));
            }
            else
            {
                // channel with a mix of several inputs..
                {
                    bufIndex = getFreeBuffer (AudioProcessor::ChannelTypeCV);
                    wassert (bufIndex != 0);

                    const int srcIndex = getBufferContaining (AudioProcessor::ChannelTypeCV,
                                                              sourceNodes.getUnchecked (0),
                                                              sourceOutputChans.getUnchecked (0));

                    if (srcIndex < 0)
                    {
                        // if not found, this is probably a feedback loop
                        renderingOps.add (new ClearChannelOp (bufIndex, true));
                    }
                    else
                    {
                        renderingOps.add (new CopyChannelOp (srcIndex, bufIndex, true));
                    }

                    const int nodeDelay = getNodeDelay (sourceNodes.getFirst());

                    if (nodeDelay < maxLatency)
                        renderingOps.add (new DelayChannelOp (bufIndex, maxLatency - nodeDelay, true));
                }

                // Mix in the remaining sources (index 0 was copied above).
                for (int j = 1; j < sourceNodes.size(); ++j)
                {
                    int srcIndex = getBufferContaining (AudioProcessor::ChannelTypeCV,
                                                        sourceNodes.getUnchecked(j),
                                                        sourceOutputChans.getUnchecked(j));

                    if (srcIndex >= 0)
                    {
                        const int nodeDelay = getNodeDelay (sourceNodes.getUnchecked (j));

                        if (nodeDelay < maxLatency)
                        {
                            // CV source buffers are never delayed in place.
                            const int bufferToDelay = getFreeBuffer (AudioProcessor::ChannelTypeCV);
                            renderingOps.add (new CopyChannelOp (srcIndex, bufferToDelay, true));
                            renderingOps.add (new DelayChannelOp (bufferToDelay, maxLatency - nodeDelay, true));
                            srcIndex = bufferToDelay;
                        }

                        renderingOps.add (new AddChannelOp (srcIndex, bufIndex, true));
                    }
                }
            }

            CARLA_SAFE_ASSERT_CONTINUE (bufIndex >= 0);
            cvInChannelsToUse.add (bufIndex);
            markBufferAsContaining (AudioProcessor::ChannelTypeCV, bufIndex, node.nodeId, inputChan);
        }

        //---- CV outputs -------------------------------------------------------
        for (uint outputChan = 0; outputChan < numCVOuts; ++outputChan)
        {
            const int bufIndex = getFreeBuffer (AudioProcessor::ChannelTypeCV);
            CARLA_SAFE_ASSERT_CONTINUE (bufIndex > 0);

            cvOutChannelsToUse.add (bufIndex);

            markBufferAsContaining (AudioProcessor::ChannelTypeCV, bufIndex, node.nodeId, outputChan);
        }

        // Now the same thing for midi..
        Array<uint32> midiSourceNodes;

        for (int i = graph.getNumConnections(); --i >= 0;)
        {
            const AudioProcessorGraph::Connection* const c = graph.getConnection (i);

            if (c->destNodeId == node.nodeId && c->channelType == AudioProcessor::ChannelTypeMIDI)
                midiSourceNodes.add (c->sourceNodeId);
        }

        if (midiSourceNodes.size() == 0)
        {
            // No midi inputs..
            midiBufferToUse = getFreeBuffer (AudioProcessor::ChannelTypeMIDI); // need to pick a buffer even if the processor doesn't use midi

            if (processor.acceptsMidi() || processor.producesMidi())
                renderingOps.add (new ClearMidiBufferOp (midiBufferToUse));
        }
        else if (midiSourceNodes.size() == 1)
        {
            // One midi input..
            midiBufferToUse = getBufferContaining (AudioProcessor::ChannelTypeMIDI,
                                                   midiSourceNodes.getUnchecked(0),
                                                   0);

            if (midiBufferToUse >= 0)
            {
                if (isBufferNeededLater (AudioProcessor::ChannelTypeMIDI,
                                         ourRenderingIndex, 0,
                                         midiSourceNodes.getUnchecked(0), 0))
                {
                    // can't mess up this channel because it's needed later by another node, so we
                    // need to use a copy of it..
                    const int newFreeBuffer = getFreeBuffer (AudioProcessor::ChannelTypeMIDI);

                    renderingOps.add (new CopyMidiBufferOp (midiBufferToUse, newFreeBuffer));

                    midiBufferToUse = newFreeBuffer;
                }
            }
            else
            {
                // probably a feedback loop, so just use an empty one..
                midiBufferToUse = getFreeBuffer (AudioProcessor::ChannelTypeMIDI); // need to pick a buffer even if the processor doesn't use midi
            }
        }
        else
        {
            // More than one midi input being mixed..
            int reusableInputIndex = -1;

            for (int i = 0; i < midiSourceNodes.size(); ++i)
            {
                const int sourceBufIndex = getBufferContaining (AudioProcessor::ChannelTypeMIDI,
                                                                midiSourceNodes.getUnchecked(i),
                                                                0);

                if (sourceBufIndex >= 0
                     && ! isBufferNeededLater (AudioProcessor::ChannelTypeMIDI,
                                               ourRenderingIndex, 0,
                                               midiSourceNodes.getUnchecked(i), 0))
                {
                    // we've found one of our input buffers that can be re-used..
                    reusableInputIndex = i;
                    midiBufferToUse = sourceBufIndex;
                    break;
                }
            }

            if (reusableInputIndex < 0)
            {
                // can't re-use any of our input buffers, so get a new one and copy everything into it..
                midiBufferToUse = getFreeBuffer (AudioProcessor::ChannelTypeMIDI);
                wassert (midiBufferToUse >= 0);

                const int srcIndex = getBufferContaining (AudioProcessor::ChannelTypeMIDI,
                                                          midiSourceNodes.getUnchecked(0),
                                                          0);
                if (srcIndex >= 0)
                    renderingOps.add (new CopyMidiBufferOp (srcIndex, midiBufferToUse));
                else
                    renderingOps.add (new ClearMidiBufferOp (midiBufferToUse));

                reusableInputIndex = 0;
            }

            // Merge the remaining MIDI sources into the chosen buffer.
            for (int j = 0; j < midiSourceNodes.size(); ++j)
            {
                if (j != reusableInputIndex)
                {
                    const int srcIndex = getBufferContaining (AudioProcessor::ChannelTypeMIDI,
                                                              midiSourceNodes.getUnchecked(j),
                                                              0);
                    if (srcIndex >= 0)
                        renderingOps.add (new AddMidiBufferOp (srcIndex, midiBufferToUse));
                }
            }
        }

        if (processor.producesMidi())
            markBufferAsContaining (AudioProcessor::ChannelTypeMIDI,
                                    midiBufferToUse, node.nodeId,
                                    0);

        // This node's total latency = slowest input + its own processing latency.
        setNodeDelay (node.nodeId, maxLatency + processor.getLatencySamples());

        // Sink nodes (no audio outputs) define the graph's overall latency.
        if (numAudioOuts == 0)
            totalLatency = maxLatency;

        renderingOps.add (new ProcessBufferOp (&node,
                                               audioChannelsToUse,
                                               totalAudioChans,
                                               cvInChannelsToUse,
                                               cvOutChannelsToUse,
                                               midiBufferToUse));
    }

    //==============================================================================
    /** Returns the index of a free shared buffer of the given type, growing the
        pool if none is free. Slot 0 (the read-only zero buffer) is never handed out. */
    int getFreeBuffer (const AudioProcessor::ChannelType channelType)
    {
        switch (channelType)
        {
        case AudioProcessor::ChannelTypeAudio:
            for (int i = 1; i < audioNodeIds.size(); ++i)
                if (audioNodeIds.getUnchecked(i) == freeNodeID)
                    return i;

            audioNodeIds.add ((uint32) freeNodeID);
            audioChannels.add (0);
            return audioNodeIds.size() - 1;

        case AudioProcessor::ChannelTypeCV:
            for (int i = 1; i < cvNodeIds.size(); ++i)
                if (cvNodeIds.getUnchecked(i) == freeNodeID)
                    return i;

            cvNodeIds.add ((uint32) freeNodeID);
            cvChannels.add (0);
            return cvNodeIds.size() - 1;

        case AudioProcessor::ChannelTypeMIDI:
            for (int i = 1; i < midiNodeIds.size(); ++i)
                if (midiNodeIds.getUnchecked(i) == freeNodeID)
                    return i;

            midiNodeIds.add ((uint32) freeNodeID);
            return midiNodeIds.size() - 1;
        }

        return -1;
    }

    // Slot 0 is always the read-only zero buffer.
    int getReadOnlyEmptyBuffer() const noexcept
    {
        return 0;
    }

    /** Finds the shared buffer currently holding the given node output, or -1.
        For MIDI only the node id is matched (one MIDI stream per node). */
    int getBufferContaining (const AudioProcessor::ChannelType channelType,
                             const uint32 nodeId,
                             const uint outputChannel) const noexcept
    {
        switch (channelType)
        {
        case AudioProcessor::ChannelTypeAudio:
            for (int i = audioNodeIds.size(); --i >= 0;)
                if (audioNodeIds.getUnchecked(i) == nodeId && audioChannels.getUnchecked(i) == outputChannel)
                    return i;
            break;
        case AudioProcessor::ChannelTypeCV:
            for (int i = cvNodeIds.size(); --i >= 0;)
                if (cvNodeIds.getUnchecked(i) == nodeId && cvChannels.getUnchecked(i) == outputChannel)
                    return i;
            break;
        case AudioProcessor::ChannelTypeMIDI:
            for (int i = midiNodeIds.size(); --i >= 0;)
            {
                if (midiNodeIds.getUnchecked(i) == nodeId)
                    return i;
            }
            break;
        }

        return -1;
    }

    /** After emitting ops for step stepIndex, releases any audio/MIDI buffers whose
        contents no later node will read. The -1 channel argument (wrapping to
        UINT_MAX as a uint) means "ignore no input channel". */
    void markAnyUnusedBuffersAsFree (const int stepIndex)
    {
        for (int i = 0; i < audioNodeIds.size(); ++i)
        {
            if (isNodeBusy (audioNodeIds.getUnchecked(i))
                 && ! isBufferNeededLater (AudioProcessor::ChannelTypeAudio,
                                           stepIndex, -1,
                                           audioNodeIds.getUnchecked(i),
                                           audioChannels.getUnchecked(i)))
            {
                audioNodeIds.set (i, (uint32) freeNodeID);
            }
        }

        // NOTE: CV skipped on purpose
        for (int i = 0; i < midiNodeIds.size(); ++i)
        {
            if (isNodeBusy (midiNodeIds.getUnchecked(i))
                 && ! isBufferNeededLater (AudioProcessor::ChannelTypeMIDI,
                                           stepIndex, -1,
                                           midiNodeIds.getUnchecked(i), 0))
            {
                midiNodeIds.set (i, (uint32) freeNodeID);
            }
        }
    }

    /** True if the output (nodeId, outputChanIndex) still feeds some node at or
        after stepIndexToSearchFrom. For the first step searched,
        inputChannelOfIndexToIgnore excludes the destination channel currently
        being wired up; later steps consider every input channel. */
    bool isBufferNeededLater (const AudioProcessor::ChannelType channelType,
                              int stepIndexToSearchFrom,
                              uint inputChannelOfIndexToIgnore,
                              const uint32 nodeId,
                              const uint outputChanIndex) const
    {
        while (stepIndexToSearchFrom < orderedNodes.size())
        {
            const AudioProcessorGraph::Node* const node = (const AudioProcessorGraph::Node*) orderedNodes.getUnchecked (stepIndexToSearchFrom);

            for (uint i = 0; i < node->getProcessor()->getTotalNumInputChannels(channelType); ++i)
                if (i != inputChannelOfIndexToIgnore
                     && graph.getConnectionBetween (channelType,
                                                    nodeId, outputChanIndex,
                                                    node->nodeId, i) != nullptr)
                    return true;

            // Only the first searched step gets a channel exclusion.
            inputChannelOfIndexToIgnore = (uint)-1;
            ++stepIndexToSearchFrom;
        }

        return false;
    }

    /** Records in the ownership tables that bufferNum now holds output
        outputIndex of nodeId. */
    void markBufferAsContaining (const AudioProcessor::ChannelType channelType,
                                 int bufferNum, uint32 nodeId, int outputIndex)
    {
        switch (channelType)
        {
        case AudioProcessor::ChannelTypeAudio:
            CARLA_SAFE_ASSERT_BREAK (bufferNum >= 0 && bufferNum < audioNodeIds.size());

            audioNodeIds.set (bufferNum, nodeId);
            audioChannels.set (bufferNum, outputIndex);
            break;

        case AudioProcessor::ChannelTypeCV:
            CARLA_SAFE_ASSERT_BREAK (bufferNum >= 0 && bufferNum < cvNodeIds.size());

            cvNodeIds.set (bufferNum, nodeId);
            cvChannels.set (bufferNum, outputIndex);
            break;

        case AudioProcessor::ChannelTypeMIDI:
            // NOTE(review): MIDI asserts bufferNum > 0 (slot 0 excluded) while
            // audio/CV allow 0 — confirm whether this asymmetry is intentional.
            CARLA_SAFE_ASSERT_BREAK (bufferNum > 0 && bufferNum < midiNodeIds.size());

            midiNodeIds.set (bufferNum, nodeId);
            break;
        }
    }

    CARLA_DECLARE_NON_COPY_CLASS (RenderingOpSequenceCalculator)
};
  800. //==============================================================================
  801. // Holds a fast lookup table for checking which nodes are inputs to others.
class ConnectionLookupTable
{
public:
    // Builds the table from the graph's connection list: one entry per
    // destination node, holding the sorted set of its direct source nodes.
    explicit ConnectionLookupTable (const OwnedArray<AudioProcessorGraph::Connection>& connections)
    {
        for (int i = 0; i < static_cast<int>(connections.size()); ++i)
        {
            const AudioProcessorGraph::Connection* const c = connections.getUnchecked(i);
            int index;
            Entry* entry = findEntry (c->destNodeId, index);
            if (entry == nullptr)
            {
                // No entry for this destination yet: insert one at the position
                // findEntry() reported, keeping `entries` sorted by destNodeId.
                entry = new Entry (c->destNodeId);
                entries.insert (index, entry);
            }
            entry->srcNodes.add (c->sourceNodeId);
        }
    }
    // True if `possibleInputId` feeds `possibleDestinationId`, either directly
    // or through any chain of intermediate nodes.
    bool isAnInputTo (const uint32 possibleInputId,
                      const uint32 possibleDestinationId) const noexcept
    {
        return isAnInputToRecursive (possibleInputId, possibleDestinationId, entries.size());
    }
private:
    //==============================================================================
    // One destination node plus the set of nodes connecting directly into it.
    struct Entry
    {
        explicit Entry (const uint32 destNodeId_) noexcept : destNodeId (destNodeId_) {}
        const uint32 destNodeId;
        SortedSet<uint32> srcNodes;
        CARLA_DECLARE_NON_COPY_CLASS (Entry)
    };
    // Kept sorted by destNodeId so findEntry() can binary-search.
    OwnedArray<Entry> entries;
    // Depth-first walk through the source sets; `recursionCheck` bounds the
    // depth so a graph containing cycles cannot recurse forever.
    bool isAnInputToRecursive (const uint32 possibleInputId,
                               const uint32 possibleDestinationId,
                               int recursionCheck) const noexcept
    {
        int index;
        if (const Entry* const entry = findEntry (possibleDestinationId, index))
        {
            const SortedSet<uint32>& srcNodes = entry->srcNodes;
            if (srcNodes.contains (possibleInputId))
                return true;
            if (--recursionCheck >= 0)
            {
                for (int i = 0; i < srcNodes.size(); ++i)
                    if (isAnInputToRecursive (possibleInputId, srcNodes.getUnchecked(i), recursionCheck))
                        return true;
            }
        }
        return false;
    }
    // Binary search over the sorted `entries` array. Returns the matching entry
    // or nullptr; in either case `insertIndex` is set to the position where an
    // entry with this destNodeId belongs, preserving the sort order.
    Entry* findEntry (const uint32 destNodeId, int& insertIndex) const noexcept
    {
        Entry* result = nullptr;
        int start = 0;
        int end = entries.size();
        for (;;)
        {
            if (start >= end)
            {
                break;
            }
            else if (destNodeId == entries.getUnchecked (start)->destNodeId)
            {
                result = entries.getUnchecked (start);
                break;
            }
            else
            {
                const int halfway = (start + end) / 2;
                if (halfway == start)
                {
                    // Interval narrowed to one element that didn't match:
                    // decide whether the new entry goes before or after it.
                    if (destNodeId >= entries.getUnchecked (halfway)->destNodeId)
                        ++start;
                    break;
                }
                else if (destNodeId >= entries.getUnchecked (halfway)->destNodeId)
                    start = halfway;
                else
                    end = halfway;
            }
        }
        insertIndex = start;
        return result;
    }
    CARLA_DECLARE_NON_COPY_CLASS (ConnectionLookupTable)
};
  890. //==============================================================================
  891. struct ConnectionSorter
  892. {
  893. static int compareElements (const AudioProcessorGraph::Connection* const first,
  894. const AudioProcessorGraph::Connection* const second) noexcept
  895. {
  896. if (first->sourceNodeId < second->sourceNodeId) return -1;
  897. if (first->sourceNodeId > second->sourceNodeId) return 1;
  898. if (first->destNodeId < second->destNodeId) return -1;
  899. if (first->destNodeId > second->destNodeId) return 1;
  900. if (first->sourceChannelIndex < second->sourceChannelIndex) return -1;
  901. if (first->sourceChannelIndex > second->sourceChannelIndex) return 1;
  902. if (first->destChannelIndex < second->destChannelIndex) return -1;
  903. if (first->destChannelIndex > second->destChannelIndex) return 1;
  904. return 0;
  905. }
  906. };
  907. }
  908. //==============================================================================
// Plain value object describing one edge of the graph: a (node, channel)
// source feeding a (node, channel) destination on a given channel type.
AudioProcessorGraph::Connection::Connection (ChannelType ct,
                                             const uint32 sourceID, const uint sourceChannel,
                                             const uint32 destID, const uint destChannel) noexcept
    : channelType (ct),
      sourceNodeId (sourceID), sourceChannelIndex (sourceChannel),
      destNodeId (destID), destChannelIndex (destChannel)
{
}
  917. //==============================================================================
// A Node wraps a single AudioProcessor instance inside the graph; it takes
// ownership of `p` (stored in the `processor` smart pointer).
AudioProcessorGraph::Node::Node (const uint32 nodeID, AudioProcessor* const p) noexcept
    : nodeId (nodeID), processor (p), isPrepared (false)
{
    wassert (processor != nullptr);
}
  923. void AudioProcessorGraph::Node::prepare (const double newSampleRate, const int newBlockSize,
  924. AudioProcessorGraph* const graph)
  925. {
  926. if (! isPrepared)
  927. {
  928. setParentGraph (graph);
  929. processor->setRateAndBufferSizeDetails (newSampleRate, newBlockSize);
  930. processor->prepareToPlay (newSampleRate, newBlockSize);
  931. isPrepared = true;
  932. }
  933. }
  934. void AudioProcessorGraph::Node::unprepare()
  935. {
  936. if (isPrepared)
  937. {
  938. isPrepared = false;
  939. processor->releaseResources();
  940. }
  941. }
  942. void AudioProcessorGraph::Node::setParentGraph (AudioProcessorGraph* const graph) const
  943. {
  944. if (AudioProcessorGraph::AudioGraphIOProcessor* const ioProc
  945. = dynamic_cast<AudioProcessorGraph::AudioGraphIOProcessor*> (processor.get()))
  946. ioProc->setParentGraph (graph);
  947. }
  948. //==============================================================================
// Bundles the shared audio/CV buffers the graph uses while rendering: the
// per-block in/out buffers plus the intermediate "rendering" buffers that the
// compiled op sequence reads and writes.
struct AudioProcessorGraph::AudioProcessorGraphBufferHelpers
{
    AudioProcessorGraphBufferHelpers() noexcept
        : currentAudioInputBuffer (nullptr),
          currentCVInputBuffer (nullptr) {}
    // Resizes (and zeroes) the intermediate rendering buffers.
    void setRenderingBufferSize (int newNumAudioChannels, int newNumCVChannels, int newNumSamples) noexcept
    {
        renderingAudioBuffers.setSize (newNumAudioChannels, newNumSamples);
        renderingAudioBuffers.clear();
        renderingCVBuffers.setSize (newNumCVChannels, newNumSamples);
        renderingCVBuffers.clear();
    }
    // Shrinks every owned buffer to a minimal 1x1 size and forgets the
    // (non-owned) input-buffer pointers.
    void release() noexcept
    {
        renderingAudioBuffers.setSize (1, 1);
        currentAudioInputBuffer = nullptr;
        currentCVInputBuffer = nullptr;
        currentAudioOutputBuffer.setSize (1, 1);
        currentCVOutputBuffer.setSize (1, 1);
        renderingCVBuffers.setSize (1, 1);
    }
    // Sizes the per-block output buffers and resets the input pointers, ready
    // for processing to start.
    void prepareInOutBuffers (int newNumAudioChannels, int newNumCVChannels, int newNumSamples) noexcept
    {
        currentAudioInputBuffer = nullptr;
        currentCVInputBuffer = nullptr;
        currentAudioOutputBuffer.setSize (newNumAudioChannels, newNumSamples);
        currentCVOutputBuffer.setSize (newNumCVChannels, newNumSamples);
    }
    AudioSampleBuffer renderingAudioBuffers;    // scratch channels used by the render ops
    AudioSampleBuffer renderingCVBuffers;
    AudioSampleBuffer* currentAudioInputBuffer; // non-owning; points at the caller's buffer during a block
    const AudioSampleBuffer* currentCVInputBuffer;
    AudioSampleBuffer currentAudioOutputBuffer; // accumulators the output I/O nodes mix into
    AudioSampleBuffer currentCVOutputBuffer;
};
  984. //==============================================================================
// Starts with an empty graph; node ids are handed out from 1 upwards.
AudioProcessorGraph::AudioProcessorGraph()
    : lastNodeId (0), audioAndCVBuffers (new AudioProcessorGraphBufferHelpers),
      currentMidiInputBuffer (nullptr), isPrepared (false), needsReorder (false)
{
}
// Tear-down: drop the compiled rendering sequence first, then every node and
// connection.
AudioProcessorGraph::~AudioProcessorGraph()
{
    clearRenderingSequence();
    clear();
}
// Fixed display name of the graph-as-a-processor.
const String AudioProcessorGraph::getName() const
{
    return "Audio Graph";
}
  999. //==============================================================================
// Removes every node and connection and marks the render sequence as stale.
void AudioProcessorGraph::clear()
{
    nodes.clear();
    connections.clear();
    needsReorder = true;
}
  1006. AudioProcessorGraph::Node* AudioProcessorGraph::getNodeForId (const uint32 nodeId) const
  1007. {
  1008. for (int i = nodes.size(); --i >= 0;)
  1009. if (nodes.getUnchecked(i)->nodeId == nodeId)
  1010. return nodes.getUnchecked(i);
  1011. return nullptr;
  1012. }
// Adds a processor to the graph, wrapped in a new Node (which takes ownership).
// Pass nodeId == 0 to have an id assigned automatically; a non-zero id must not
// already be in use. Returns the new node, or nullptr on precondition failure.
AudioProcessorGraph::Node* AudioProcessorGraph::addNode (AudioProcessor* const newProcessor, uint32 nodeId)
{
    CARLA_SAFE_ASSERT_RETURN (newProcessor != nullptr && newProcessor != this, nullptr);
    // The same processor instance may not be added twice.
    for (int i = nodes.size(); --i >= 0;)
    {
        CARLA_SAFE_ASSERT_RETURN (nodes.getUnchecked(i)->getProcessor() != newProcessor, nullptr);
    }
    if (nodeId == 0)
    {
        nodeId = ++lastNodeId;
    }
    else
    {
        // you can't add a node with an id that already exists in the graph..
        CARLA_SAFE_ASSERT_RETURN (getNodeForId (nodeId) == nullptr, nullptr);
        // NOTE(review): the assert above already guarantees no node with this
        // id exists, so this removeNode() call is effectively a no-op.
        removeNode (nodeId);
        if (nodeId > lastNodeId)
            lastNodeId = nodeId;
    }
    Node* const n = new Node (nodeId, newProcessor);
    nodes.add (n);
    if (isPrepared)
        needsReorder = true;  // rebuild the render sequence lazily
    n->setParentGraph (this);
    return n;
}
  1039. bool AudioProcessorGraph::removeNode (const uint32 nodeId)
  1040. {
  1041. disconnectNode (nodeId);
  1042. for (int i = nodes.size(); --i >= 0;)
  1043. {
  1044. if (nodes.getUnchecked(i)->nodeId == nodeId)
  1045. {
  1046. nodes.remove (i);
  1047. if (isPrepared)
  1048. needsReorder = true;
  1049. return true;
  1050. }
  1051. }
  1052. return false;
  1053. }
// Convenience overload: removes the given node object by its id.
bool AudioProcessorGraph::removeNode (Node* node)
{
    CARLA_SAFE_ASSERT_RETURN(node != nullptr, false);
    return removeNode (node->nodeId);
}
  1059. //==============================================================================
// Finds the connection matching all five fields exactly, or nullptr.
// A probe Connection is built so the sorted list can be binary-searched; a
// miss yields index -1, relying on OwnedArray::operator[] returning nullptr
// for out-of-range indices.
const AudioProcessorGraph::Connection* AudioProcessorGraph::getConnectionBetween (const ChannelType ct,
                                                                                  const uint32 sourceNodeId,
                                                                                  const uint sourceChannelIndex,
                                                                                  const uint32 destNodeId,
                                                                                  const uint destChannelIndex) const
{
    const Connection c (ct, sourceNodeId, sourceChannelIndex, destNodeId, destChannelIndex);
    GraphRenderingOps::ConnectionSorter sorter;
    return connections [connections.indexOfSorted (sorter, &c)];
}
  1070. bool AudioProcessorGraph::isConnected (const uint32 possibleSourceNodeId,
  1071. const uint32 possibleDestNodeId) const
  1072. {
  1073. for (int i = connections.size(); --i >= 0;)
  1074. {
  1075. const Connection* const c = connections.getUnchecked(i);
  1076. if (c->sourceNodeId == possibleSourceNodeId
  1077. && c->destNodeId == possibleDestNodeId)
  1078. {
  1079. return true;
  1080. }
  1081. }
  1082. return false;
  1083. }
  1084. bool AudioProcessorGraph::canConnect (ChannelType ct,
  1085. const uint32 sourceNodeId,
  1086. const uint sourceChannelIndex,
  1087. const uint32 destNodeId,
  1088. const uint destChannelIndex) const
  1089. {
  1090. if (sourceNodeId == destNodeId)
  1091. return false;
  1092. const Node* const source = getNodeForId (sourceNodeId);
  1093. if (source == nullptr
  1094. || (ct != ChannelTypeMIDI && sourceChannelIndex >= source->processor->getTotalNumOutputChannels(ct))
  1095. || (ct == ChannelTypeMIDI && ! source->processor->producesMidi()))
  1096. return false;
  1097. const Node* const dest = getNodeForId (destNodeId);
  1098. if (dest == nullptr
  1099. || (ct != ChannelTypeMIDI && destChannelIndex >= dest->processor->getTotalNumInputChannels(ct))
  1100. || (ct == ChannelTypeMIDI && ! dest->processor->acceptsMidi()))
  1101. return false;
  1102. return getConnectionBetween (ct,
  1103. sourceNodeId, sourceChannelIndex,
  1104. destNodeId, destChannelIndex) == nullptr;
  1105. }
  1106. bool AudioProcessorGraph::addConnection (const ChannelType ct,
  1107. const uint32 sourceNodeId,
  1108. const uint sourceChannelIndex,
  1109. const uint32 destNodeId,
  1110. const uint destChannelIndex)
  1111. {
  1112. if (! canConnect (ct, sourceNodeId, sourceChannelIndex, destNodeId, destChannelIndex))
  1113. return false;
  1114. GraphRenderingOps::ConnectionSorter sorter;
  1115. connections.addSorted (sorter, new Connection (ct,
  1116. sourceNodeId, sourceChannelIndex,
  1117. destNodeId, destChannelIndex));
  1118. if (isPrepared)
  1119. needsReorder = true;
  1120. return true;
  1121. }
// Removes the connection at the given index in the sorted connection list.
void AudioProcessorGraph::removeConnection (const int index)
{
    connections.remove (index);
    if (isPrepared)
        needsReorder = true;  // render sequence is now stale
}
  1128. bool AudioProcessorGraph::removeConnection (const ChannelType ct,
  1129. const uint32 sourceNodeId, const uint sourceChannelIndex,
  1130. const uint32 destNodeId, const uint destChannelIndex)
  1131. {
  1132. bool doneAnything = false;
  1133. for (int i = connections.size(); --i >= 0;)
  1134. {
  1135. const Connection* const c = connections.getUnchecked(i);
  1136. if (c->channelType == ct
  1137. && c->sourceNodeId == sourceNodeId
  1138. && c->destNodeId == destNodeId
  1139. && c->sourceChannelIndex == sourceChannelIndex
  1140. && c->destChannelIndex == destChannelIndex)
  1141. {
  1142. removeConnection (i);
  1143. doneAnything = true;
  1144. }
  1145. }
  1146. return doneAnything;
  1147. }
  1148. bool AudioProcessorGraph::disconnectNode (const uint32 nodeId)
  1149. {
  1150. bool doneAnything = false;
  1151. for (int i = connections.size(); --i >= 0;)
  1152. {
  1153. const Connection* const c = connections.getUnchecked(i);
  1154. if (c->sourceNodeId == nodeId || c->destNodeId == nodeId)
  1155. {
  1156. removeConnection (i);
  1157. doneAnything = true;
  1158. }
  1159. }
  1160. return doneAnything;
  1161. }
  1162. bool AudioProcessorGraph::isConnectionLegal (const Connection* const c) const
  1163. {
  1164. CARLA_SAFE_ASSERT_RETURN (c != nullptr, false);
  1165. const Node* const source = getNodeForId (c->sourceNodeId);
  1166. const Node* const dest = getNodeForId (c->destNodeId);
  1167. return source != nullptr
  1168. && dest != nullptr
  1169. && (c->channelType != ChannelTypeMIDI ? (c->sourceChannelIndex < source->processor->getTotalNumOutputChannels(c->channelType))
  1170. : source->processor->producesMidi())
  1171. && (c->channelType != ChannelTypeMIDI ? (c->destChannelIndex < dest->processor->getTotalNumInputChannels(c->channelType))
  1172. : dest->processor->acceptsMidi());
  1173. }
  1174. bool AudioProcessorGraph::removeIllegalConnections()
  1175. {
  1176. bool doneAnything = false;
  1177. for (int i = connections.size(); --i >= 0;)
  1178. {
  1179. if (! isConnectionLegal (connections.getUnchecked(i)))
  1180. {
  1181. removeConnection (i);
  1182. doneAnything = true;
  1183. }
  1184. }
  1185. return doneAnything;
  1186. }
  1187. //==============================================================================
// The render sequence stores ops as void*; cast back to the base class before
// deleting so the virtual destructor runs.
static void deleteRenderOpArray (Array<void*>& ops)
{
    for (int i = ops.size(); --i >= 0;)
        delete static_cast<GraphRenderingOps::AudioGraphRenderingOpBase*> (ops.getUnchecked(i));
}
// Detaches the current rendering sequence while holding the callback lock,
// then deletes the ops *outside* the lock so the audio thread is blocked for
// as short a time as possible.
void AudioProcessorGraph::clearRenderingSequence()
{
    Array<void*> oldOps;
    {
        const CarlaRecursiveMutexLocker cml (getCallbackLock());
        renderingOps.swapWith (oldOps);
    }
    deleteRenderOpArray (oldOps);
}
  1202. bool AudioProcessorGraph::isAnInputTo (const uint32 possibleInputId,
  1203. const uint32 possibleDestinationId,
  1204. const int recursionCheck) const
  1205. {
  1206. if (recursionCheck > 0)
  1207. {
  1208. for (int i = connections.size(); --i >= 0;)
  1209. {
  1210. const AudioProcessorGraph::Connection* const c = connections.getUnchecked (i);
  1211. if (c->destNodeId == possibleDestinationId
  1212. && (c->sourceNodeId == possibleInputId
  1213. || isAnInputTo (possibleInputId, c->sourceNodeId, recursionCheck - 1)))
  1214. return true;
  1215. }
  1216. }
  1217. return false;
  1218. }
// Recompiles the list of rendering operations from the current node/connection
// topology. Runs on a non-realtime thread; the finished sequence is swapped in
// under the callback lock and the old one deleted afterwards.
void AudioProcessorGraph::buildRenderingSequence()
{
    Array<void*> newRenderingOps;
    int numAudioRenderingBuffersNeeded = 2;
    int numCVRenderingBuffersNeeded = 0;
    int numMidiBuffersNeeded = 1;
    {
        const CarlaRecursiveMutexLocker cml (reorderMutex);
        // Topologically order the nodes: each node is inserted just before the
        // first already-placed node that it feeds (directly or transitively).
        Array<Node*> orderedNodes;
        {
            const GraphRenderingOps::ConnectionLookupTable table (connections);
            for (int i = 0; i < nodes.size(); ++i)
            {
                Node* const node = nodes.getUnchecked(i);
                node->prepare (getSampleRate(), getBlockSize(), this);
                int j = 0;
                for (; j < orderedNodes.size(); ++j)
                    if (table.isAnInputTo (node->nodeId, ((Node*) orderedNodes.getUnchecked(j))->nodeId))
                        break;
                orderedNodes.insert (j, node);
            }
        }
        // Translate the ordered node list into concrete render ops and learn
        // how many shared buffers the sequence requires.
        GraphRenderingOps::RenderingOpSequenceCalculator calculator (*this, orderedNodes, newRenderingOps);
        numAudioRenderingBuffersNeeded = calculator.getNumAudioBuffersNeeded();
        numCVRenderingBuffersNeeded = calculator.getNumCVBuffersNeeded();
        numMidiBuffersNeeded = calculator.getNumMidiBuffersNeeded();
    }
    {
        // swap over to the new rendering sequence..
        const CarlaRecursiveMutexLocker cml (getCallbackLock());
        audioAndCVBuffers->setRenderingBufferSize (numAudioRenderingBuffersNeeded,
                                                   numCVRenderingBuffersNeeded,
                                                   getBlockSize());
        for (int i = static_cast<int>(midiBuffers.size()); --i >= 0;)
            midiBuffers.getUnchecked(i)->clear();
        while (static_cast<int>(midiBuffers.size()) < numMidiBuffersNeeded)
            midiBuffers.add (new MidiBuffer());
        renderingOps.swapWith (newRenderingOps);
    }
    // delete the old ones..
    deleteRenderOpArray (newRenderingOps);
}
  1261. //==============================================================================
// Prepares the whole graph: sizes the shared in/out buffers (at least one
// channel each via jmax), resets MIDI state and compiles a fresh render
// sequence.
void AudioProcessorGraph::prepareToPlay (double sampleRate, int estimatedSamplesPerBlock)
{
    setRateAndBufferSizeDetails(sampleRate, estimatedSamplesPerBlock);
    audioAndCVBuffers->prepareInOutBuffers(jmax(1U, getTotalNumOutputChannels(AudioProcessor::ChannelTypeAudio)),
                                           jmax(1U, getTotalNumOutputChannels(AudioProcessor::ChannelTypeCV)),
                                           estimatedSamplesPerBlock);
    currentMidiInputBuffer = nullptr;
    currentMidiOutputBuffer.clear();
    clearRenderingSequence();
    buildRenderingSequence();
    isPrepared = true;
}
// Inverse of prepareToPlay(): unprepares every node and shrinks all buffers.
void AudioProcessorGraph::releaseResources()
{
    isPrepared = false;  // graph edits no longer flag a reorder while shut down
    for (int i = 0; i < nodes.size(); ++i)
        nodes.getUnchecked(i)->unprepare();
    audioAndCVBuffers->release();
    midiBuffers.clear();
    currentMidiInputBuffer = nullptr;
    currentMidiOutputBuffer.clear();
}
  1284. void AudioProcessorGraph::reset()
  1285. {
  1286. const CarlaRecursiveMutexLocker cml (getCallbackLock());
  1287. for (int i = 0; i < nodes.size(); ++i)
  1288. nodes.getUnchecked(i)->getProcessor()->reset();
  1289. }
// Propagates the realtime/offline flag to the graph itself and every node.
void AudioProcessorGraph::setNonRealtime (bool isProcessingNonRealtime) noexcept
{
    const CarlaRecursiveMutexLocker cml (getCallbackLock());
    AudioProcessor::setNonRealtime (isProcessingNonRealtime);
    for (int i = 0; i < nodes.size(); ++i)
        nodes.getUnchecked(i)->getProcessor()->setNonRealtime (isProcessingNonRealtime);
}
  1297. /*
  1298. void AudioProcessorGraph::processAudio (AudioSampleBuffer& audioBuffer, MidiBuffer& midiMessages)
  1299. {
  1300. AudioSampleBuffer*& currentAudioInputBuffer = audioAndCVBuffers->currentAudioInputBuffer;
  1301. AudioSampleBuffer& currentAudioOutputBuffer = audioAndCVBuffers->currentAudioOutputBuffer;
  1302. AudioSampleBuffer& renderingAudioBuffers = audioAndCVBuffers->renderingAudioBuffers;
  1303. AudioSampleBuffer& renderingCVBuffers = audioAndCVBuffers->renderingCVBuffers;
  1304. const int numSamples = audioBuffer.getNumSamples();
  1305. if (! audioAndCVBuffers->currentAudioOutputBuffer.setSizeRT(numSamples))
  1306. return;
  1307. if (! audioAndCVBuffers->renderingAudioBuffers.setSizeRT(numSamples))
  1308. return;
  1309. if (! audioAndCVBuffers->renderingCVBuffers.setSizeRT(numSamples))
  1310. return;
  1311. currentAudioInputBuffer = &audioBuffer;
  1312. currentAudioOutputBuffer.clear();
  1313. currentMidiInputBuffer = &midiMessages;
  1314. currentMidiOutputBuffer.clear();
  1315. for (int i = 0; i < renderingOps.size(); ++i)
  1316. {
  1317. GraphRenderingOps::AudioGraphRenderingOpBase* const op
  1318. = (GraphRenderingOps::AudioGraphRenderingOpBase*) renderingOps.getUnchecked(i);
  1319. op->perform (renderingAudioBuffers, renderingCVBuffers, midiBuffers, numSamples);
  1320. }
  1321. for (uint32_t i = 0; i < audioBuffer.getNumChannels(); ++i)
  1322. audioBuffer.copyFrom (i, 0, currentAudioOutputBuffer, i, 0, numSamples);
  1323. midiMessages.clear();
  1324. midiMessages.addEvents (currentMidiOutputBuffer, 0, audioBuffer.getNumSamples(), 0);
  1325. }
  1326. */
// Renders one block of the whole graph: points the shared state at the
// caller's buffers, runs the pre-compiled op sequence, then copies the mixed
// results (audio, CV and MIDI) back out.
void AudioProcessorGraph::processAudioAndCV (AudioSampleBuffer& audioBuffer,
                                             const AudioSampleBuffer& cvInBuffer,
                                             AudioSampleBuffer& cvOutBuffer,
                                             MidiBuffer& midiMessages)
{
    // Aliases into the shared buffer-helper state.
    AudioSampleBuffer*& currentAudioInputBuffer = audioAndCVBuffers->currentAudioInputBuffer;
    const AudioSampleBuffer*& currentCVInputBuffer = audioAndCVBuffers->currentCVInputBuffer;
    AudioSampleBuffer& currentAudioOutputBuffer = audioAndCVBuffers->currentAudioOutputBuffer;
    AudioSampleBuffer& currentCVOutputBuffer = audioAndCVBuffers->currentCVOutputBuffer;
    AudioSampleBuffer& renderingAudioBuffers = audioAndCVBuffers->renderingAudioBuffers;
    AudioSampleBuffer& renderingCVBuffers = audioAndCVBuffers->renderingCVBuffers;
    const int numSamples = audioBuffer.getNumSamples();
    // setSizeRT() presumably resizes without allocating (realtime-safe) and
    // can fail; in that case the whole block is skipped. TODO(review): confirm.
    if (! audioAndCVBuffers->currentAudioOutputBuffer.setSizeRT(numSamples))
        return;
    if (! audioAndCVBuffers->currentCVOutputBuffer.setSizeRT(numSamples))
        return;
    if (! audioAndCVBuffers->renderingAudioBuffers.setSizeRT(numSamples))
        return;
    if (! audioAndCVBuffers->renderingCVBuffers.setSizeRT(numSamples))
        return;
    currentAudioInputBuffer = &audioBuffer;
    currentCVInputBuffer = &cvInBuffer;
    currentMidiInputBuffer = &midiMessages;
    currentAudioOutputBuffer.clear();
    currentCVOutputBuffer.clear();
    currentMidiOutputBuffer.clear();
    // Execute the compiled sequence; the I/O endpoint nodes accumulate into
    // the current*OutputBuffer members as a side effect.
    for (int i = 0; i < renderingOps.size(); ++i)
    {
        GraphRenderingOps::AudioGraphRenderingOpBase* const op
            = (GraphRenderingOps::AudioGraphRenderingOpBase*) renderingOps.getUnchecked(i);
        op->perform (renderingAudioBuffers, renderingCVBuffers, midiBuffers, numSamples);
    }
    // Copy the accumulated outputs back into the caller's buffers.
    for (uint32_t i = 0; i < audioBuffer.getNumChannels(); ++i)
        audioBuffer.copyFrom (i, 0, currentAudioOutputBuffer, i, 0, numSamples);
    for (uint32_t i = 0; i < cvOutBuffer.getNumChannels(); ++i)
        cvOutBuffer.copyFrom (i, 0, currentCVOutputBuffer, i, 0, numSamples);
    midiMessages.clear();
    midiMessages.addEvents (currentMidiOutputBuffer, 0, audioBuffer.getNumSamples(), 0);
}
// The graph as a whole always advertises MIDI support on both sides.
bool AudioProcessorGraph::acceptsMidi() const { return true; }
bool AudioProcessorGraph::producesMidi() const { return true; }
  1368. /*
  1369. void AudioProcessorGraph::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
  1370. {
  1371. processAudio (buffer, midiMessages);
  1372. }
  1373. */
// AudioProcessor entry point for block processing with CV; forwards directly
// to processAudioAndCV().
void AudioProcessorGraph::processBlockWithCV (AudioSampleBuffer& audioBuffer,
                                              const AudioSampleBuffer& cvInBuffer,
                                              AudioSampleBuffer& cvOutBuffer,
                                              MidiBuffer& midiMessages)
{
    processAudioAndCV (audioBuffer, cvInBuffer, cvOutBuffer, midiMessages);
}
  1381. void AudioProcessorGraph::reorderNowIfNeeded()
  1382. {
  1383. if (needsReorder)
  1384. {
  1385. needsReorder = false;
  1386. buildRenderingSequence();
  1387. }
  1388. }
// Exposes the mutex taken by buildRenderingSequence() — presumably so callers
// can coordinate multi-step graph edits with reordering; verify at call sites.
const CarlaRecursiveMutex& AudioProcessorGraph::getReorderMutex() const
{
    return reorderMutex;
}
  1393. //==============================================================================
// Virtual I/O endpoint: a processor bridging the graph's own external
// inputs/outputs into the node/connection world. `graph` is attached later
// via setParentGraph().
AudioProcessorGraph::AudioGraphIOProcessor::AudioGraphIOProcessor (const IODeviceType deviceType)
    : type (deviceType), graph (nullptr)
{
}
AudioProcessorGraph::AudioGraphIOProcessor::~AudioGraphIOProcessor()
{
}
  1401. const String AudioProcessorGraph::AudioGraphIOProcessor::getName() const
  1402. {
  1403. switch (type)
  1404. {
  1405. case audioOutputNode: return "Audio Output";
  1406. case audioInputNode: return "Audio Input";
  1407. case cvOutputNode: return "CV Output";
  1408. case cvInputNode: return "CV Input";
  1409. case midiOutputNode: return "Midi Output";
  1410. case midiInputNode: return "Midi Input";
  1411. default: break;
  1412. }
  1413. return String();
  1414. }
void AudioProcessorGraph::AudioGraphIOProcessor::prepareToPlay (double, int)
{
    // Nothing to allocate; just sanity-check we've been attached to a graph.
    CARLA_SAFE_ASSERT (graph != nullptr);
}
void AudioProcessorGraph::AudioGraphIOProcessor::releaseResources()
{
    // No resources of its own to release; buffers belong to the parent graph.
}
// Moves data between the parent graph's shared buffers and this endpoint's own
// buffers, according to which of the six endpoint types this is. Output
// endpoints *mix into* (addFrom) the graph's accumulators so multiple sources
// sum; input endpoints *copy from* the graph's external input buffers.
void AudioProcessorGraph::AudioGraphIOProcessor::processAudioAndCV (AudioSampleBuffer& audioBuffer,
                                                                    const AudioSampleBuffer& cvInBuffer,
                                                                    AudioSampleBuffer& cvOutBuffer,
                                                                    MidiBuffer& midiMessages)
{
    CARLA_SAFE_ASSERT_RETURN(graph != nullptr,);
    switch (type)
    {
    case audioOutputNode:
    {
        // Mix this node's incoming audio into the graph's final audio output.
        AudioSampleBuffer& currentAudioOutputBuffer =
            graph->audioAndCVBuffers->currentAudioOutputBuffer;
        for (int i = jmin (currentAudioOutputBuffer.getNumChannels(),
                           audioBuffer.getNumChannels()); --i >= 0;)
        {
            currentAudioOutputBuffer.addFrom (i, 0, audioBuffer, i, 0, audioBuffer.getNumSamples());
        }
        break;
    }
    case audioInputNode:
    {
        // Hand the graph's external audio input to whatever is connected here.
        // NOTE(review): assumes the graph set currentAudioInputBuffer before
        // running the render sequence (see AudioProcessorGraph::processAudioAndCV).
        AudioSampleBuffer*& currentAudioInputBuffer =
            graph->audioAndCVBuffers->currentAudioInputBuffer;
        for (int i = jmin (currentAudioInputBuffer->getNumChannels(),
                           audioBuffer.getNumChannels()); --i >= 0;)
        {
            audioBuffer.copyFrom (i, 0, *currentAudioInputBuffer, i, 0, audioBuffer.getNumSamples());
        }
        break;
    }
    case cvOutputNode:
    {
        // Mix this node's incoming CV into the graph's CV output.
        AudioSampleBuffer& currentCVOutputBuffer =
            graph->audioAndCVBuffers->currentCVOutputBuffer;
        for (int i = jmin (currentCVOutputBuffer.getNumChannels(),
                           cvInBuffer.getNumChannels()); --i >= 0;)
        {
            currentCVOutputBuffer.addFrom (i, 0, cvInBuffer, i, 0, cvInBuffer.getNumSamples());
        }
        break;
    }
    case cvInputNode:
    {
        // Hand the graph's external CV input to connected nodes.
        const AudioSampleBuffer*& currentCVInputBuffer =
            graph->audioAndCVBuffers->currentCVInputBuffer;
        for (int i = jmin (currentCVInputBuffer->getNumChannels(),
                           cvOutBuffer.getNumChannels()); --i >= 0;)
        {
            cvOutBuffer.copyFrom (i, 0, *currentCVInputBuffer, i, 0, cvOutBuffer.getNumSamples());
        }
        break;
    }
    case midiOutputNode:
        // Collect MIDI destined for the outside world.
        graph->currentMidiOutputBuffer.addEvents (midiMessages, 0, audioBuffer.getNumSamples(), 0);
        break;
    case midiInputNode:
        // Emit the graph's external MIDI input into the graph.
        midiMessages.addEvents (*graph->currentMidiInputBuffer, 0, audioBuffer.getNumSamples(), 0);
        break;
    default:
        break;
    }
}
// AudioProcessor entry point; forwards directly to processAudioAndCV().
void AudioProcessorGraph::AudioGraphIOProcessor::processBlockWithCV (AudioSampleBuffer& audioBuffer,
                                                                     const AudioSampleBuffer& cvInBuffer,
                                                                     AudioSampleBuffer& cvOutBuffer,
                                                                     MidiBuffer& midiMessages)
{
    processAudioAndCV (audioBuffer, cvInBuffer, cvOutBuffer, midiMessages);
}
// Deliberately "inverted": the MIDI *output* endpoint consumes MIDI from the
// graph (see the midiOutputNode case in processAudioAndCV), and the MIDI
// *input* endpoint produces it into the graph.
bool AudioProcessorGraph::AudioGraphIOProcessor::acceptsMidi() const
{
    return type == midiOutputNode;
}
bool AudioProcessorGraph::AudioGraphIOProcessor::producesMidi() const
{
    return type == midiInputNode;
}
// True for the endpoint types that feed external data *into* the graph.
bool AudioProcessorGraph::AudioGraphIOProcessor::isInput() const noexcept
{
    return type == audioInputNode || type == cvInputNode || type == midiInputNode;
}
// True for the endpoint types that collect data *out of* the graph.
bool AudioProcessorGraph::AudioGraphIOProcessor::isOutput() const noexcept
{
    return type == audioOutputNode || type == cvOutputNode || type == midiOutputNode;
}
  1507. void AudioProcessorGraph::AudioGraphIOProcessor::setParentGraph (AudioProcessorGraph* const newGraph)
  1508. {
  1509. graph = newGraph;
  1510. if (graph != nullptr)
  1511. {
  1512. setPlayConfigDetails (type == audioOutputNode
  1513. ? graph->getTotalNumOutputChannels(AudioProcessor::ChannelTypeAudio)
  1514. : 0,
  1515. type == audioInputNode
  1516. ? graph->getTotalNumInputChannels(AudioProcessor::ChannelTypeAudio)
  1517. : 0,
  1518. type == cvOutputNode
  1519. ? graph->getTotalNumOutputChannels(AudioProcessor::ChannelTypeCV)
  1520. : 0,
  1521. type == cvInputNode
  1522. ? graph->getTotalNumInputChannels(AudioProcessor::ChannelTypeCV)
  1523. : 0,
  1524. type == midiOutputNode
  1525. ? graph->getTotalNumOutputChannels(AudioProcessor::ChannelTypeMIDI)
  1526. : 0,
  1527. type == midiInputNode
  1528. ? graph->getTotalNumInputChannels(AudioProcessor::ChannelTypeMIDI)
  1529. : 0,
  1530. getSampleRate(),
  1531. getBlockSize());
  1532. }
  1533. }
  1534. }