  1. /*
  2. ==============================================================================
  3. This file is part of the Water library.
  4. Copyright (c) 2015 ROLI Ltd.
  5. Copyright (C) 2017-2018 Filipe Coelho <falktx@falktx.com>
  6. Permission is granted to use this software under the terms of the GNU
  7. General Public License as published by the Free Software Foundation;
  8. either version 2 of the License, or any later version.
  9. This program is distributed in the hope that it will be useful, but WITHOUT
  10. ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
  11. FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
  12. For a full copy of the GNU General Public License see the doc/GPL.txt file.
  13. ==============================================================================
  14. */
  15. #include "AudioProcessorGraph.h"
  16. #include "../containers/SortedSet.h"
  17. namespace water {
  18. //==============================================================================
  19. namespace GraphRenderingOps
  20. {
  21. struct AudioGraphRenderingOpBase
  22. {
  23. AudioGraphRenderingOpBase() noexcept {}
  24. virtual ~AudioGraphRenderingOpBase() {}
  25. virtual void perform (AudioSampleBuffer& sharedAudioBufferChans,
  26. AudioSampleBuffer& sharedCVBufferChans,
  27. const OwnedArray<MidiBuffer>& sharedMidiBuffers,
  28. const int numSamples) = 0;
  29. };
  30. // use CRTP
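// The base class costs a single virtual call per rendering op; the CRTP forwarder then
// dispatches to the child's perform() statically, so the per-op work can be inlined in
// the render loop.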
  31. template <class Child>
  32. struct AudioGraphRenderingOp : public AudioGraphRenderingOpBase
  33. {
  34. void perform (AudioSampleBuffer& sharedAudioBufferChans,
  35. AudioSampleBuffer& sharedCVBufferChans,
  36. const OwnedArray<MidiBuffer>& sharedMidiBuffers,
  37. const int numSamples) override
  38. {
  39. static_cast<Child*> (this)->perform (sharedAudioBufferChans,
  40. sharedCVBufferChans,
  41. sharedMidiBuffers,
  42. numSamples);
  43. }
  44. };
  45. //==============================================================================
  46. struct ClearChannelOp : public AudioGraphRenderingOp<ClearChannelOp>
  47. {
  48. ClearChannelOp (const int channel, const bool cv) noexcept
  49. : channelNum (channel), isCV (cv) {}
  50. void perform (AudioSampleBuffer& sharedAudioBufferChans,
  51. AudioSampleBuffer& sharedCVBufferChans,
  52. const OwnedArray<MidiBuffer>&,
  53. const int numSamples)
  54. {
  55. if (isCV)
  56. sharedCVBufferChans.clear (channelNum, 0, numSamples);
  57. else
  58. sharedAudioBufferChans.clear (channelNum, 0, numSamples);
  59. }
  60. const int channelNum;
  61. const bool isCV;
  62. CARLA_DECLARE_NON_COPY_CLASS (ClearChannelOp)
  63. };
  64. //==============================================================================
  65. struct CopyChannelOp : public AudioGraphRenderingOp<CopyChannelOp>
  66. {
  67. CopyChannelOp (const int srcChan, const int dstChan, const bool cv) noexcept
  68. : srcChannelNum (srcChan), dstChannelNum (dstChan), isCV (cv) {}
  69. void perform (AudioSampleBuffer& sharedAudioBufferChans,
  70. AudioSampleBuffer& sharedCVBufferChans,
  71. const OwnedArray<MidiBuffer>&,
  72. const int numSamples)
  73. {
  74. if (isCV)
  75. sharedCVBufferChans.copyFrom (dstChannelNum, 0, sharedCVBufferChans, srcChannelNum, 0, numSamples);
  76. else
  77. sharedAudioBufferChans.copyFrom (dstChannelNum, 0, sharedAudioBufferChans, srcChannelNum, 0, numSamples);
  78. }
  79. const int srcChannelNum, dstChannelNum;
  80. const bool isCV;
  81. CARLA_DECLARE_NON_COPY_CLASS (CopyChannelOp)
  82. };
  83. //==============================================================================
  84. struct AddChannelOp : public AudioGraphRenderingOp<AddChannelOp>
  85. {
  86. AddChannelOp (const int srcChan, const int dstChan, const bool cv) noexcept
  87. : srcChannelNum (srcChan), dstChannelNum (dstChan), isCV (cv) {}
  88. void perform (AudioSampleBuffer& sharedAudioBufferChans,
  89. AudioSampleBuffer& sharedCVBufferChans,
  90. const OwnedArray<MidiBuffer>&,
  91. const int numSamples)
  92. {
  93. if (isCV)
  94. sharedCVBufferChans.addFrom (dstChannelNum, 0, sharedCVBufferChans, srcChannelNum, 0, numSamples);
  95. else
  96. sharedAudioBufferChans.addFrom (dstChannelNum, 0, sharedAudioBufferChans, srcChannelNum, 0, numSamples);
  97. }
  98. const int srcChannelNum, dstChannelNum;
  99. const bool isCV;
  100. CARLA_DECLARE_NON_COPY_CLASS (AddChannelOp)
  101. };
  102. //==============================================================================
  103. struct ClearMidiBufferOp : public AudioGraphRenderingOp<ClearMidiBufferOp>
  104. {
  105. ClearMidiBufferOp (const int buffer) noexcept : bufferNum (buffer) {}
  106. void perform (AudioSampleBuffer&, AudioSampleBuffer&,
  107. const OwnedArray<MidiBuffer>& sharedMidiBuffers,
  108. const int)
  109. {
  110. sharedMidiBuffers.getUnchecked (bufferNum)->clear();
  111. }
  112. const int bufferNum;
  113. CARLA_DECLARE_NON_COPY_CLASS (ClearMidiBufferOp)
  114. };
  115. //==============================================================================
  116. struct CopyMidiBufferOp : public AudioGraphRenderingOp<CopyMidiBufferOp>
  117. {
  118. CopyMidiBufferOp (const int srcBuffer, const int dstBuffer) noexcept
  119. : srcBufferNum (srcBuffer), dstBufferNum (dstBuffer)
  120. {}
  121. void perform (AudioSampleBuffer&, AudioSampleBuffer&,
  122. const OwnedArray<MidiBuffer>& sharedMidiBuffers,
  123. const int)
  124. {
  125. *sharedMidiBuffers.getUnchecked (dstBufferNum) = *sharedMidiBuffers.getUnchecked (srcBufferNum);
  126. }
  127. const int srcBufferNum, dstBufferNum;
  128. CARLA_DECLARE_NON_COPY_CLASS (CopyMidiBufferOp)
  129. };
  130. //==============================================================================
  131. struct AddMidiBufferOp : public AudioGraphRenderingOp<AddMidiBufferOp>
  132. {
  133. AddMidiBufferOp (const int srcBuffer, const int dstBuffer)
  134. : srcBufferNum (srcBuffer), dstBufferNum (dstBuffer)
  135. {}
  136. void perform (AudioSampleBuffer&, AudioSampleBuffer&,
  137. const OwnedArray<MidiBuffer>& sharedMidiBuffers,
  138. const int numSamples)
  139. {
  140. sharedMidiBuffers.getUnchecked (dstBufferNum)
  141. ->addEvents (*sharedMidiBuffers.getUnchecked (srcBufferNum), 0, numSamples, 0);
  142. }
  143. const int srcBufferNum, dstBufferNum;
  144. CARLA_DECLARE_NON_COPY_CLASS (AddMidiBufferOp)
  145. };
  146. //==============================================================================
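// Latency compensation: delays one shared audio/CV channel by a fixed number of samples
// through a small circular buffer, so sources with different latencies stay aligned when mixed.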
  147. struct DelayChannelOp : public AudioGraphRenderingOp<DelayChannelOp>
  148. {
  149. DelayChannelOp (const int chan, const int delaySize, const bool cv)
  150. : channel (chan),
  151. bufferSize (delaySize + 1),
  152. readIndex (0), writeIndex (delaySize),
  153. isCV (cv)
  154. {
  155. buffer.calloc ((size_t) bufferSize);
  156. }
  157. void perform (AudioSampleBuffer& sharedAudioBufferChans,
  158. AudioSampleBuffer& sharedCVBufferChans,
  159. const OwnedArray<MidiBuffer>&,
  160. const int numSamples)
  161. {
  162. float* data = isCV
  163. ? sharedCVBufferChans.getWritePointer (channel, 0)
  164. : sharedAudioBufferChans.getWritePointer (channel, 0);
  165. HeapBlock<float>& block = buffer;
  166. for (int i = numSamples; --i >= 0;)
  167. {
  168. block [writeIndex] = *data;
  169. *data++ = block [readIndex];
  170. if (++readIndex >= bufferSize) readIndex = 0;
  171. if (++writeIndex >= bufferSize) writeIndex = 0;
  172. }
  173. }
  174. private:
  175. HeapBlock<float> buffer;
  176. const int channel, bufferSize;
  177. int readIndex, writeIndex;
  178. const bool isCV;
  179. CARLA_DECLARE_NON_COPY_CLASS (DelayChannelOp)
  180. };
  181. //==============================================================================
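// The op that actually runs a node: it gathers the shared audio/CV channels assigned to the
// node into temporary AudioSampleBuffer views and calls the processor's processBlockWithCV()
// under its callback lock, or clears the outputs if the processor is suspended.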
  182. struct ProcessBufferOp : public AudioGraphRenderingOp<ProcessBufferOp>
  183. {
  184. ProcessBufferOp (const AudioProcessorGraph::Node::Ptr& n,
  185. const Array<uint>& audioChannelsUsed,
  186. const uint totalNumChans,
  187. const Array<uint>& cvInChannelsUsed,
  188. const Array<uint>& cvOutChannelsUsed,
  189. const int midiBuffer)
  190. : node (n),
  191. processor (n->getProcessor()),
  192. audioChannelsToUse (audioChannelsUsed),
  193. cvInChannelsToUse (cvInChannelsUsed),
  194. cvOutChannelsToUse (cvOutChannelsUsed),
  195. totalAudioChans (jmax (1U, totalNumChans)),
  196. totalCVIns (cvInChannelsUsed.size()),
  197. totalCVOuts (cvOutChannelsUsed.size()),
  198. midiBufferToUse (midiBuffer)
  199. {
  200. audioChannels.calloc (totalAudioChans);
  201. cvInChannels.calloc (totalCVIns);
  202. cvOutChannels.calloc (totalCVOuts);
  203. while (audioChannelsToUse.size() < static_cast<int>(totalAudioChans))
  204. audioChannelsToUse.add (0);
  205. }
  206. void perform (AudioSampleBuffer& sharedAudioBufferChans,
  207. AudioSampleBuffer& sharedCVBufferChans,
  208. const OwnedArray<MidiBuffer>& sharedMidiBuffers,
  209. const int numSamples)
  210. {
  211. HeapBlock<float*>& audioChannelsCopy = audioChannels;
  212. HeapBlock<float*>& cvInChannelsCopy = cvInChannels;
  213. HeapBlock<float*>& cvOutChannelsCopy = cvOutChannels;
  214. for (uint i = 0; i < totalAudioChans; ++i)
  215. audioChannelsCopy[i] = sharedAudioBufferChans.getWritePointer (audioChannelsToUse.getUnchecked (i), 0);
  216. for (uint i = 0; i < totalCVIns; ++i)
  217. cvInChannels[i] = sharedCVBufferChans.getWritePointer (cvInChannelsToUse.getUnchecked (i), 0);
  218. for (uint i = 0; i < totalCVOuts; ++i)
  219. cvOutChannels[i] = sharedCVBufferChans.getWritePointer (cvOutChannelsToUse.getUnchecked (i), 0);
  220. AudioSampleBuffer audioBuffer (audioChannelsCopy, totalAudioChans, numSamples);
  221. AudioSampleBuffer cvInBuffer (cvInChannelsCopy, totalCVIns, numSamples);
  222. AudioSampleBuffer cvOutBuffer (cvOutChannelsCopy, totalCVOuts, numSamples);
  223. if (processor->isSuspended())
  224. {
  225. audioBuffer.clear();
  226. cvOutBuffer.clear();
  227. }
  228. else
  229. {
  230. const CarlaRecursiveMutexLocker cml (processor->getCallbackLock());
  231. callProcess (audioBuffer, cvInBuffer, cvOutBuffer, *sharedMidiBuffers.getUnchecked (midiBufferToUse));
  232. }
  233. }
  234. void callProcess (AudioSampleBuffer& audioBuffer,
  235. AudioSampleBuffer& cvInBuffer,
  236. AudioSampleBuffer& cvOutBuffer,
  237. MidiBuffer& midiMessages)
  238. {
  239. processor->processBlockWithCV (audioBuffer, cvInBuffer, cvOutBuffer, midiMessages);
  240. }
  241. const AudioProcessorGraph::Node::Ptr node;
  242. AudioProcessor* const processor;
  243. private:
  244. Array<uint> audioChannelsToUse;
  245. Array<uint> cvInChannelsToUse;
  246. Array<uint> cvOutChannelsToUse;
  247. HeapBlock<float*> audioChannels;
  248. HeapBlock<float*> cvInChannels;
  249. HeapBlock<float*> cvOutChannels;
  250. AudioSampleBuffer tempBuffer;
  251. const uint totalAudioChans;
  252. const uint totalCVIns;
  253. const uint totalCVOuts;
  254. const int midiBufferToUse;
  255. CARLA_DECLARE_NON_COPY_CLASS (ProcessBufferOp)
  256. };
  257. //==============================================================================
  258. /** Used to calculate the correct sequence of rendering ops needed, based on
  259. the best re-use of shared buffers at each stage.
  260. */
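// Each shared buffer slot is tagged with the node id and output channel it currently holds:
// slot 0 is a read-only silent buffer (zeroNodeID), and slots whose contents no later node
// needs are marked freeNodeID so they can be re-used.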
  261. struct RenderingOpSequenceCalculator
  262. {
  263. RenderingOpSequenceCalculator (AudioProcessorGraph& g,
  264. const Array<AudioProcessorGraph::Node*>& nodes,
  265. Array<void*>& renderingOps)
  266. : graph (g),
  267. orderedNodes (nodes),
  268. totalLatency (0)
  269. {
  270. audioNodeIds.add ((uint32) zeroNodeID); // first buffer is read-only zeros
  271. audioChannels.add (0);
  272. cvNodeIds.add ((uint32) zeroNodeID);
  273. cvChannels.add (0);
  274. midiNodeIds.add ((uint32) zeroNodeID);
  275. for (int i = 0; i < orderedNodes.size(); ++i)
  276. {
  277. createRenderingOpsForNode (*orderedNodes.getUnchecked(i), renderingOps, i);
  278. markAnyUnusedBuffersAsFree (i);
  279. }
  280. graph.setLatencySamples (totalLatency);
  281. }
  282. int getNumAudioBuffersNeeded() const noexcept { return audioNodeIds.size(); }
  283. int getNumCVBuffersNeeded() const noexcept { return cvNodeIds.size(); }
  284. int getNumMidiBuffersNeeded() const noexcept { return midiNodeIds.size(); }
  285. private:
  286. //==============================================================================
  287. AudioProcessorGraph& graph;
  288. const Array<AudioProcessorGraph::Node*>& orderedNodes;
  289. Array<uint> audioChannels, cvChannels;
  290. Array<uint32> audioNodeIds, cvNodeIds, midiNodeIds;
  291. enum { freeNodeID = 0xffffffff, zeroNodeID = 0xfffffffe };
  292. static bool isNodeBusy (uint32 nodeID) noexcept { return nodeID != freeNodeID && nodeID != zeroNodeID; }
  293. Array<uint32> nodeDelayIDs;
  294. Array<int> nodeDelays;
  295. int totalLatency;
  296. int getNodeDelay (const uint32 nodeID) const { return nodeDelays [nodeDelayIDs.indexOf (nodeID)]; }
  297. void setNodeDelay (const uint32 nodeID, const int latency)
  298. {
  299. const int index = nodeDelayIDs.indexOf (nodeID);
  300. if (index >= 0)
  301. {
  302. nodeDelays.set (index, latency);
  303. }
  304. else
  305. {
  306. nodeDelayIDs.add (nodeID);
  307. nodeDelays.add (latency);
  308. }
  309. }
  310. int getInputLatencyForNode (const uint32 nodeID) const
  311. {
  312. int maxLatency = 0;
  313. for (int i = graph.getNumConnections(); --i >= 0;)
  314. {
  315. const AudioProcessorGraph::Connection* const c = graph.getConnection (i);
  316. if (c->destNodeId == nodeID)
  317. maxLatency = jmax (maxLatency, getNodeDelay (c->sourceNodeId));
  318. }
  319. return maxLatency;
  320. }
  321. //==============================================================================
  322. void createRenderingOpsForNode (AudioProcessorGraph::Node& node,
  323. Array<void*>& renderingOps,
  324. const int ourRenderingIndex)
  325. {
  326. AudioProcessor& processor = *node.getProcessor();
  327. const uint numAudioIns = processor.getTotalNumInputChannels(AudioProcessor::ChannelTypeAudio);
  328. const uint numAudioOuts = processor.getTotalNumOutputChannels(AudioProcessor::ChannelTypeAudio);
  329. const uint numCVIns = processor.getTotalNumInputChannels(AudioProcessor::ChannelTypeCV);
  330. const uint numCVOuts = processor.getTotalNumOutputChannels(AudioProcessor::ChannelTypeCV);
  331. const uint totalAudioChans = jmax (numAudioIns, numAudioOuts);
  332. Array<uint> audioChannelsToUse, cvInChannelsToUse, cvOutChannelsToUse;
  333. int midiBufferToUse = -1;
  334. int maxLatency = getInputLatencyForNode (node.nodeId);
  335. for (uint inputChan = 0; inputChan < numAudioIns; ++inputChan)
  336. {
  337. // get a list of all the inputs to this node
  338. Array<uint32> sourceNodes;
  339. Array<uint> sourceOutputChans;
  340. for (int i = graph.getNumConnections(); --i >= 0;)
  341. {
  342. const AudioProcessorGraph::Connection* const c = graph.getConnection (i);
  343. if (c->destNodeId == node.nodeId
  344. && c->destChannelIndex == inputChan
  345. && c->channelType == AudioProcessor::ChannelTypeAudio)
  346. {
  347. sourceNodes.add (c->sourceNodeId);
  348. sourceOutputChans.add (c->sourceChannelIndex);
  349. }
  350. }
  351. int bufIndex = -1;
  352. if (sourceNodes.size() == 0)
  353. {
  354. // unconnected input channel
  355. if (inputChan >= numAudioOuts)
  356. {
  357. bufIndex = getReadOnlyEmptyBuffer();
  358. wassert (bufIndex >= 0);
  359. }
  360. else
  361. {
  362. bufIndex = getFreeBuffer (AudioProcessor::ChannelTypeAudio);
  363. renderingOps.add (new ClearChannelOp (bufIndex, false));
  364. }
  365. }
  366. else if (sourceNodes.size() == 1)
  367. {
  368. // channel with a straightforward single input..
  369. const uint32 srcNode = sourceNodes.getUnchecked(0);
  370. const uint srcChan = sourceOutputChans.getUnchecked(0);
  371. bufIndex = getBufferContaining (AudioProcessor::ChannelTypeAudio, srcNode, srcChan);
  372. if (bufIndex < 0)
  373. {
  374. // if not found, this is probably a feedback loop
  375. bufIndex = getReadOnlyEmptyBuffer();
  376. wassert (bufIndex >= 0);
  377. }
  378. if (inputChan < numAudioOuts
  379. && isBufferNeededLater (AudioProcessor::ChannelTypeAudio,
  380. ourRenderingIndex,
  381. inputChan,
  382. srcNode, srcChan))
  383. {
  384. // can't mess up this channel because it's needed later by another node, so we
  385. // need to use a copy of it..
  386. const int newFreeBuffer = getFreeBuffer (AudioProcessor::ChannelTypeAudio);
  387. renderingOps.add (new CopyChannelOp (bufIndex, newFreeBuffer, false));
  388. bufIndex = newFreeBuffer;
  389. }
  390. const int nodeDelay = getNodeDelay (srcNode);
  391. if (nodeDelay < maxLatency)
  392. renderingOps.add (new DelayChannelOp (bufIndex, maxLatency - nodeDelay, false));
  393. }
  394. else
  395. {
  396. // channel with a mix of several inputs..
  397. // try to find a re-usable channel from our inputs..
  398. int reusableInputIndex = -1;
  399. for (int i = 0; i < sourceNodes.size(); ++i)
  400. {
  401. const int sourceBufIndex = getBufferContaining (AudioProcessor::ChannelTypeAudio,
  402. sourceNodes.getUnchecked(i),
  403. sourceOutputChans.getUnchecked(i));
  404. if (sourceBufIndex >= 0
  405. && ! isBufferNeededLater (AudioProcessor::ChannelTypeAudio,
  406. ourRenderingIndex,
  407. inputChan,
  408. sourceNodes.getUnchecked(i),
  409. sourceOutputChans.getUnchecked(i)))
  410. {
  411. // we've found one of our input chans that can be re-used..
  412. reusableInputIndex = i;
  413. bufIndex = sourceBufIndex;
  414. const int nodeDelay = getNodeDelay (sourceNodes.getUnchecked (i));
  415. if (nodeDelay < maxLatency)
  416. renderingOps.add (new DelayChannelOp (sourceBufIndex, maxLatency - nodeDelay, false));
  417. break;
  418. }
  419. }
  420. if (reusableInputIndex < 0)
  421. {
  422. // can't re-use any of our input chans, so get a new one and copy everything into it..
  423. bufIndex = getFreeBuffer (AudioProcessor::ChannelTypeAudio);
  424. wassert (bufIndex != 0);
  425. const int srcIndex = getBufferContaining (AudioProcessor::ChannelTypeAudio,
  426. sourceNodes.getUnchecked (0),
  427. sourceOutputChans.getUnchecked (0));
  428. if (srcIndex < 0)
  429. {
  430. // if not found, this is probably a feedback loop
  431. renderingOps.add (new ClearChannelOp (bufIndex, false));
  432. }
  433. else
  434. {
  435. renderingOps.add (new CopyChannelOp (srcIndex, bufIndex, false));
  436. }
  437. reusableInputIndex = 0;
  438. const int nodeDelay = getNodeDelay (sourceNodes.getFirst());
  439. if (nodeDelay < maxLatency)
  440. renderingOps.add (new DelayChannelOp (bufIndex, maxLatency - nodeDelay, false));
  441. }
  442. for (int j = 0; j < sourceNodes.size(); ++j)
  443. {
  444. if (j != reusableInputIndex)
  445. {
  446. int srcIndex = getBufferContaining (AudioProcessor::ChannelTypeAudio,
  447. sourceNodes.getUnchecked(j),
  448. sourceOutputChans.getUnchecked(j));
  449. if (srcIndex >= 0)
  450. {
  451. const int nodeDelay = getNodeDelay (sourceNodes.getUnchecked (j));
  452. if (nodeDelay < maxLatency)
  453. {
  454. if (! isBufferNeededLater (AudioProcessor::ChannelTypeAudio,
  455. ourRenderingIndex, inputChan,
  456. sourceNodes.getUnchecked(j),
  457. sourceOutputChans.getUnchecked(j)))
  458. {
  459. renderingOps.add (new DelayChannelOp (srcIndex, maxLatency - nodeDelay, false));
  460. }
  461. else // buffer is reused elsewhere, can't be delayed
  462. {
  463. const int bufferToDelay = getFreeBuffer (AudioProcessor::ChannelTypeAudio);
  464. renderingOps.add (new CopyChannelOp (srcIndex, bufferToDelay, false));
  465. renderingOps.add (new DelayChannelOp (bufferToDelay, maxLatency - nodeDelay, false));
  466. srcIndex = bufferToDelay;
  467. }
  468. }
  469. renderingOps.add (new AddChannelOp (srcIndex, bufIndex, false));
  470. }
  471. }
  472. }
  473. }
  474. CARLA_SAFE_ASSERT_CONTINUE (bufIndex >= 0);
  475. audioChannelsToUse.add (bufIndex);
  476. if (inputChan < numAudioOuts)
  477. markBufferAsContaining (AudioProcessor::ChannelTypeAudio, bufIndex, node.nodeId, inputChan);
  478. }
  479. for (uint outputChan = numAudioIns; outputChan < numAudioOuts; ++outputChan)
  480. {
  481. const int bufIndex = getFreeBuffer (AudioProcessor::ChannelTypeAudio);
  482. CARLA_SAFE_ASSERT_CONTINUE (bufIndex > 0);
  483. audioChannelsToUse.add (bufIndex);
  484. markBufferAsContaining (AudioProcessor::ChannelTypeAudio, bufIndex, node.nodeId, outputChan);
  485. }
  486. for (uint inputChan = 0; inputChan < numCVIns; ++inputChan)
  487. {
  488. // get a list of all the inputs to this node
  489. Array<uint32> sourceNodes;
  490. Array<uint> sourceOutputChans;
  491. for (int i = graph.getNumConnections(); --i >= 0;)
  492. {
  493. const AudioProcessorGraph::Connection* const c = graph.getConnection (i);
  494. if (c->destNodeId == node.nodeId
  495. && c->destChannelIndex == inputChan
  496. && c->channelType == AudioProcessor::ChannelTypeCV)
  497. {
  498. sourceNodes.add (c->sourceNodeId);
  499. sourceOutputChans.add (c->sourceChannelIndex);
  500. }
  501. }
  502. int bufIndex = -1;
  503. if (sourceNodes.size() == 0)
  504. {
  505. // unconnected input channel
  506. bufIndex = getReadOnlyEmptyBuffer();
  507. wassert (bufIndex >= 0);
  508. }
  509. else if (sourceNodes.size() == 1)
  510. {
  511. // channel with a straightforward single input..
  512. const uint32 srcNode = sourceNodes.getUnchecked(0);
  513. const uint srcChan = sourceOutputChans.getUnchecked(0);
  514. bufIndex = getBufferContaining (AudioProcessor::ChannelTypeCV, srcNode, srcChan);
  515. if (bufIndex < 0)
  516. {
  517. // if not found, this is probably a feedback loop
  518. bufIndex = getReadOnlyEmptyBuffer();
  519. wassert (bufIndex >= 0);
  520. }
  521. const int newFreeBuffer = getFreeBuffer (AudioProcessor::ChannelTypeCV);
  522. renderingOps.add (new CopyChannelOp (bufIndex, newFreeBuffer, true));
  523. bufIndex = newFreeBuffer;
  524. const int nodeDelay = getNodeDelay (srcNode);
  525. if (nodeDelay < maxLatency)
  526. renderingOps.add (new DelayChannelOp (bufIndex, maxLatency - nodeDelay, true));
  527. }
  528. else
  529. {
  530. // channel with a mix of several inputs..
  531. {
  532. bufIndex = getFreeBuffer (AudioProcessor::ChannelTypeCV);
  533. wassert (bufIndex != 0);
  534. const int srcIndex = getBufferContaining (AudioProcessor::ChannelTypeCV,
  535. sourceNodes.getUnchecked (0),
  536. sourceOutputChans.getUnchecked (0));
  537. if (srcIndex < 0)
  538. {
  539. // if not found, this is probably a feedback loop
  540. renderingOps.add (new ClearChannelOp (bufIndex, true));
  541. }
  542. else
  543. {
  544. renderingOps.add (new CopyChannelOp (srcIndex, bufIndex, true));
  545. }
  546. const int nodeDelay = getNodeDelay (sourceNodes.getFirst());
  547. if (nodeDelay < maxLatency)
  548. renderingOps.add (new DelayChannelOp (bufIndex, maxLatency - nodeDelay, true));
  549. }
  550. for (int j = 1; j < sourceNodes.size(); ++j)
  551. {
  552. int srcIndex = getBufferContaining (AudioProcessor::ChannelTypeCV,
  553. sourceNodes.getUnchecked(j),
  554. sourceOutputChans.getUnchecked(j));
  555. if (srcIndex >= 0)
  556. {
  557. const int nodeDelay = getNodeDelay (sourceNodes.getUnchecked (j));
  558. if (nodeDelay < maxLatency)
  559. {
  560. const int bufferToDelay = getFreeBuffer (AudioProcessor::ChannelTypeCV);
  561. renderingOps.add (new CopyChannelOp (srcIndex, bufferToDelay, true));
  562. renderingOps.add (new DelayChannelOp (bufferToDelay, maxLatency - nodeDelay, true));
  563. srcIndex = bufferToDelay;
  564. }
  565. renderingOps.add (new AddChannelOp (srcIndex, bufIndex, true));
  566. }
  567. }
  568. }
  569. CARLA_SAFE_ASSERT_CONTINUE (bufIndex >= 0);
  570. cvInChannelsToUse.add (bufIndex);
  571. markBufferAsContaining (AudioProcessor::ChannelTypeCV, bufIndex, node.nodeId, inputChan);
  572. }
  573. for (uint outputChan = 0; outputChan < numCVOuts; ++outputChan)
  574. {
  575. const int bufIndex = getFreeBuffer (AudioProcessor::ChannelTypeCV);
  576. CARLA_SAFE_ASSERT_CONTINUE (bufIndex > 0);
  577. cvOutChannelsToUse.add (bufIndex);
  578. markBufferAsContaining (AudioProcessor::ChannelTypeCV, bufIndex, node.nodeId, outputChan);
  579. }
  580. // Now the same thing for midi..
  581. Array<uint32> midiSourceNodes;
  582. for (int i = graph.getNumConnections(); --i >= 0;)
  583. {
  584. const AudioProcessorGraph::Connection* const c = graph.getConnection (i);
  585. if (c->destNodeId == node.nodeId && c->channelType == AudioProcessor::ChannelTypeMIDI)
  586. midiSourceNodes.add (c->sourceNodeId);
  587. }
  588. if (midiSourceNodes.size() == 0)
  589. {
  590. // No midi inputs..
  591. midiBufferToUse = getFreeBuffer (AudioProcessor::ChannelTypeMIDI); // need to pick a buffer even if the processor doesn't use midi
  592. if (processor.acceptsMidi() || processor.producesMidi())
  593. renderingOps.add (new ClearMidiBufferOp (midiBufferToUse));
  594. }
  595. else if (midiSourceNodes.size() == 1)
  596. {
  597. // One midi input..
  598. midiBufferToUse = getBufferContaining (AudioProcessor::ChannelTypeMIDI,
  599. midiSourceNodes.getUnchecked(0),
  600. 0);
  601. if (midiBufferToUse >= 0)
  602. {
  603. if (isBufferNeededLater (AudioProcessor::ChannelTypeMIDI,
  604. ourRenderingIndex, 0,
  605. midiSourceNodes.getUnchecked(0), 0))
  606. {
  607. // can't mess up this channel because it's needed later by another node, so we
  608. // need to use a copy of it..
  609. const int newFreeBuffer = getFreeBuffer (AudioProcessor::ChannelTypeMIDI);
  610. renderingOps.add (new CopyMidiBufferOp (midiBufferToUse, newFreeBuffer));
  611. midiBufferToUse = newFreeBuffer;
  612. }
  613. }
  614. else
  615. {
  616. // probably a feedback loop, so just use an empty one..
  617. midiBufferToUse = getFreeBuffer (AudioProcessor::ChannelTypeMIDI); // need to pick a buffer even if the processor doesn't use midi
  618. }
  619. }
  620. else
  621. {
  622. // More than one midi input being mixed..
  623. int reusableInputIndex = -1;
  624. for (int i = 0; i < midiSourceNodes.size(); ++i)
  625. {
  626. const int sourceBufIndex = getBufferContaining (AudioProcessor::ChannelTypeMIDI,
  627. midiSourceNodes.getUnchecked(i),
  628. 0);
  629. if (sourceBufIndex >= 0
  630. && ! isBufferNeededLater (AudioProcessor::ChannelTypeMIDI,
  631. ourRenderingIndex, 0,
  632. midiSourceNodes.getUnchecked(i), 0))
  633. {
  634. // we've found one of our input buffers that can be re-used..
  635. reusableInputIndex = i;
  636. midiBufferToUse = sourceBufIndex;
  637. break;
  638. }
  639. }
  640. if (reusableInputIndex < 0)
  641. {
  642. // can't re-use any of our input buffers, so get a new one and copy everything into it..
  643. midiBufferToUse = getFreeBuffer (AudioProcessor::ChannelTypeMIDI);
  644. wassert (midiBufferToUse >= 0);
  645. const int srcIndex = getBufferContaining (AudioProcessor::ChannelTypeMIDI,
  646. midiSourceNodes.getUnchecked(0),
  647. 0);
  648. if (srcIndex >= 0)
  649. renderingOps.add (new CopyMidiBufferOp (srcIndex, midiBufferToUse));
  650. else
  651. renderingOps.add (new ClearMidiBufferOp (midiBufferToUse));
  652. reusableInputIndex = 0;
  653. }
  654. for (int j = 0; j < midiSourceNodes.size(); ++j)
  655. {
  656. if (j != reusableInputIndex)
  657. {
  658. const int srcIndex = getBufferContaining (AudioProcessor::ChannelTypeMIDI,
  659. midiSourceNodes.getUnchecked(j),
  660. 0);
  661. if (srcIndex >= 0)
  662. renderingOps.add (new AddMidiBufferOp (srcIndex, midiBufferToUse));
  663. }
  664. }
  665. }
  666. if (processor.producesMidi())
  667. markBufferAsContaining (AudioProcessor::ChannelTypeMIDI,
  668. midiBufferToUse, node.nodeId,
  669. 0);
  670. setNodeDelay (node.nodeId, maxLatency + processor.getLatencySamples());
  671. if (numAudioOuts == 0)
  672. totalLatency = maxLatency;
  673. renderingOps.add (new ProcessBufferOp (&node,
  674. audioChannelsToUse,
  675. totalAudioChans,
  676. cvInChannelsToUse,
  677. cvOutChannelsToUse,
  678. midiBufferToUse));
  679. }
  680. //==============================================================================
  681. int getFreeBuffer (const AudioProcessor::ChannelType channelType)
  682. {
  683. switch (channelType)
  684. {
  685. case AudioProcessor::ChannelTypeAudio:
  686. for (int i = 1; i < audioNodeIds.size(); ++i)
  687. if (audioNodeIds.getUnchecked(i) == freeNodeID)
  688. return i;
  689. audioNodeIds.add ((uint32) freeNodeID);
  690. audioChannels.add (0);
  691. return audioNodeIds.size() - 1;
  692. case AudioProcessor::ChannelTypeCV:
  693. for (int i = 1; i < cvNodeIds.size(); ++i)
  694. if (cvNodeIds.getUnchecked(i) == freeNodeID)
  695. return i;
  696. cvNodeIds.add ((uint32) freeNodeID);
  697. cvChannels.add (0);
  698. return cvNodeIds.size() - 1;
  699. case AudioProcessor::ChannelTypeMIDI:
  700. for (int i = 1; i < midiNodeIds.size(); ++i)
  701. if (midiNodeIds.getUnchecked(i) == freeNodeID)
  702. return i;
  703. midiNodeIds.add ((uint32) freeNodeID);
  704. return midiNodeIds.size() - 1;
  705. }
  706. return -1;
  707. }
  708. int getReadOnlyEmptyBuffer() const noexcept
  709. {
  710. return 0;
  711. }
  712. int getBufferContaining (const AudioProcessor::ChannelType channelType,
  713. const uint32 nodeId,
  714. const uint outputChannel) const noexcept
  715. {
  716. switch (channelType)
  717. {
  718. case AudioProcessor::ChannelTypeAudio:
  719. for (int i = audioNodeIds.size(); --i >= 0;)
  720. if (audioNodeIds.getUnchecked(i) == nodeId && audioChannels.getUnchecked(i) == outputChannel)
  721. return i;
  722. break;
  723. case AudioProcessor::ChannelTypeCV:
  724. for (int i = cvNodeIds.size(); --i >= 0;)
  725. if (cvNodeIds.getUnchecked(i) == nodeId && cvChannels.getUnchecked(i) == outputChannel)
  726. return i;
  727. break;
  728. case AudioProcessor::ChannelTypeMIDI:
  729. for (int i = midiNodeIds.size(); --i >= 0;)
  730. {
  731. if (midiNodeIds.getUnchecked(i) == nodeId)
  732. return i;
  733. }
  734. break;
  735. }
  736. return -1;
  737. }
  738. void markAnyUnusedBuffersAsFree (const int stepIndex)
  739. {
  740. for (int i = 0; i < audioNodeIds.size(); ++i)
  741. {
  742. if (isNodeBusy (audioNodeIds.getUnchecked(i))
  743. && ! isBufferNeededLater (AudioProcessor::ChannelTypeAudio,
  744. stepIndex, -1,
  745. audioNodeIds.getUnchecked(i),
  746. audioChannels.getUnchecked(i)))
  747. {
  748. audioNodeIds.set (i, (uint32) freeNodeID);
  749. }
  750. }
  751. // NOTE: CV skipped on purpose
  752. for (int i = 0; i < midiNodeIds.size(); ++i)
  753. {
  754. if (isNodeBusy (midiNodeIds.getUnchecked(i))
  755. && ! isBufferNeededLater (AudioProcessor::ChannelTypeMIDI,
  756. stepIndex, -1,
  757. midiNodeIds.getUnchecked(i), 0))
  758. {
  759. midiNodeIds.set (i, (uint32) freeNodeID);
  760. }
  761. }
  762. }
  763. bool isBufferNeededLater (const AudioProcessor::ChannelType channelType,
  764. int stepIndexToSearchFrom,
  765. uint inputChannelOfIndexToIgnore,
  766. const uint32 nodeId,
  767. const uint outputChanIndex) const
  768. {
  769. while (stepIndexToSearchFrom < orderedNodes.size())
  770. {
  771. const AudioProcessorGraph::Node* const node = (const AudioProcessorGraph::Node*) orderedNodes.getUnchecked (stepIndexToSearchFrom);
  772. for (uint i = 0; i < node->getProcessor()->getTotalNumInputChannels(channelType); ++i)
  773. if (i != inputChannelOfIndexToIgnore
  774. && graph.getConnectionBetween (channelType,
  775. nodeId, outputChanIndex,
  776. node->nodeId, i) != nullptr)
  777. return true;
  778. inputChannelOfIndexToIgnore = (uint)-1;
  779. ++stepIndexToSearchFrom;
  780. }
  781. return false;
  782. }
  783. void markBufferAsContaining (const AudioProcessor::ChannelType channelType,
  784. int bufferNum, uint32 nodeId, int outputIndex)
  785. {
  786. switch (channelType)
  787. {
  788. case AudioProcessor::ChannelTypeAudio:
  789. CARLA_SAFE_ASSERT_BREAK (bufferNum >= 0 && bufferNum < audioNodeIds.size());
  790. audioNodeIds.set (bufferNum, nodeId);
  791. audioChannels.set (bufferNum, outputIndex);
  792. break;
  793. case AudioProcessor::ChannelTypeCV:
  794. CARLA_SAFE_ASSERT_BREAK (bufferNum >= 0 && bufferNum < cvNodeIds.size());
  795. cvNodeIds.set (bufferNum, nodeId);
  796. cvChannels.set (bufferNum, outputIndex);
  797. break;
  798. case AudioProcessor::ChannelTypeMIDI:
  799. CARLA_SAFE_ASSERT_BREAK (bufferNum > 0 && bufferNum < midiNodeIds.size());
  800. midiNodeIds.set (bufferNum, nodeId);
  801. break;
  802. }
  803. }
  804. CARLA_DECLARE_NON_COPY_CLASS (RenderingOpSequenceCalculator)
  805. };
  806. //==============================================================================
  807. // Holds a fast lookup table for checking which nodes are inputs to others.
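// Entries are kept sorted by destNodeId; findEntry() is a binary search that, on a miss,
// also reports the index at which a new entry should be inserted.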
  808. class ConnectionLookupTable
  809. {
  810. public:
  811. explicit ConnectionLookupTable (const OwnedArray<AudioProcessorGraph::Connection>& connections)
  812. {
  813. for (int i = 0; i < static_cast<int>(connections.size()); ++i)
  814. {
  815. const AudioProcessorGraph::Connection* const c = connections.getUnchecked(i);
  816. int index;
  817. Entry* entry = findEntry (c->destNodeId, index);
  818. if (entry == nullptr)
  819. {
  820. entry = new Entry (c->destNodeId);
  821. entries.insert (index, entry);
  822. }
  823. entry->srcNodes.add (c->sourceNodeId);
  824. }
  825. }
  826. bool isAnInputTo (const uint32 possibleInputId,
  827. const uint32 possibleDestinationId) const noexcept
  828. {
  829. return isAnInputToRecursive (possibleInputId, possibleDestinationId, entries.size());
  830. }
  831. private:
  832. //==============================================================================
  833. struct Entry
  834. {
  835. explicit Entry (const uint32 destNodeId_) noexcept : destNodeId (destNodeId_) {}
  836. const uint32 destNodeId;
  837. SortedSet<uint32> srcNodes;
  838. CARLA_DECLARE_NON_COPY_CLASS (Entry)
  839. };
  840. OwnedArray<Entry> entries;
  841. bool isAnInputToRecursive (const uint32 possibleInputId,
  842. const uint32 possibleDestinationId,
  843. int recursionCheck) const noexcept
  844. {
  845. int index;
  846. if (const Entry* const entry = findEntry (possibleDestinationId, index))
  847. {
  848. const SortedSet<uint32>& srcNodes = entry->srcNodes;
  849. if (srcNodes.contains (possibleInputId))
  850. return true;
  851. if (--recursionCheck >= 0)
  852. {
  853. for (int i = 0; i < srcNodes.size(); ++i)
  854. if (isAnInputToRecursive (possibleInputId, srcNodes.getUnchecked(i), recursionCheck))
  855. return true;
  856. }
  857. }
  858. return false;
  859. }
  860. Entry* findEntry (const uint32 destNodeId, int& insertIndex) const noexcept
  861. {
  862. Entry* result = nullptr;
  863. int start = 0;
  864. int end = entries.size();
  865. for (;;)
  866. {
  867. if (start >= end)
  868. {
  869. break;
  870. }
  871. else if (destNodeId == entries.getUnchecked (start)->destNodeId)
  872. {
  873. result = entries.getUnchecked (start);
  874. break;
  875. }
  876. else
  877. {
  878. const int halfway = (start + end) / 2;
  879. if (halfway == start)
  880. {
  881. if (destNodeId >= entries.getUnchecked (halfway)->destNodeId)
  882. ++start;
  883. break;
  884. }
  885. else if (destNodeId >= entries.getUnchecked (halfway)->destNodeId)
  886. start = halfway;
  887. else
  888. end = halfway;
  889. }
  890. }
  891. insertIndex = start;
  892. return result;
  893. }
  894. CARLA_DECLARE_NON_COPY_CLASS (ConnectionLookupTable)
  895. };
  896. //==============================================================================
  897. struct ConnectionSorter
  898. {
  899. static int compareElements (const AudioProcessorGraph::Connection* const first,
  900. const AudioProcessorGraph::Connection* const second) noexcept
  901. {
  902. if (first->sourceNodeId < second->sourceNodeId) return -1;
  903. if (first->sourceNodeId > second->sourceNodeId) return 1;
  904. if (first->destNodeId < second->destNodeId) return -1;
  905. if (first->destNodeId > second->destNodeId) return 1;
  906. if (first->sourceChannelIndex < second->sourceChannelIndex) return -1;
  907. if (first->sourceChannelIndex > second->sourceChannelIndex) return 1;
  908. if (first->destChannelIndex < second->destChannelIndex) return -1;
  909. if (first->destChannelIndex > second->destChannelIndex) return 1;
  910. return 0;
  911. }
  912. };
  913. }
  914. //==============================================================================
  915. AudioProcessorGraph::Connection::Connection (ChannelType ct,
  916. const uint32 sourceID, const uint sourceChannel,
  917. const uint32 destID, const uint destChannel) noexcept
  918. : channelType (ct),
  919. sourceNodeId (sourceID), sourceChannelIndex (sourceChannel),
  920. destNodeId (destID), destChannelIndex (destChannel)
  921. {
  922. }
  923. //==============================================================================
  924. AudioProcessorGraph::Node::Node (const uint32 nodeID, AudioProcessor* const p) noexcept
  925. : nodeId (nodeID), processor (p), isPrepared (false)
  926. {
  927. wassert (processor != nullptr);
  928. }
  929. void AudioProcessorGraph::Node::prepare (const double newSampleRate, const int newBlockSize,
  930. AudioProcessorGraph* const graph)
  931. {
  932. if (! isPrepared)
  933. {
  934. isPrepared = true;
  935. setParentGraph (graph);
  936. processor->setRateAndBufferSizeDetails (newSampleRate, newBlockSize);
  937. processor->prepareToPlay (newSampleRate, newBlockSize);
  938. }
  939. }
  940. void AudioProcessorGraph::Node::unprepare()
  941. {
  942. if (isPrepared)
  943. {
  944. isPrepared = false;
  945. processor->releaseResources();
  946. }
  947. }
  948. void AudioProcessorGraph::Node::setParentGraph (AudioProcessorGraph* const graph) const
  949. {
  950. if (AudioProcessorGraph::AudioGraphIOProcessor* const ioProc
  951. = dynamic_cast<AudioProcessorGraph::AudioGraphIOProcessor*> (processor.get()))
  952. ioProc->setParentGraph (graph);
  953. }
  954. //==============================================================================
  955. struct AudioProcessorGraph::AudioProcessorGraphBufferHelpers
  956. {
  957. AudioProcessorGraphBufferHelpers() noexcept
  958. : currentAudioInputBuffer (nullptr),
  959. currentCVInputBuffer (nullptr) {}
  960. void setRenderingBufferSize (int newNumAudioChannels, int newNumCVChannels, int newNumSamples) noexcept
  961. {
  962. renderingAudioBuffers.setSize (newNumAudioChannels, newNumSamples);
  963. renderingAudioBuffers.clear();
  964. renderingCVBuffers.setSize (newNumCVChannels, newNumSamples);
  965. renderingCVBuffers.clear();
  966. }
  967. void release() noexcept
  968. {
  969. renderingAudioBuffers.setSize (1, 1);
  970. currentAudioInputBuffer = nullptr;
  971. currentCVInputBuffer = nullptr;
  972. currentAudioOutputBuffer.setSize (1, 1);
  973. currentCVOutputBuffer.setSize (1, 1);
  974. renderingCVBuffers.setSize (1, 1);
  975. }
  976. void prepareInOutBuffers (int newNumAudioChannels, int newNumCVChannels, int newNumSamples) noexcept
  977. {
  978. currentAudioInputBuffer = nullptr;
  979. currentCVInputBuffer = nullptr;
  980. currentAudioOutputBuffer.setSize (newNumAudioChannels, newNumSamples);
  981. currentCVOutputBuffer.setSize (newNumCVChannels, newNumSamples);
  982. }
  983. AudioSampleBuffer renderingAudioBuffers;
  984. AudioSampleBuffer renderingCVBuffers;
  985. AudioSampleBuffer* currentAudioInputBuffer;
  986. const AudioSampleBuffer* currentCVInputBuffer;
  987. AudioSampleBuffer currentAudioOutputBuffer;
  988. AudioSampleBuffer currentCVOutputBuffer;
  989. };
  990. //==============================================================================
  991. AudioProcessorGraph::AudioProcessorGraph()
  992. : lastNodeId (0), audioAndCVBuffers (new AudioProcessorGraphBufferHelpers),
  993. currentMidiInputBuffer (nullptr), isPrepared (false), needsReorder (false)
  994. {
  995. }
  996. AudioProcessorGraph::~AudioProcessorGraph()
  997. {
  998. clearRenderingSequence();
  999. clear();
  1000. }
  1001. const String AudioProcessorGraph::getName() const
  1002. {
  1003. return "Audio Graph";
  1004. }
  1005. //==============================================================================
  1006. void AudioProcessorGraph::clear()
  1007. {
  1008. nodes.clear();
  1009. connections.clear();
  1010. needsReorder = true;
  1011. }
  1012. AudioProcessorGraph::Node* AudioProcessorGraph::getNodeForId (const uint32 nodeId) const
  1013. {
  1014. for (int i = nodes.size(); --i >= 0;)
  1015. if (nodes.getUnchecked(i)->nodeId == nodeId)
  1016. return nodes.getUnchecked(i);
  1017. return nullptr;
  1018. }
  1019. AudioProcessorGraph::Node* AudioProcessorGraph::addNode (AudioProcessor* const newProcessor, uint32 nodeId)
  1020. {
  1021. CARLA_SAFE_ASSERT_RETURN (newProcessor != nullptr && newProcessor != this, nullptr);
  1022. for (int i = nodes.size(); --i >= 0;)
  1023. {
  1024. CARLA_SAFE_ASSERT_RETURN (nodes.getUnchecked(i)->getProcessor() != newProcessor, nullptr);
  1025. }
  1026. if (nodeId == 0)
  1027. {
  1028. nodeId = ++lastNodeId;
  1029. }
  1030. else
  1031. {
  1032. // you can't add a node with an id that already exists in the graph..
  1033. CARLA_SAFE_ASSERT_RETURN (getNodeForId (nodeId) == nullptr, nullptr);
  1034. removeNode (nodeId);
  1035. if (nodeId > lastNodeId)
  1036. lastNodeId = nodeId;
  1037. }
  1038. Node* const n = new Node (nodeId, newProcessor);
  1039. nodes.add (n);
  1040. if (isPrepared)
  1041. needsReorder = true;
  1042. n->setParentGraph (this);
  1043. return n;
  1044. }
  1045. bool AudioProcessorGraph::removeNode (const uint32 nodeId)
  1046. {
  1047. disconnectNode (nodeId);
  1048. for (int i = nodes.size(); --i >= 0;)
  1049. {
  1050. if (nodes.getUnchecked(i)->nodeId == nodeId)
  1051. {
  1052. nodes.remove (i);
  1053. if (isPrepared)
  1054. needsReorder = true;
  1055. return true;
  1056. }
  1057. }
  1058. return false;
  1059. }
  1060. bool AudioProcessorGraph::removeNode (Node* node)
  1061. {
  1062. CARLA_SAFE_ASSERT_RETURN(node != nullptr, false);
  1063. return removeNode (node->nodeId);
  1064. }
  1065. //==============================================================================
  1066. const AudioProcessorGraph::Connection* AudioProcessorGraph::getConnectionBetween (const ChannelType ct,
  1067. const uint32 sourceNodeId,
  1068. const uint sourceChannelIndex,
  1069. const uint32 destNodeId,
  1070. const uint destChannelIndex) const
  1071. {
  1072. const Connection c (ct, sourceNodeId, sourceChannelIndex, destNodeId, destChannelIndex);
  1073. GraphRenderingOps::ConnectionSorter sorter;
  1074. return connections [connections.indexOfSorted (sorter, &c)];
  1075. }
  1076. bool AudioProcessorGraph::isConnected (const uint32 possibleSourceNodeId,
  1077. const uint32 possibleDestNodeId) const
  1078. {
  1079. for (int i = connections.size(); --i >= 0;)
  1080. {
  1081. const Connection* const c = connections.getUnchecked(i);
  1082. if (c->sourceNodeId == possibleSourceNodeId
  1083. && c->destNodeId == possibleDestNodeId)
  1084. {
  1085. return true;
  1086. }
  1087. }
  1088. return false;
  1089. }
  1090. bool AudioProcessorGraph::canConnect (ChannelType ct,
  1091. const uint32 sourceNodeId,
  1092. const uint sourceChannelIndex,
  1093. const uint32 destNodeId,
  1094. const uint destChannelIndex) const
  1095. {
  1096. if (sourceNodeId == destNodeId)
  1097. return false;
  1098. const Node* const source = getNodeForId (sourceNodeId);
  1099. if (source == nullptr
  1100. || (ct != ChannelTypeMIDI && sourceChannelIndex >= source->processor->getTotalNumOutputChannels(ct))
  1101. || (ct == ChannelTypeMIDI && ! source->processor->producesMidi()))
  1102. return false;
  1103. const Node* const dest = getNodeForId (destNodeId);
  1104. if (dest == nullptr
  1105. || (ct != ChannelTypeMIDI && destChannelIndex >= dest->processor->getTotalNumInputChannels(ct))
  1106. || (ct == ChannelTypeMIDI && ! dest->processor->acceptsMidi()))
  1107. return false;
  1108. return getConnectionBetween (ct,
  1109. sourceNodeId, sourceChannelIndex,
  1110. destNodeId, destChannelIndex) == nullptr;
  1111. }
  1112. bool AudioProcessorGraph::addConnection (const ChannelType ct,
  1113. const uint32 sourceNodeId,
  1114. const uint sourceChannelIndex,
  1115. const uint32 destNodeId,
  1116. const uint destChannelIndex)
  1117. {
  1118. if (! canConnect (ct, sourceNodeId, sourceChannelIndex, destNodeId, destChannelIndex))
  1119. return false;
  1120. GraphRenderingOps::ConnectionSorter sorter;
  1121. connections.addSorted (sorter, new Connection (ct,
  1122. sourceNodeId, sourceChannelIndex,
  1123. destNodeId, destChannelIndex));
  1124. if (isPrepared)
  1125. needsReorder = true;
  1126. return true;
  1127. }
  1128. void AudioProcessorGraph::removeConnection (const int index)
  1129. {
  1130. connections.remove (index);
  1131. if (isPrepared)
  1132. needsReorder = true;
  1133. }
  1134. bool AudioProcessorGraph::removeConnection (const ChannelType ct,
  1135. const uint32 sourceNodeId, const uint sourceChannelIndex,
  1136. const uint32 destNodeId, const uint destChannelIndex)
  1137. {
  1138. bool doneAnything = false;
  1139. for (int i = connections.size(); --i >= 0;)
  1140. {
  1141. const Connection* const c = connections.getUnchecked(i);
  1142. if (c->channelType == ct
  1143. && c->sourceNodeId == sourceNodeId
  1144. && c->destNodeId == destNodeId
  1145. && c->sourceChannelIndex == sourceChannelIndex
  1146. && c->destChannelIndex == destChannelIndex)
  1147. {
  1148. removeConnection (i);
  1149. doneAnything = true;
  1150. }
  1151. }
  1152. return doneAnything;
  1153. }
  1154. bool AudioProcessorGraph::disconnectNode (const uint32 nodeId)
  1155. {
  1156. bool doneAnything = false;
  1157. for (int i = connections.size(); --i >= 0;)
  1158. {
  1159. const Connection* const c = connections.getUnchecked(i);
  1160. if (c->sourceNodeId == nodeId || c->destNodeId == nodeId)
  1161. {
  1162. removeConnection (i);
  1163. doneAnything = true;
  1164. }
  1165. }
  1166. return doneAnything;
  1167. }
  1168. bool AudioProcessorGraph::isConnectionLegal (const Connection* const c) const
  1169. {
  1170. CARLA_SAFE_ASSERT_RETURN (c != nullptr, false);
  1171. const Node* const source = getNodeForId (c->sourceNodeId);
  1172. const Node* const dest = getNodeForId (c->destNodeId);
  1173. return source != nullptr
  1174. && dest != nullptr
  1175. && (c->channelType != ChannelTypeMIDI ? (c->sourceChannelIndex < source->processor->getTotalNumOutputChannels(c->channelType))
  1176. : source->processor->producesMidi())
  1177. && (c->channelType != ChannelTypeMIDI ? (c->destChannelIndex < dest->processor->getTotalNumInputChannels(c->channelType))
  1178. : dest->processor->acceptsMidi());
  1179. }
  1180. bool AudioProcessorGraph::removeIllegalConnections()
  1181. {
  1182. bool doneAnything = false;
  1183. for (int i = connections.size(); --i >= 0;)
  1184. {
  1185. if (! isConnectionLegal (connections.getUnchecked(i)))
  1186. {
  1187. removeConnection (i);
  1188. doneAnything = true;
  1189. }
  1190. }
  1191. return doneAnything;
  1192. }
  1193. //==============================================================================
  1194. static void deleteRenderOpArray (Array<void*>& ops)
  1195. {
  1196. for (int i = ops.size(); --i >= 0;)
  1197. delete static_cast<GraphRenderingOps::AudioGraphRenderingOpBase*> (ops.getUnchecked(i));
  1198. }
  1199. void AudioProcessorGraph::clearRenderingSequence()
  1200. {
  1201. Array<void*> oldOps;
  1202. {
  1203. const CarlaRecursiveMutexLocker cml (getCallbackLock());
  1204. renderingOps.swapWith (oldOps);
  1205. }
  1206. deleteRenderOpArray (oldOps);
  1207. }
  1208. bool AudioProcessorGraph::isAnInputTo (const uint32 possibleInputId,
  1209. const uint32 possibleDestinationId,
  1210. const int recursionCheck) const
  1211. {
  1212. if (recursionCheck > 0)
  1213. {
  1214. for (int i = connections.size(); --i >= 0;)
  1215. {
  1216. const AudioProcessorGraph::Connection* const c = connections.getUnchecked (i);
  1217. if (c->destNodeId == possibleDestinationId
  1218. && (c->sourceNodeId == possibleInputId
  1219. || isAnInputTo (possibleInputId, c->sourceNodeId, recursionCheck - 1)))
  1220. return true;
  1221. }
  1222. }
  1223. return false;
  1224. }
  1225. void AudioProcessorGraph::buildRenderingSequence()
  1226. {
  1227. Array<void*> newRenderingOps;
  1228. int numAudioRenderingBuffersNeeded = 2;
  1229. int numCVRenderingBuffersNeeded = 0;
  1230. int numMidiBuffersNeeded = 1;
  1231. {
  1232. const CarlaRecursiveMutexLocker cml (reorderMutex);
  1233. Array<Node*> orderedNodes;
  1234. {
  1235. const GraphRenderingOps::ConnectionLookupTable table (connections);
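// Insertion sort into topological order: each node is placed just before the first
// already-ordered node that it feeds (directly or indirectly), so sources render before
// their destinations; feedback loops keep insertion order and are later broken by
// falling back to silent buffers.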
  1236. for (int i = 0; i < nodes.size(); ++i)
  1237. {
  1238. Node* const node = nodes.getUnchecked(i);
  1239. node->prepare (getSampleRate(), getBlockSize(), this);
  1240. int j = 0;
  1241. for (; j < orderedNodes.size(); ++j)
  1242. if (table.isAnInputTo (node->nodeId, ((Node*) orderedNodes.getUnchecked(j))->nodeId))
  1243. break;
  1244. orderedNodes.insert (j, node);
  1245. }
  1246. }
  1247. GraphRenderingOps::RenderingOpSequenceCalculator calculator (*this, orderedNodes, newRenderingOps);
  1248. numAudioRenderingBuffersNeeded = calculator.getNumAudioBuffersNeeded();
  1249. numCVRenderingBuffersNeeded = calculator.getNumCVBuffersNeeded();
  1250. numMidiBuffersNeeded = calculator.getNumMidiBuffersNeeded();
  1251. }
  1252. {
  1253. // swap over to the new rendering sequence..
  1254. const CarlaRecursiveMutexLocker cml (getCallbackLock());
  1255. audioAndCVBuffers->setRenderingBufferSize (numAudioRenderingBuffersNeeded,
  1256. numCVRenderingBuffersNeeded,
  1257. getBlockSize());
  1258. for (int i = static_cast<int>(midiBuffers.size()); --i >= 0;)
  1259. midiBuffers.getUnchecked(i)->clear();
  1260. while (static_cast<int>(midiBuffers.size()) < numMidiBuffersNeeded)
  1261. midiBuffers.add (new MidiBuffer());
  1262. renderingOps.swapWith (newRenderingOps);
  1263. }
  1264. // delete the old ones..
  1265. deleteRenderOpArray (newRenderingOps);
  1266. }
  1267. //==============================================================================
  1268. void AudioProcessorGraph::prepareToPlay (double sampleRate, int estimatedSamplesPerBlock)
  1269. {
  1270. setRateAndBufferSizeDetails(sampleRate, estimatedSamplesPerBlock);
  1271. audioAndCVBuffers->prepareInOutBuffers(jmax(1U, getTotalNumOutputChannels(AudioProcessor::ChannelTypeAudio)),
  1272. jmax(1U, getTotalNumOutputChannels(AudioProcessor::ChannelTypeCV)),
  1273. estimatedSamplesPerBlock);
  1274. currentMidiInputBuffer = nullptr;
  1275. currentMidiOutputBuffer.clear();
  1276. clearRenderingSequence();
  1277. buildRenderingSequence();
  1278. isPrepared = true;
  1279. }
  1280. void AudioProcessorGraph::releaseResources()
  1281. {
  1282. isPrepared = false;
  1283. for (int i = 0; i < nodes.size(); ++i)
  1284. nodes.getUnchecked(i)->unprepare();
  1285. audioAndCVBuffers->release();
  1286. midiBuffers.clear();
  1287. currentMidiInputBuffer = nullptr;
  1288. currentMidiOutputBuffer.clear();
  1289. }
  1290. void AudioProcessorGraph::reset()
  1291. {
  1292. const CarlaRecursiveMutexLocker cml (getCallbackLock());
  1293. for (int i = 0; i < nodes.size(); ++i)
  1294. nodes.getUnchecked(i)->getProcessor()->reset();
  1295. }
  1296. void AudioProcessorGraph::setNonRealtime (bool isProcessingNonRealtime) noexcept
  1297. {
  1298. const CarlaRecursiveMutexLocker cml (getCallbackLock());
  1299. AudioProcessor::setNonRealtime (isProcessingNonRealtime);
  1300. for (int i = 0; i < nodes.size(); ++i)
  1301. nodes.getUnchecked(i)->getProcessor()->setNonRealtime (isProcessingNonRealtime);
  1302. }
  1303. /*
  1304. void AudioProcessorGraph::processAudio (AudioSampleBuffer& audioBuffer, MidiBuffer& midiMessages)
  1305. {
  1306. AudioSampleBuffer*& currentAudioInputBuffer = audioAndCVBuffers->currentAudioInputBuffer;
  1307. AudioSampleBuffer& currentAudioOutputBuffer = audioAndCVBuffers->currentAudioOutputBuffer;
  1308. AudioSampleBuffer& renderingAudioBuffers = audioAndCVBuffers->renderingAudioBuffers;
  1309. AudioSampleBuffer& renderingCVBuffers = audioAndCVBuffers->renderingCVBuffers;
  1310. const int numSamples = audioBuffer.getNumSamples();
  1311. if (! audioAndCVBuffers->currentAudioOutputBuffer.setSizeRT(numSamples))
  1312. return;
  1313. if (! audioAndCVBuffers->renderingAudioBuffers.setSizeRT(numSamples))
  1314. return;
  1315. if (! audioAndCVBuffers->renderingCVBuffers.setSizeRT(numSamples))
  1316. return;
  1317. currentAudioInputBuffer = &audioBuffer;
  1318. currentAudioOutputBuffer.clear();
  1319. currentMidiInputBuffer = &midiMessages;
  1320. currentMidiOutputBuffer.clear();
  1321. for (int i = 0; i < renderingOps.size(); ++i)
  1322. {
  1323. GraphRenderingOps::AudioGraphRenderingOpBase* const op
  1324. = (GraphRenderingOps::AudioGraphRenderingOpBase*) renderingOps.getUnchecked(i);
  1325. op->perform (renderingAudioBuffers, renderingCVBuffers, midiBuffers, numSamples);
  1326. }
  1327. for (uint32_t i = 0; i < audioBuffer.getNumChannels(); ++i)
  1328. audioBuffer.copyFrom (i, 0, currentAudioOutputBuffer, i, 0, numSamples);
  1329. midiMessages.clear();
  1330. midiMessages.addEvents (currentMidiOutputBuffer, 0, audioBuffer.getNumSamples(), 0);
  1331. }
  1332. */
  1333. void AudioProcessorGraph::processAudioAndCV (AudioSampleBuffer& audioBuffer,
  1334. const AudioSampleBuffer& cvInBuffer,
  1335. AudioSampleBuffer& cvOutBuffer,
  1336. MidiBuffer& midiMessages)
  1337. {
  1338. AudioSampleBuffer*& currentAudioInputBuffer = audioAndCVBuffers->currentAudioInputBuffer;
  1339. const AudioSampleBuffer*& currentCVInputBuffer = audioAndCVBuffers->currentCVInputBuffer;
  1340. AudioSampleBuffer& currentAudioOutputBuffer = audioAndCVBuffers->currentAudioOutputBuffer;
  1341. AudioSampleBuffer& currentCVOutputBuffer = audioAndCVBuffers->currentCVOutputBuffer;
  1342. AudioSampleBuffer& renderingAudioBuffers = audioAndCVBuffers->renderingAudioBuffers;
  1343. AudioSampleBuffer& renderingCVBuffers = audioAndCVBuffers->renderingCVBuffers;
  1344. const int numSamples = audioBuffer.getNumSamples();
  1345. if (! audioAndCVBuffers->currentAudioOutputBuffer.setSizeRT(numSamples))
  1346. return;
  1347. if (! audioAndCVBuffers->currentCVOutputBuffer.setSizeRT(numSamples))
  1348. return;
  1349. if (! audioAndCVBuffers->renderingAudioBuffers.setSizeRT(numSamples))
  1350. return;
  1351. if (! audioAndCVBuffers->renderingCVBuffers.setSizeRT(numSamples))
  1352. return;
  1353. currentAudioInputBuffer = &audioBuffer;
  1354. currentCVInputBuffer = &cvInBuffer;
  1355. currentAudioOutputBuffer.clear();
  1356. currentCVOutputBuffer.clear();
  1357. currentMidiInputBuffer = &midiMessages;
  1358. currentMidiOutputBuffer.clear();
  1359. for (int i = 0; i < renderingOps.size(); ++i)
  1360. {
  1361. GraphRenderingOps::AudioGraphRenderingOpBase* const op
  1362. = (GraphRenderingOps::AudioGraphRenderingOpBase*) renderingOps.getUnchecked(i);
  1363. op->perform (renderingAudioBuffers, renderingCVBuffers, midiBuffers, numSamples);
  1364. }
  1365. for (uint32_t i = 0; i < audioBuffer.getNumChannels(); ++i)
  1366. audioBuffer.copyFrom (i, 0, currentAudioOutputBuffer, i, 0, numSamples);
  1367. for (uint32_t i = 0; i < cvOutBuffer.getNumChannels(); ++i)
  1368. cvOutBuffer.copyFrom (i, 0, currentCVOutputBuffer, i, 0, numSamples);
  1369. midiMessages.clear();
  1370. midiMessages.addEvents (currentMidiOutputBuffer, 0, audioBuffer.getNumSamples(), 0);
  1371. }
  1372. bool AudioProcessorGraph::acceptsMidi() const { return true; }
  1373. bool AudioProcessorGraph::producesMidi() const { return true; }
  1374. /*
  1375. void AudioProcessorGraph::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
  1376. {
  1377. processAudio (buffer, midiMessages);
  1378. }
  1379. */
  1380. void AudioProcessorGraph::processBlockWithCV (AudioSampleBuffer& audioBuffer,
  1381. const AudioSampleBuffer& cvInBuffer,
  1382. AudioSampleBuffer& cvOutBuffer,
  1383. MidiBuffer& midiMessages)
  1384. {
  1385. processAudioAndCV (audioBuffer, cvInBuffer, cvOutBuffer, midiMessages);
  1386. }
  1387. void AudioProcessorGraph::reorderNowIfNeeded()
  1388. {
  1389. if (needsReorder)
  1390. {
  1391. needsReorder = false;
  1392. buildRenderingSequence();
  1393. }
  1394. }
  1395. const CarlaRecursiveMutex& AudioProcessorGraph::getReorderMutex() const
  1396. {
  1397. return reorderMutex;
  1398. }
  1399. //==============================================================================
  1400. AudioProcessorGraph::AudioGraphIOProcessor::AudioGraphIOProcessor (const IODeviceType deviceType)
  1401. : type (deviceType), graph (nullptr)
  1402. {
  1403. }
  1404. AudioProcessorGraph::AudioGraphIOProcessor::~AudioGraphIOProcessor()
  1405. {
  1406. }
  1407. const String AudioProcessorGraph::AudioGraphIOProcessor::getName() const
  1408. {
  1409. switch (type)
  1410. {
  1411. case audioOutputNode: return "Audio Output";
  1412. case audioInputNode: return "Audio Input";
  1413. case cvOutputNode: return "CV Output";
  1414. case cvInputNode: return "CV Input";
  1415. case midiOutputNode: return "Midi Output";
  1416. case midiInputNode: return "Midi Input";
  1417. default: break;
  1418. }
  1419. return String();
  1420. }
  1421. void AudioProcessorGraph::AudioGraphIOProcessor::prepareToPlay (double, int)
  1422. {
  1423. CARLA_SAFE_ASSERT (graph != nullptr);
  1424. }
  1425. void AudioProcessorGraph::AudioGraphIOProcessor::releaseResources()
  1426. {
  1427. }
  1428. void AudioProcessorGraph::AudioGraphIOProcessor::processAudioAndCV (AudioSampleBuffer& audioBuffer,
  1429. const AudioSampleBuffer& cvInBuffer,
  1430. AudioSampleBuffer& cvOutBuffer,
  1431. MidiBuffer& midiMessages)
  1432. {
  1433. CARLA_SAFE_ASSERT_RETURN(graph != nullptr,);
  1434. switch (type)
  1435. {
  1436. case audioOutputNode:
  1437. {
  1438. AudioSampleBuffer& currentAudioOutputBuffer =
  1439. graph->audioAndCVBuffers->currentAudioOutputBuffer;
  1440. for (int i = jmin (currentAudioOutputBuffer.getNumChannels(),
  1441. audioBuffer.getNumChannels()); --i >= 0;)
  1442. {
  1443. currentAudioOutputBuffer.addFrom (i, 0, audioBuffer, i, 0, audioBuffer.getNumSamples());
  1444. }
  1445. break;
  1446. }
  1447. case audioInputNode:
  1448. {
  1449. AudioSampleBuffer*& currentAudioInputBuffer =
  1450. graph->audioAndCVBuffers->currentAudioInputBuffer;
  1451. for (int i = jmin (currentAudioInputBuffer->getNumChannels(),
  1452. audioBuffer.getNumChannels()); --i >= 0;)
  1453. {
  1454. audioBuffer.copyFrom (i, 0, *currentAudioInputBuffer, i, 0, audioBuffer.getNumSamples());
  1455. }
  1456. break;
  1457. }
  1458. case cvOutputNode:
  1459. {
  1460. AudioSampleBuffer& currentCVOutputBuffer =
  1461. graph->audioAndCVBuffers->currentCVOutputBuffer;
  1462. for (int i = jmin (currentCVOutputBuffer.getNumChannels(),
  1463. cvInBuffer.getNumChannels()); --i >= 0;)
  1464. {
  1465. currentCVOutputBuffer.addFrom (i, 0, cvInBuffer, i, 0, cvInBuffer.getNumSamples());
  1466. }
  1467. break;
  1468. }
  1469. case cvInputNode:
  1470. {
  1471. const AudioSampleBuffer*& currentCVInputBuffer =
  1472. graph->audioAndCVBuffers->currentCVInputBuffer;
  1473. for (int i = jmin (currentCVInputBuffer->getNumChannels(),
  1474. cvOutBuffer.getNumChannels()); --i >= 0;)
  1475. {
  1476. cvOutBuffer.copyFrom (i, 0, *currentCVInputBuffer, i, 0, cvOutBuffer.getNumSamples());
  1477. }
  1478. break;
  1479. }
  1480. case midiOutputNode:
  1481. graph->currentMidiOutputBuffer.addEvents (midiMessages, 0, audioBuffer.getNumSamples(), 0);
  1482. break;
  1483. case midiInputNode:
  1484. midiMessages.addEvents (*graph->currentMidiInputBuffer, 0, audioBuffer.getNumSamples(), 0);
  1485. break;
  1486. default:
  1487. break;
  1488. }
  1489. }
  1490. void AudioProcessorGraph::AudioGraphIOProcessor::processBlockWithCV (AudioSampleBuffer& audioBuffer,
  1491. const AudioSampleBuffer& cvInBuffer,
  1492. AudioSampleBuffer& cvOutBuffer,
  1493. MidiBuffer& midiMessages)
  1494. {
  1495. processAudioAndCV (audioBuffer, cvInBuffer, cvOutBuffer, midiMessages);
  1496. }
  1497. bool AudioProcessorGraph::AudioGraphIOProcessor::acceptsMidi() const
  1498. {
  1499. return type == midiOutputNode;
  1500. }
  1501. bool AudioProcessorGraph::AudioGraphIOProcessor::producesMidi() const
  1502. {
  1503. return type == midiInputNode;
  1504. }
  1505. bool AudioProcessorGraph::AudioGraphIOProcessor::isInput() const noexcept
  1506. {
  1507. return type == audioInputNode || type == cvInputNode || type == midiInputNode;
  1508. }
  1509. bool AudioProcessorGraph::AudioGraphIOProcessor::isOutput() const noexcept
  1510. {
  1511. return type == audioOutputNode || type == cvOutputNode || type == midiOutputNode;
  1512. }
  1513. void AudioProcessorGraph::AudioGraphIOProcessor::setParentGraph (AudioProcessorGraph* const newGraph)
  1514. {
  1515. graph = newGraph;
  1516. if (graph != nullptr)
  1517. {
  1518. setPlayConfigDetails (type == audioOutputNode
  1519. ? graph->getTotalNumOutputChannels(AudioProcessor::ChannelTypeAudio)
  1520. : 0,
  1521. type == audioInputNode
  1522. ? graph->getTotalNumInputChannels(AudioProcessor::ChannelTypeAudio)
  1523. : 0,
  1524. type == cvOutputNode
  1525. ? graph->getTotalNumOutputChannels(AudioProcessor::ChannelTypeCV)
  1526. : 0,
  1527. type == cvInputNode
  1528. ? graph->getTotalNumInputChannels(AudioProcessor::ChannelTypeCV)
  1529. : 0,
  1530. type == midiOutputNode
  1531. ? graph->getTotalNumOutputChannels(AudioProcessor::ChannelTypeMIDI)
  1532. : 0,
  1533. type == midiInputNode
  1534. ? graph->getTotalNumInputChannels(AudioProcessor::ChannelTypeMIDI)
  1535. : 0,
  1536. getSampleRate(),
  1537. getBlockSize());
  1538. }
  1539. }
  1540. }
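/*
    Illustrative usage sketch (not part of the library and untested): it only shows how the
    pieces defined above are meant to fit together. The AudioSampleBuffer/MidiBuffer calls and
    the public availability of setPlayConfigDetails() on the graph are assumptions based on the
    APIs used elsewhere in this file.

    using namespace water;

    AudioProcessorGraph graph;

    // Configure the graph's own I/O: 2 audio ins/outs, no CV, MIDI in and out.
    graph.setPlayConfigDetails (2, 2, 0, 0, 1, 1, 44100.0, 512);

    // I/O nodes expose the graph's external ports inside the graph.
    AudioProcessorGraph::Node* const in = graph.addNode (
        new AudioProcessorGraph::AudioGraphIOProcessor (
            AudioProcessorGraph::AudioGraphIOProcessor::audioInputNode), 0);
    AudioProcessorGraph::Node* const out = graph.addNode (
        new AudioProcessorGraph::AudioGraphIOProcessor (
            AudioProcessorGraph::AudioGraphIOProcessor::audioOutputNode), 0);

    // Pass both audio channels straight through (a real host would insert plugin nodes here).
    graph.addConnection (AudioProcessor::ChannelTypeAudio, in->nodeId, 0, out->nodeId, 0);
    graph.addConnection (AudioProcessor::ChannelTypeAudio, in->nodeId, 1, out->nodeId, 1);

    graph.prepareToPlay (44100.0, 512); // builds the rendering sequence

    AudioSampleBuffer audio; audio.setSize (2, 512);
    AudioSampleBuffer cvIn;  cvIn.setSize  (1, 512);
    AudioSampleBuffer cvOut; cvOut.setSize (1, 512);
    MidiBuffer midi;

    graph.processBlockWithCV (audio, cvIn, cvOut, midi); // 'audio' is replaced by the graph output

    graph.releaseResources();
*/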