// Audio plugin host - https://kx.studio/carla
  1. /*
  2. ==============================================================================
  3. This file is part of the Water library.
  4. Copyright (c) 2015 ROLI Ltd.
  5. Copyright (C) 2017-2018 Filipe Coelho <falktx@falktx.com>
  6. Permission is granted to use this software under the terms of the GNU
  7. General Public License as published by the Free Software Foundation;
  8. either version 2 of the License, or any later version.
  9. This program is distributed in the hope that it will be useful, but WITHOUT
  10. ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
  11. FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
  12. For a full copy of the GNU General Public License see the doc/GPL.txt file.
  13. ==============================================================================
  14. */
  15. #include "AudioProcessorGraph.h"
  16. #include "../containers/SortedSet.h"
  17. namespace water {
  18. const uint AudioProcessorGraph::midiChannelIndex = 0x4000;
  19. //==============================================================================
  20. namespace GraphRenderingOps
  21. {
  22. struct AudioGraphRenderingOpBase
  23. {
  24. AudioGraphRenderingOpBase() noexcept {}
  25. virtual ~AudioGraphRenderingOpBase() {}
  26. virtual void perform (AudioSampleBuffer& sharedAudioBufferChans,
  27. AudioSampleBuffer& sharedCVBufferChans,
  28. const OwnedArray<MidiBuffer>& sharedMidiBuffers,
  29. const int numSamples) = 0;
  30. };
  31. // use CRTP
  32. template <class Child>
  33. struct AudioGraphRenderingOp : public AudioGraphRenderingOpBase
  34. {
  35. void perform (AudioSampleBuffer& sharedAudioBufferChans,
  36. AudioSampleBuffer& sharedCVBufferChans,
  37. const OwnedArray<MidiBuffer>& sharedMidiBuffers,
  38. const int numSamples) override
  39. {
  40. static_cast<Child*> (this)->perform (sharedAudioBufferChans,
  41. sharedCVBufferChans,
  42. sharedMidiBuffers,
  43. numSamples);
  44. }
  45. };
  46. //==============================================================================
  47. struct ClearChannelOp : public AudioGraphRenderingOp<ClearChannelOp>
  48. {
  49. ClearChannelOp (const int channel, const bool cv) noexcept
  50. : channelNum (channel), isCV (cv) {}
  51. void perform (AudioSampleBuffer& sharedAudioBufferChans,
  52. AudioSampleBuffer& sharedCVBufferChans,
  53. const OwnedArray<MidiBuffer>&,
  54. const int numSamples)
  55. {
  56. if (isCV)
  57. sharedCVBufferChans.clear (channelNum, 0, numSamples);
  58. else
  59. sharedAudioBufferChans.clear (channelNum, 0, numSamples);
  60. }
  61. const int channelNum;
  62. const bool isCV;
  63. CARLA_DECLARE_NON_COPY_CLASS (ClearChannelOp)
  64. };
  65. //==============================================================================
  66. struct CopyChannelOp : public AudioGraphRenderingOp<CopyChannelOp>
  67. {
  68. CopyChannelOp (const int srcChan, const int dstChan, const bool cv) noexcept
  69. : srcChannelNum (srcChan), dstChannelNum (dstChan), isCV (cv) {}
  70. void perform (AudioSampleBuffer& sharedAudioBufferChans,
  71. AudioSampleBuffer& sharedCVBufferChans,
  72. const OwnedArray<MidiBuffer>&,
  73. const int numSamples)
  74. {
  75. if (isCV)
  76. sharedCVBufferChans.copyFrom (dstChannelNum, 0, sharedCVBufferChans, srcChannelNum, 0, numSamples);
  77. else
  78. sharedAudioBufferChans.copyFrom (dstChannelNum, 0, sharedAudioBufferChans, srcChannelNum, 0, numSamples);
  79. }
  80. const int srcChannelNum, dstChannelNum;
  81. const bool isCV;
  82. CARLA_DECLARE_NON_COPY_CLASS (CopyChannelOp)
  83. };
  84. //==============================================================================
  85. struct AddChannelOp : public AudioGraphRenderingOp<AddChannelOp>
  86. {
  87. AddChannelOp (const int srcChan, const int dstChan, const bool cv) noexcept
  88. : srcChannelNum (srcChan), dstChannelNum (dstChan), isCV (cv) {}
  89. void perform (AudioSampleBuffer& sharedAudioBufferChans,
  90. AudioSampleBuffer& sharedCVBufferChans,
  91. const OwnedArray<MidiBuffer>&,
  92. const int numSamples)
  93. {
  94. if (isCV)
  95. sharedCVBufferChans.addFrom (dstChannelNum, 0, sharedCVBufferChans, srcChannelNum, 0, numSamples);
  96. else
  97. sharedAudioBufferChans.addFrom (dstChannelNum, 0, sharedAudioBufferChans, srcChannelNum, 0, numSamples);
  98. }
  99. const int srcChannelNum, dstChannelNum;
  100. const bool isCV;
  101. CARLA_DECLARE_NON_COPY_CLASS (AddChannelOp)
  102. };
  103. //==============================================================================
  104. struct ClearMidiBufferOp : public AudioGraphRenderingOp<ClearMidiBufferOp>
  105. {
  106. ClearMidiBufferOp (const int buffer) noexcept : bufferNum (buffer) {}
  107. void perform (AudioSampleBuffer&, AudioSampleBuffer&,
  108. const OwnedArray<MidiBuffer>& sharedMidiBuffers,
  109. const int)
  110. {
  111. sharedMidiBuffers.getUnchecked (bufferNum)->clear();
  112. }
  113. const int bufferNum;
  114. CARLA_DECLARE_NON_COPY_CLASS (ClearMidiBufferOp)
  115. };
  116. //==============================================================================
  117. struct CopyMidiBufferOp : public AudioGraphRenderingOp<CopyMidiBufferOp>
  118. {
  119. CopyMidiBufferOp (const int srcBuffer, const int dstBuffer) noexcept
  120. : srcBufferNum (srcBuffer), dstBufferNum (dstBuffer)
  121. {}
  122. void perform (AudioSampleBuffer&, AudioSampleBuffer&,
  123. const OwnedArray<MidiBuffer>& sharedMidiBuffers,
  124. const int)
  125. {
  126. *sharedMidiBuffers.getUnchecked (dstBufferNum) = *sharedMidiBuffers.getUnchecked (srcBufferNum);
  127. }
  128. const int srcBufferNum, dstBufferNum;
  129. CARLA_DECLARE_NON_COPY_CLASS (CopyMidiBufferOp)
  130. };
  131. //==============================================================================
  132. struct AddMidiBufferOp : public AudioGraphRenderingOp<AddMidiBufferOp>
  133. {
  134. AddMidiBufferOp (const int srcBuffer, const int dstBuffer)
  135. : srcBufferNum (srcBuffer), dstBufferNum (dstBuffer)
  136. {}
  137. void perform (AudioSampleBuffer&, AudioSampleBuffer&,
  138. const OwnedArray<MidiBuffer>& sharedMidiBuffers,
  139. const int numSamples)
  140. {
  141. sharedMidiBuffers.getUnchecked (dstBufferNum)
  142. ->addEvents (*sharedMidiBuffers.getUnchecked (srcBufferNum), 0, numSamples, 0);
  143. }
  144. const int srcBufferNum, dstBufferNum;
  145. CARLA_DECLARE_NON_COPY_CLASS (AddMidiBufferOp)
  146. };
  147. //==============================================================================
  148. struct DelayChannelOp : public AudioGraphRenderingOp<DelayChannelOp>
  149. {
  150. DelayChannelOp (const int chan, const int delaySize, const bool cv)
  151. : channel (chan),
  152. bufferSize (delaySize + 1),
  153. readIndex (0), writeIndex (delaySize),
  154. isCV (cv)
  155. {
  156. buffer.calloc ((size_t) bufferSize);
  157. }
  158. void perform (AudioSampleBuffer& sharedAudioBufferChans,
  159. AudioSampleBuffer& sharedCVBufferChans,
  160. const OwnedArray<MidiBuffer>&,
  161. const int numSamples)
  162. {
  163. float* data = isCV
  164. ? sharedCVBufferChans.getWritePointer (channel, 0)
  165. : sharedAudioBufferChans.getWritePointer (channel, 0);
  166. HeapBlock<float>& block = buffer;
  167. for (int i = numSamples; --i >= 0;)
  168. {
  169. block [writeIndex] = *data;
  170. *data++ = block [readIndex];
  171. if (++readIndex >= bufferSize) readIndex = 0;
  172. if (++writeIndex >= bufferSize) writeIndex = 0;
  173. }
  174. }
  175. private:
  176. HeapBlock<float> buffer;
  177. const int channel, bufferSize;
  178. int readIndex, writeIndex;
  179. const bool isCV;
  180. CARLA_DECLARE_NON_COPY_CLASS (DelayChannelOp)
  181. };
  182. //==============================================================================
  183. struct ProcessBufferOp : public AudioGraphRenderingOp<ProcessBufferOp>
  184. {
  185. ProcessBufferOp (const AudioProcessorGraph::Node::Ptr& n,
  186. const Array<uint>& audioChannelsUsed,
  187. const uint totalNumChans,
  188. const Array<uint>& cvInChannelsUsed,
  189. const Array<uint>& cvOutChannelsUsed,
  190. const int midiBuffer)
  191. : node (n),
  192. processor (n->getProcessor()),
  193. audioChannelsToUse (audioChannelsUsed),
  194. cvInChannelsToUse (cvInChannelsUsed),
  195. cvOutChannelsToUse (cvOutChannelsUsed),
  196. totalAudioChans (jmax (1U, totalNumChans)),
  197. totalCVIns (cvInChannelsUsed.size()),
  198. totalCVOuts (cvOutChannelsUsed.size()),
  199. midiBufferToUse (midiBuffer)
  200. {
  201. audioChannels.calloc (totalAudioChans);
  202. cvInChannels.calloc (totalCVIns);
  203. cvOutChannels.calloc (totalCVOuts);
  204. while (audioChannelsToUse.size() < static_cast<int>(totalAudioChans))
  205. audioChannelsToUse.add (0);
  206. }
  207. void perform (AudioSampleBuffer& sharedAudioBufferChans,
  208. AudioSampleBuffer& sharedCVBufferChans,
  209. const OwnedArray<MidiBuffer>& sharedMidiBuffers,
  210. const int numSamples)
  211. {
  212. HeapBlock<float*>& audioChannelsCopy = audioChannels;
  213. HeapBlock<float*>& cvInChannelsCopy = cvInChannels;
  214. HeapBlock<float*>& cvOutChannelsCopy = cvOutChannels;
  215. for (uint i = 0; i < totalAudioChans; ++i)
  216. audioChannelsCopy[i] = sharedAudioBufferChans.getWritePointer (audioChannelsToUse.getUnchecked (i), 0);
  217. for (uint i = 0; i < totalCVIns; ++i)
  218. cvInChannels[i] = sharedCVBufferChans.getWritePointer (cvInChannelsToUse.getUnchecked (i), 0);
  219. for (uint i = 0; i < totalCVOuts; ++i)
  220. cvOutChannels[i] = sharedCVBufferChans.getWritePointer (cvOutChannelsToUse.getUnchecked (i), 0);
  221. AudioSampleBuffer audioBuffer (audioChannelsCopy, totalAudioChans, numSamples);
  222. AudioSampleBuffer cvInBuffer (cvInChannelsCopy, totalCVIns, numSamples);
  223. AudioSampleBuffer cvOutBuffer (cvOutChannelsCopy, totalCVOuts, numSamples);
  224. if (processor->isSuspended())
  225. {
  226. audioBuffer.clear();
  227. cvOutBuffer.clear();
  228. }
  229. else
  230. {
  231. const CarlaRecursiveMutexLocker cml (processor->getCallbackLock());
  232. callProcess (audioBuffer, cvInBuffer, cvOutBuffer, *sharedMidiBuffers.getUnchecked (midiBufferToUse));
  233. }
  234. }
  235. void callProcess (AudioSampleBuffer& audioBuffer,
  236. AudioSampleBuffer& cvInBuffer,
  237. AudioSampleBuffer& cvOutBuffer,
  238. MidiBuffer& midiMessages)
  239. {
  240. processor->processBlockWithCV (audioBuffer, cvInBuffer, cvOutBuffer, midiMessages);
  241. }
  242. const AudioProcessorGraph::Node::Ptr node;
  243. AudioProcessor* const processor;
  244. private:
  245. Array<uint> audioChannelsToUse;
  246. Array<uint> cvInChannelsToUse;
  247. Array<uint> cvOutChannelsToUse;
  248. HeapBlock<float*> audioChannels;
  249. HeapBlock<float*> cvInChannels;
  250. HeapBlock<float*> cvOutChannels;
  251. AudioSampleBuffer tempBuffer;
  252. const uint totalAudioChans;
  253. const uint totalCVIns;
  254. const uint totalCVOuts;
  255. const int midiBufferToUse;
  256. CARLA_DECLARE_NON_COPY_CLASS (ProcessBufferOp)
  257. };
  258. //==============================================================================
  259. /** Used to calculate the correct sequence of rendering ops needed, based on
  260. the best re-use of shared buffers at each stage.
  261. */
  262. struct RenderingOpSequenceCalculator
  263. {
  264. RenderingOpSequenceCalculator (AudioProcessorGraph& g,
  265. const Array<AudioProcessorGraph::Node*>& nodes,
  266. Array<void*>& renderingOps)
  267. : graph (g),
  268. orderedNodes (nodes),
  269. totalLatency (0)
  270. {
  271. audioNodeIds.add ((uint32) zeroNodeID); // first buffer is read-only zeros
  272. audioChannels.add (0);
  273. cvNodeIds.add ((uint32) zeroNodeID);
  274. cvChannels.add (0);
  275. midiNodeIds.add ((uint32) zeroNodeID);
  276. for (int i = 0; i < orderedNodes.size(); ++i)
  277. {
  278. createRenderingOpsForNode (*orderedNodes.getUnchecked(i), renderingOps, i);
  279. markAnyUnusedBuffersAsFree (i);
  280. }
  281. graph.setLatencySamples (totalLatency);
  282. }
  283. int getNumAudioBuffersNeeded() const noexcept { return audioNodeIds.size(); }
  284. int getNumCVBuffersNeeded() const noexcept { return cvNodeIds.size(); }
  285. int getNumMidiBuffersNeeded() const noexcept { return midiNodeIds.size(); }
  286. private:
  287. //==============================================================================
  288. AudioProcessorGraph& graph;
  289. const Array<AudioProcessorGraph::Node*>& orderedNodes;
  290. Array<uint> audioChannels, cvChannels;
  291. Array<uint32> audioNodeIds, cvNodeIds, midiNodeIds;
  292. enum { freeNodeID = 0xffffffff, zeroNodeID = 0xfffffffe };
  293. static bool isNodeBusy (uint32 nodeID) noexcept { return nodeID != freeNodeID && nodeID != zeroNodeID; }
  294. Array<uint32> nodeDelayIDs;
  295. Array<int> nodeDelays;
  296. int totalLatency;
  297. int getNodeDelay (const uint32 nodeID) const { return nodeDelays [nodeDelayIDs.indexOf (nodeID)]; }
  298. void setNodeDelay (const uint32 nodeID, const int latency)
  299. {
  300. const int index = nodeDelayIDs.indexOf (nodeID);
  301. if (index >= 0)
  302. {
  303. nodeDelays.set (index, latency);
  304. }
  305. else
  306. {
  307. nodeDelayIDs.add (nodeID);
  308. nodeDelays.add (latency);
  309. }
  310. }
  311. int getInputLatencyForNode (const uint32 nodeID) const
  312. {
  313. int maxLatency = 0;
  314. for (int i = graph.getNumConnections(); --i >= 0;)
  315. {
  316. const AudioProcessorGraph::Connection* const c = graph.getConnection (i);
  317. if (c->destNodeId == nodeID)
  318. maxLatency = jmax (maxLatency, getNodeDelay (c->sourceNodeId));
  319. }
  320. return maxLatency;
  321. }
  322. //==============================================================================
  323. void createRenderingOpsForNode (AudioProcessorGraph::Node& node,
  324. Array<void*>& renderingOps,
  325. const int ourRenderingIndex)
  326. {
  327. AudioProcessor& processor = *node.getProcessor();
  328. const uint numAudioIns = processor.getTotalNumInputChannels(AudioProcessor::ChannelTypeAudio);
  329. const uint numAudioOuts = processor.getTotalNumOutputChannels(AudioProcessor::ChannelTypeAudio);
  330. const uint numCVIns = processor.getTotalNumInputChannels(AudioProcessor::ChannelTypeCV);
  331. const uint numCVOuts = processor.getTotalNumOutputChannels(AudioProcessor::ChannelTypeCV);
  332. const uint totalAudioChans = jmax (numAudioIns, numAudioOuts);
  333. Array<uint> audioChannelsToUse, cvInChannelsToUse, cvOutChannelsToUse;
  334. int midiBufferToUse = -1;
  335. int maxLatency = getInputLatencyForNode (node.nodeId);
  336. for (uint inputChan = 0; inputChan < numAudioIns; ++inputChan)
  337. {
  338. // get a list of all the inputs to this node
  339. Array<uint32> sourceNodes;
  340. Array<uint> sourceOutputChans;
  341. for (int i = graph.getNumConnections(); --i >= 0;)
  342. {
  343. const AudioProcessorGraph::Connection* const c = graph.getConnection (i);
  344. if (c->destNodeId == node.nodeId
  345. && c->destChannelIndex == inputChan
  346. && c->channelType == AudioProcessor::ChannelTypeAudio)
  347. {
  348. sourceNodes.add (c->sourceNodeId);
  349. sourceOutputChans.add (c->sourceChannelIndex);
  350. }
  351. }
  352. int bufIndex = -1;
  353. if (sourceNodes.size() == 0)
  354. {
  355. // unconnected input channel
  356. if (inputChan >= numAudioOuts)
  357. {
  358. bufIndex = getReadOnlyEmptyBuffer();
  359. wassert (bufIndex >= 0);
  360. }
  361. else
  362. {
  363. bufIndex = getFreeBuffer (AudioProcessor::ChannelTypeAudio);
  364. renderingOps.add (new ClearChannelOp (bufIndex, false));
  365. }
  366. }
  367. else if (sourceNodes.size() == 1)
  368. {
  369. // channel with a straightforward single input..
  370. const uint32 srcNode = sourceNodes.getUnchecked(0);
  371. const uint srcChan = sourceOutputChans.getUnchecked(0);
  372. bufIndex = getBufferContaining (AudioProcessor::ChannelTypeAudio, srcNode, srcChan);
  373. if (bufIndex < 0)
  374. {
  375. // if not found, this is probably a feedback loop
  376. bufIndex = getReadOnlyEmptyBuffer();
  377. wassert (bufIndex >= 0);
  378. }
  379. if (inputChan < numAudioOuts
  380. && isBufferNeededLater (ourRenderingIndex,
  381. inputChan,
  382. srcNode, srcChan))
  383. {
  384. // can't mess up this channel because it's needed later by another node, so we
  385. // need to use a copy of it..
  386. const int newFreeBuffer = getFreeBuffer (AudioProcessor::ChannelTypeAudio);
  387. renderingOps.add (new CopyChannelOp (bufIndex, newFreeBuffer, false));
  388. bufIndex = newFreeBuffer;
  389. }
  390. const int nodeDelay = getNodeDelay (srcNode);
  391. if (nodeDelay < maxLatency)
  392. renderingOps.add (new DelayChannelOp (bufIndex, maxLatency - nodeDelay, false));
  393. }
  394. else
  395. {
  396. // channel with a mix of several inputs..
  397. // try to find a re-usable channel from our inputs..
  398. int reusableInputIndex = -1;
  399. for (int i = 0; i < sourceNodes.size(); ++i)
  400. {
  401. const int sourceBufIndex = getBufferContaining (AudioProcessor::ChannelTypeAudio,
  402. sourceNodes.getUnchecked(i),
  403. sourceOutputChans.getUnchecked(i));
  404. if (sourceBufIndex >= 0
  405. && ! isBufferNeededLater (ourRenderingIndex,
  406. inputChan,
  407. sourceNodes.getUnchecked(i),
  408. sourceOutputChans.getUnchecked(i)))
  409. {
  410. // we've found one of our input chans that can be re-used..
  411. reusableInputIndex = i;
  412. bufIndex = sourceBufIndex;
  413. const int nodeDelay = getNodeDelay (sourceNodes.getUnchecked (i));
  414. if (nodeDelay < maxLatency)
  415. renderingOps.add (new DelayChannelOp (sourceBufIndex, maxLatency - nodeDelay, false));
  416. break;
  417. }
  418. }
  419. if (reusableInputIndex < 0)
  420. {
  421. // can't re-use any of our input chans, so get a new one and copy everything into it..
  422. bufIndex = getFreeBuffer (AudioProcessor::ChannelTypeAudio);
  423. wassert (bufIndex != 0);
  424. const int srcIndex = getBufferContaining (AudioProcessor::ChannelTypeAudio,
  425. sourceNodes.getUnchecked (0),
  426. sourceOutputChans.getUnchecked (0));
  427. if (srcIndex < 0)
  428. {
  429. // if not found, this is probably a feedback loop
  430. renderingOps.add (new ClearChannelOp (bufIndex, false));
  431. }
  432. else
  433. {
  434. renderingOps.add (new CopyChannelOp (srcIndex, bufIndex, false));
  435. }
  436. reusableInputIndex = 0;
  437. const int nodeDelay = getNodeDelay (sourceNodes.getFirst());
  438. if (nodeDelay < maxLatency)
  439. renderingOps.add (new DelayChannelOp (bufIndex, maxLatency - nodeDelay, false));
  440. }
  441. for (int j = 0; j < sourceNodes.size(); ++j)
  442. {
  443. if (j != reusableInputIndex)
  444. {
  445. int srcIndex = getBufferContaining (AudioProcessor::ChannelTypeAudio,
  446. sourceNodes.getUnchecked(j),
  447. sourceOutputChans.getUnchecked(j));
  448. if (srcIndex >= 0)
  449. {
  450. const int nodeDelay = getNodeDelay (sourceNodes.getUnchecked (j));
  451. if (nodeDelay < maxLatency)
  452. {
  453. if (! isBufferNeededLater (ourRenderingIndex, inputChan,
  454. sourceNodes.getUnchecked(j),
  455. sourceOutputChans.getUnchecked(j)))
  456. {
  457. renderingOps.add (new DelayChannelOp (srcIndex, maxLatency - nodeDelay, false));
  458. }
  459. else // buffer is reused elsewhere, can't be delayed
  460. {
  461. const int bufferToDelay = getFreeBuffer (AudioProcessor::ChannelTypeAudio);
  462. renderingOps.add (new CopyChannelOp (srcIndex, bufferToDelay, false));
  463. renderingOps.add (new DelayChannelOp (bufferToDelay, maxLatency - nodeDelay, false));
  464. srcIndex = bufferToDelay;
  465. }
  466. }
  467. renderingOps.add (new AddChannelOp (srcIndex, bufIndex, false));
  468. }
  469. }
  470. }
  471. }
  472. CARLA_SAFE_ASSERT_CONTINUE (bufIndex >= 0);
  473. audioChannelsToUse.add (bufIndex);
  474. if (inputChan < numAudioOuts)
  475. markBufferAsContaining (AudioProcessor::ChannelTypeAudio, bufIndex, node.nodeId, inputChan);
  476. }
  477. for (uint outputChan = numAudioIns; outputChan < numAudioOuts; ++outputChan)
  478. {
  479. const int bufIndex = getFreeBuffer (AudioProcessor::ChannelTypeAudio);
  480. CARLA_SAFE_ASSERT_CONTINUE (bufIndex > 0);
  481. audioChannelsToUse.add (bufIndex);
  482. markBufferAsContaining (AudioProcessor::ChannelTypeAudio, bufIndex, node.nodeId, outputChan);
  483. }
  484. for (uint inputChan = 0; inputChan < numCVIns; ++inputChan)
  485. {
  486. // get a list of all the inputs to this node
  487. Array<uint32> sourceNodes;
  488. Array<uint> sourceOutputChans;
  489. for (int i = graph.getNumConnections(); --i >= 0;)
  490. {
  491. const AudioProcessorGraph::Connection* const c = graph.getConnection (i);
  492. if (c->destNodeId == node.nodeId
  493. && c->destChannelIndex == inputChan
  494. && c->channelType == AudioProcessor::ChannelTypeCV)
  495. {
  496. sourceNodes.add (c->sourceNodeId);
  497. sourceOutputChans.add (c->sourceChannelIndex);
  498. }
  499. }
  500. int bufIndex = -1;
  501. if (sourceNodes.size() == 0)
  502. {
  503. // unconnected input channel
  504. bufIndex = getReadOnlyEmptyBuffer();
  505. wassert (bufIndex >= 0);
  506. }
  507. else if (sourceNodes.size() == 1)
  508. {
  509. // channel with a straightforward single input..
  510. const uint32 srcNode = sourceNodes.getUnchecked(0);
  511. const uint srcChan = sourceOutputChans.getUnchecked(0);
  512. bufIndex = getBufferContaining (AudioProcessor::ChannelTypeCV, srcNode, srcChan);
  513. if (bufIndex < 0)
  514. {
  515. // if not found, this is probably a feedback loop
  516. bufIndex = getReadOnlyEmptyBuffer();
  517. wassert (bufIndex >= 0);
  518. }
  519. const int newFreeBuffer = getFreeBuffer (AudioProcessor::ChannelTypeCV);
  520. renderingOps.add (new CopyChannelOp (bufIndex, newFreeBuffer, true));
  521. bufIndex = newFreeBuffer;
  522. const int nodeDelay = getNodeDelay (srcNode);
  523. if (nodeDelay < maxLatency)
  524. renderingOps.add (new DelayChannelOp (bufIndex, maxLatency - nodeDelay, true));
  525. }
  526. else
  527. {
  528. // channel with a mix of several inputs..
  529. {
  530. bufIndex = getFreeBuffer (AudioProcessor::ChannelTypeCV);
  531. wassert (bufIndex != 0);
  532. const int srcIndex = getBufferContaining (AudioProcessor::ChannelTypeCV,
  533. sourceNodes.getUnchecked (0),
  534. sourceOutputChans.getUnchecked (0));
  535. if (srcIndex < 0)
  536. {
  537. // if not found, this is probably a feedback loop
  538. renderingOps.add (new ClearChannelOp (bufIndex, true));
  539. }
  540. else
  541. {
  542. renderingOps.add (new CopyChannelOp (srcIndex, bufIndex, true));
  543. }
  544. const int nodeDelay = getNodeDelay (sourceNodes.getFirst());
  545. if (nodeDelay < maxLatency)
  546. renderingOps.add (new DelayChannelOp (bufIndex, maxLatency - nodeDelay, true));
  547. }
  548. for (int j = 1; j < sourceNodes.size(); ++j)
  549. {
  550. int srcIndex = getBufferContaining (AudioProcessor::ChannelTypeCV,
  551. sourceNodes.getUnchecked(j),
  552. sourceOutputChans.getUnchecked(j));
  553. if (srcIndex >= 0)
  554. {
  555. const int nodeDelay = getNodeDelay (sourceNodes.getUnchecked (j));
  556. if (nodeDelay < maxLatency)
  557. {
  558. const int bufferToDelay = getFreeBuffer (AudioProcessor::ChannelTypeCV);
  559. renderingOps.add (new CopyChannelOp (srcIndex, bufferToDelay, true));
  560. renderingOps.add (new DelayChannelOp (bufferToDelay, maxLatency - nodeDelay, true));
  561. srcIndex = bufferToDelay;
  562. }
  563. renderingOps.add (new AddChannelOp (srcIndex, bufIndex, true));
  564. }
  565. }
  566. }
  567. CARLA_SAFE_ASSERT_CONTINUE (bufIndex >= 0);
  568. cvInChannelsToUse.add (bufIndex);
  569. markBufferAsContaining (AudioProcessor::ChannelTypeCV, bufIndex, node.nodeId, inputChan);
  570. }
  571. for (uint outputChan = 0; outputChan < numCVOuts; ++outputChan)
  572. {
  573. const int bufIndex = getFreeBuffer (AudioProcessor::ChannelTypeCV);
  574. CARLA_SAFE_ASSERT_CONTINUE (bufIndex > 0);
  575. cvOutChannelsToUse.add (bufIndex);
  576. markBufferAsContaining (AudioProcessor::ChannelTypeCV, bufIndex, node.nodeId, outputChan);
  577. }
  578. // Now the same thing for midi..
  579. Array<uint32> midiSourceNodes;
  580. for (int i = graph.getNumConnections(); --i >= 0;)
  581. {
  582. const AudioProcessorGraph::Connection* const c = graph.getConnection (i);
  583. if (c->destNodeId == node.nodeId && c->channelType == AudioProcessor::ChannelTypeMIDI)
  584. midiSourceNodes.add (c->sourceNodeId);
  585. }
  586. if (midiSourceNodes.size() == 0)
  587. {
  588. // No midi inputs..
  589. midiBufferToUse = getFreeBuffer (AudioProcessor::ChannelTypeMIDI); // need to pick a buffer even if the processor doesn't use midi
  590. if (processor.acceptsMidi() || processor.producesMidi())
  591. renderingOps.add (new ClearMidiBufferOp (midiBufferToUse));
  592. }
  593. else if (midiSourceNodes.size() == 1)
  594. {
  595. // One midi input..
  596. midiBufferToUse = getBufferContaining (AudioProcessor::ChannelTypeMIDI,
  597. midiSourceNodes.getUnchecked(0),
  598. 0);
  599. if (midiBufferToUse >= 0)
  600. {
  601. if (isBufferNeededLater (ourRenderingIndex,
  602. AudioProcessorGraph::midiChannelIndex,
  603. midiSourceNodes.getUnchecked(0),
  604. AudioProcessorGraph::midiChannelIndex))
  605. {
  606. // can't mess up this channel because it's needed later by another node, so we
  607. // need to use a copy of it..
  608. const int newFreeBuffer = getFreeBuffer (AudioProcessor::ChannelTypeMIDI);
  609. renderingOps.add (new CopyMidiBufferOp (midiBufferToUse, newFreeBuffer));
  610. midiBufferToUse = newFreeBuffer;
  611. }
  612. }
  613. else
  614. {
  615. // probably a feedback loop, so just use an empty one..
  616. midiBufferToUse = getFreeBuffer (AudioProcessor::ChannelTypeMIDI); // need to pick a buffer even if the processor doesn't use midi
  617. }
  618. }
  619. else
  620. {
  621. // More than one midi input being mixed..
  622. int reusableInputIndex = -1;
  623. for (int i = 0; i < midiSourceNodes.size(); ++i)
  624. {
  625. const int sourceBufIndex = getBufferContaining (AudioProcessor::ChannelTypeMIDI,
  626. midiSourceNodes.getUnchecked(i),
  627. 0);
  628. if (sourceBufIndex >= 0
  629. && ! isBufferNeededLater (ourRenderingIndex,
  630. AudioProcessorGraph::midiChannelIndex,
  631. midiSourceNodes.getUnchecked(i),
  632. AudioProcessorGraph::midiChannelIndex))
  633. {
  634. // we've found one of our input buffers that can be re-used..
  635. reusableInputIndex = i;
  636. midiBufferToUse = sourceBufIndex;
  637. break;
  638. }
  639. }
  640. if (reusableInputIndex < 0)
  641. {
  642. // can't re-use any of our input buffers, so get a new one and copy everything into it..
  643. midiBufferToUse = getFreeBuffer (AudioProcessor::ChannelTypeMIDI);
  644. wassert (midiBufferToUse >= 0);
  645. const int srcIndex = getBufferContaining (AudioProcessor::ChannelTypeMIDI,
  646. midiSourceNodes.getUnchecked(0),
  647. 0);
  648. if (srcIndex >= 0)
  649. renderingOps.add (new CopyMidiBufferOp (srcIndex, midiBufferToUse));
  650. else
  651. renderingOps.add (new ClearMidiBufferOp (midiBufferToUse));
  652. reusableInputIndex = 0;
  653. }
  654. for (int j = 0; j < midiSourceNodes.size(); ++j)
  655. {
  656. if (j != reusableInputIndex)
  657. {
  658. const int srcIndex = getBufferContaining (AudioProcessor::ChannelTypeMIDI,
  659. midiSourceNodes.getUnchecked(j),
  660. 0);
  661. if (srcIndex >= 0)
  662. renderingOps.add (new AddMidiBufferOp (srcIndex, midiBufferToUse));
  663. }
  664. }
  665. }
  666. if (processor.producesMidi())
  667. markBufferAsContaining (AudioProcessor::ChannelTypeMIDI,
  668. midiBufferToUse, node.nodeId,
  669. 0);
  670. setNodeDelay (node.nodeId, maxLatency + processor.getLatencySamples());
  671. if (numAudioOuts == 0)
  672. totalLatency = maxLatency;
  673. renderingOps.add (new ProcessBufferOp (&node,
  674. audioChannelsToUse,
  675. totalAudioChans,
  676. cvInChannelsToUse,
  677. cvOutChannelsToUse,
  678. midiBufferToUse));
  679. }
  680. //==============================================================================
  681. int getFreeBuffer (const AudioProcessor::ChannelType channelType)
  682. {
  683. switch (channelType)
  684. {
  685. case AudioProcessor::ChannelTypeAudio:
  686. for (int i = 1; i < audioNodeIds.size(); ++i)
  687. if (audioNodeIds.getUnchecked(i) == freeNodeID)
  688. return i;
  689. audioNodeIds.add ((uint32) freeNodeID);
  690. audioChannels.add (0);
  691. return audioNodeIds.size() - 1;
  692. case AudioProcessor::ChannelTypeCV:
  693. for (int i = 1; i < cvNodeIds.size(); ++i)
  694. if (cvNodeIds.getUnchecked(i) == freeNodeID)
  695. return i;
  696. cvNodeIds.add ((uint32) freeNodeID);
  697. cvChannels.add (0);
  698. return cvNodeIds.size() - 1;
  699. case AudioProcessor::ChannelTypeMIDI:
  700. for (int i = 1; i < midiNodeIds.size(); ++i)
  701. if (midiNodeIds.getUnchecked(i) == freeNodeID)
  702. return i;
  703. midiNodeIds.add ((uint32) freeNodeID);
  704. return midiNodeIds.size() - 1;
  705. }
  706. return -1;
  707. }
  708. int getReadOnlyEmptyBuffer() const noexcept
  709. {
  710. return 0;
  711. }
  712. int getBufferContaining (const AudioProcessor::ChannelType channelType,
  713. const uint32 nodeId,
  714. const uint outputChannel) const noexcept
  715. {
  716. switch (channelType)
  717. {
  718. case AudioProcessor::ChannelTypeAudio:
  719. for (int i = audioNodeIds.size(); --i >= 0;)
  720. if (audioNodeIds.getUnchecked(i) == nodeId && audioChannels.getUnchecked(i) == outputChannel)
  721. return i;
  722. break;
  723. case AudioProcessor::ChannelTypeCV:
  724. for (int i = cvNodeIds.size(); --i >= 0;)
  725. if (cvNodeIds.getUnchecked(i) == nodeId && cvChannels.getUnchecked(i) == outputChannel)
  726. return i;
  727. break;
  728. case AudioProcessor::ChannelTypeMIDI:
  729. for (int i = midiNodeIds.size(); --i >= 0;)
  730. if (midiNodeIds.getUnchecked(i) == nodeId)
  731. return i;
  732. break;
  733. }
  734. return -1;
  735. }
  736. void markAnyUnusedBuffersAsFree (const int stepIndex)
  737. {
  738. for (int i = 0; i < audioNodeIds.size(); ++i)
  739. {
  740. if (isNodeBusy (audioNodeIds.getUnchecked(i))
  741. && ! isBufferNeededLater (stepIndex, -1,
  742. audioNodeIds.getUnchecked(i),
  743. audioChannels.getUnchecked(i)))
  744. {
  745. audioNodeIds.set (i, (uint32) freeNodeID);
  746. }
  747. }
  748. // NOTE: CV skipped on purpose
  749. for (int i = 0; i < midiNodeIds.size(); ++i)
  750. {
  751. if (isNodeBusy (midiNodeIds.getUnchecked(i))
  752. && ! isBufferNeededLater (stepIndex, -1,
  753. midiNodeIds.getUnchecked(i),
  754. AudioProcessorGraph::midiChannelIndex))
  755. {
  756. midiNodeIds.set (i, (uint32) freeNodeID);
  757. }
  758. }
  759. }
    // Returns true if the buffer holding the output of (nodeId, outputChanIndex)
    // is still read by any rendering step from stepIndexToSearchFrom onwards,
    // i.e. it must not yet be recycled.
    // inputChannelOfIndexToIgnore masks one destination channel of the first
    // step only (the input currently being wired up); it is reset to -1 after
    // that step so all later inputs are considered.
    bool isBufferNeededLater (int stepIndexToSearchFrom,
                              uint inputChannelOfIndexToIgnore,
                              const uint32 nodeId,
                              const uint outputChanIndex) const
    {
        while (stepIndexToSearchFrom < orderedNodes.size())
        {
            const AudioProcessorGraph::Node* const node = (const AudioProcessorGraph::Node*) orderedNodes.getUnchecked (stepIndexToSearchFrom);

            if (outputChanIndex == AudioProcessorGraph::midiChannelIndex)
            {
                // MIDI buffer: needed if any later node takes MIDI from nodeId.
                if (inputChannelOfIndexToIgnore != AudioProcessorGraph::midiChannelIndex
                     && graph.getConnectionBetween (AudioProcessor::ChannelTypeAudio,
                                                    nodeId, AudioProcessorGraph::midiChannelIndex,
                                                    node->nodeId, AudioProcessorGraph::midiChannelIndex) != nullptr)
                    return true;
            }
            else
            {
                // NOTE(review): ChannelTypeAudio is passed even when this is a CV
                // buffer, and only the audio input count is scanned. The
                // connection sort order ignores channelType (see ConnectionSorter),
                // so the lookup itself still matches — confirm the channel-count
                // assumption against the CV wiring code.
                for (uint i = 0; i < node->getProcessor()->getTotalNumInputChannels(AudioProcessor::ChannelTypeAudio); ++i)
                    if (i != inputChannelOfIndexToIgnore
                         && graph.getConnectionBetween (AudioProcessor::ChannelTypeAudio,
                                                        nodeId, outputChanIndex,
                                                        node->nodeId, i) != nullptr)
                        return true;
            }

            // Only the very first step gets a channel masked out.
            inputChannelOfIndexToIgnore = (uint)-1;

            ++stepIndexToSearchFrom;
        }

        return false;
    }
    // Records which node/output-channel pair the given shared buffer now
    // holds, so later steps can locate (or eventually free) it.
    void markBufferAsContaining (const AudioProcessor::ChannelType channelType,
                                 int bufferNum, uint32 nodeId, int outputIndex)
    {
        switch (channelType)
        {
        case AudioProcessor::ChannelTypeAudio:
            CARLA_SAFE_ASSERT_BREAK (bufferNum >= 0 && bufferNum < audioNodeIds.size());

            audioNodeIds.set (bufferNum, nodeId);
            audioChannels.set (bufferNum, outputIndex);
            break;

        case AudioProcessor::ChannelTypeCV:
            CARLA_SAFE_ASSERT_BREAK (bufferNum >= 0 && bufferNum < cvNodeIds.size());

            cvNodeIds.set (bufferNum, nodeId);
            cvChannels.set (bufferNum, outputIndex);
            break;

        case AudioProcessor::ChannelTypeMIDI:
            // NOTE(review): the MIDI case rejects bufferNum == 0 (`> 0`) while
            // the audio/CV cases allow it (`>= 0`) — presumably MIDI buffer 0
            // is reserved for the graph's incoming MIDI; confirm against the
            // buffer-allocation logic earlier in this class.
            CARLA_SAFE_ASSERT_BREAK (bufferNum > 0 && bufferNum < midiNodeIds.size());

            midiNodeIds.set (bufferNum, nodeId);
            break;
        }
    }
  811. CARLA_DECLARE_NON_COPY_CLASS (RenderingOpSequenceCalculator)
  812. };
  813. //==============================================================================
  814. // Holds a fast lookup table for checking which nodes are inputs to others.
class ConnectionLookupTable
{
public:
    // Builds one entry per destination node, each holding the sorted set of
    // source nodes that feed directly into it.
    explicit ConnectionLookupTable (const OwnedArray<AudioProcessorGraph::Connection>& connections)
    {
        for (int i = 0; i < static_cast<int>(connections.size()); ++i)
        {
            const AudioProcessorGraph::Connection* const c = connections.getUnchecked(i);

            int index;
            Entry* entry = findEntry (c->destNodeId, index);

            if (entry == nullptr)
            {
                entry = new Entry (c->destNodeId);
                entries.insert (index, entry);
            }

            entry->srcNodes.add (c->sourceNodeId);
        }
    }

    // Returns true if possibleInputId feeds, directly or through other
    // nodes, into possibleDestinationId.
    bool isAnInputTo (const uint32 possibleInputId,
                      const uint32 possibleDestinationId) const noexcept
    {
        // entries.size() bounds the recursion depth, so a cyclic graph
        // cannot recurse forever.
        return isAnInputToRecursive (possibleInputId, possibleDestinationId, entries.size());
    }

private:
    //==============================================================================
    // One destination node plus the set of nodes connected into it.
    struct Entry
    {
        explicit Entry (const uint32 destNodeId_) noexcept : destNodeId (destNodeId_) {}

        const uint32 destNodeId;
        SortedSet<uint32> srcNodes;

        CARLA_DECLARE_NON_COPY_CLASS (Entry)
    };

    // Kept sorted by destNodeId so findEntry can binary-search.
    OwnedArray<Entry> entries;

    // Depth-limited search through the sources feeding possibleDestinationId.
    bool isAnInputToRecursive (const uint32 possibleInputId,
                               const uint32 possibleDestinationId,
                               int recursionCheck) const noexcept
    {
        int index;

        if (const Entry* const entry = findEntry (possibleDestinationId, index))
        {
            const SortedSet<uint32>& srcNodes = entry->srcNodes;

            // Direct connection?
            if (srcNodes.contains (possibleInputId))
                return true;

            // Otherwise recurse into each source while the depth budget lasts.
            if (--recursionCheck >= 0)
            {
                for (int i = 0; i < srcNodes.size(); ++i)
                    if (isAnInputToRecursive (possibleInputId, srcNodes.getUnchecked(i), recursionCheck))
                        return true;
            }
        }

        return false;
    }

    // Binary search for destNodeId. Returns the matching entry (and its
    // position in insertIndex), or nullptr with insertIndex set to where a
    // new entry must be inserted to keep the array sorted.
    Entry* findEntry (const uint32 destNodeId, int& insertIndex) const noexcept
    {
        Entry* result = nullptr;
        int start = 0;
        int end = entries.size();

        for (;;)
        {
            if (start >= end)
            {
                break;
            }
            else if (destNodeId == entries.getUnchecked (start)->destNodeId)
            {
                result = entries.getUnchecked (start);
                break;
            }
            else
            {
                const int halfway = (start + end) / 2;

                if (halfway == start)
                {
                    // Range has narrowed to one non-matching element; decide
                    // whether the insertion point goes before or after it.
                    if (destNodeId >= entries.getUnchecked (halfway)->destNodeId)
                        ++start;

                    break;
                }
                else if (destNodeId >= entries.getUnchecked (halfway)->destNodeId)
                    start = halfway;
                else
                    end = halfway;
            }
        }

        insertIndex = start;
        return result;
    }

    CARLA_DECLARE_NON_COPY_CLASS (ConnectionLookupTable)
};
  903. //==============================================================================
  904. struct ConnectionSorter
  905. {
  906. static int compareElements (const AudioProcessorGraph::Connection* const first,
  907. const AudioProcessorGraph::Connection* const second) noexcept
  908. {
  909. if (first->sourceNodeId < second->sourceNodeId) return -1;
  910. if (first->sourceNodeId > second->sourceNodeId) return 1;
  911. if (first->destNodeId < second->destNodeId) return -1;
  912. if (first->destNodeId > second->destNodeId) return 1;
  913. if (first->sourceChannelIndex < second->sourceChannelIndex) return -1;
  914. if (first->sourceChannelIndex > second->sourceChannelIndex) return 1;
  915. if (first->destChannelIndex < second->destChannelIndex) return -1;
  916. if (first->destChannelIndex > second->destChannelIndex) return 1;
  917. return 0;
  918. }
  919. };
  920. }
  921. //==============================================================================
// Plain value object describing one edge of the graph; all members are set
// here and never change afterwards.
AudioProcessorGraph::Connection::Connection (ChannelType ct,
                                             const uint32 sourceID, const uint sourceChannel,
                                             const uint32 destID, const uint destChannel) noexcept
    : channelType (ct),
      sourceNodeId (sourceID), sourceChannelIndex (sourceChannel),
      destNodeId (destID), destChannelIndex (destChannel)
{
}
  930. //==============================================================================
// A node wraps one processor under a unique id; a null processor is a
// programming error.
AudioProcessorGraph::Node::Node (const uint32 nodeID, AudioProcessor* const p) noexcept
    : nodeId (nodeID), processor (p), isPrepared (false)
{
    wassert (processor != nullptr);
}
  936. void AudioProcessorGraph::Node::prepare (const double newSampleRate, const int newBlockSize,
  937. AudioProcessorGraph* const graph)
  938. {
  939. if (! isPrepared)
  940. {
  941. isPrepared = true;
  942. setParentGraph (graph);
  943. processor->setRateAndBufferSizeDetails (newSampleRate, newBlockSize);
  944. processor->prepareToPlay (newSampleRate, newBlockSize);
  945. }
  946. }
  947. void AudioProcessorGraph::Node::unprepare()
  948. {
  949. if (isPrepared)
  950. {
  951. isPrepared = false;
  952. processor->releaseResources();
  953. }
  954. }
  955. void AudioProcessorGraph::Node::setParentGraph (AudioProcessorGraph* const graph) const
  956. {
  957. if (AudioProcessorGraph::AudioGraphIOProcessor* const ioProc
  958. = dynamic_cast<AudioProcessorGraph::AudioGraphIOProcessor*> (processor.get()))
  959. ioProc->setParentGraph (graph);
  960. }
  961. //==============================================================================
// Bundles all the audio/CV working buffers used during rendering, so they
// can be resized and released as one unit.
struct AudioProcessorGraph::AudioProcessorGraphBufferHelpers
{
    AudioProcessorGraphBufferHelpers() noexcept
        : currentAudioInputBuffer (nullptr),
          currentCVInputBuffer (nullptr) {}

    // Resizes (and zeroes) the intermediate rendering scratch buffers.
    void setRenderingBufferSize (int newNumAudioChannels, int newNumCVChannels, int newNumSamples) noexcept
    {
        renderingAudioBuffers.setSize (newNumAudioChannels, newNumSamples);
        renderingAudioBuffers.clear();

        renderingCVBuffers.setSize (newNumCVChannels, newNumSamples);
        renderingCVBuffers.clear();
    }

    // Shrinks everything to a minimal footprint and drops the (non-owned)
    // input pointers; used when the graph releases its resources.
    void release() noexcept
    {
        renderingAudioBuffers.setSize (1, 1);
        currentAudioInputBuffer = nullptr;
        currentCVInputBuffer = nullptr;
        currentAudioOutputBuffer.setSize (1, 1);
        currentCVOutputBuffer.setSize (1, 1);
        renderingCVBuffers.setSize (1, 1);
    }

    // Sizes the graph-level output buffers for playback and clears the input
    // pointers until the first process call supplies them.
    void prepareInOutBuffers (int newNumAudioChannels, int newNumCVChannels, int newNumSamples) noexcept
    {
        currentAudioInputBuffer = nullptr;
        currentCVInputBuffer = nullptr;
        currentAudioOutputBuffer.setSize (newNumAudioChannels, newNumSamples);
        currentCVOutputBuffer.setSize (newNumCVChannels, newNumSamples);
    }

    AudioSampleBuffer renderingAudioBuffers;        // shared scratch buffers for node audio outputs
    AudioSampleBuffer renderingCVBuffers;           // shared scratch buffers for node CV outputs

    AudioSampleBuffer* currentAudioInputBuffer;     // host audio input for the current block (not owned)
    const AudioSampleBuffer* currentCVInputBuffer;  // host CV input for the current block (not owned)
    AudioSampleBuffer currentAudioOutputBuffer;     // accumulates the graph's final audio output
    AudioSampleBuffer currentCVOutputBuffer;        // accumulates the graph's final CV output
};
  997. //==============================================================================
// Starts out empty. Node ids begin at 1: 0 means "auto-assign" in addNode().
AudioProcessorGraph::AudioProcessorGraph()
    : lastNodeId (0), audioAndCVBuffers (new AudioProcessorGraphBufferHelpers),
      currentMidiInputBuffer (nullptr), isPrepared (false), needsReorder (false)
{
}
AudioProcessorGraph::~AudioProcessorGraph()
{
    // Tear down the rendering ops first, then all nodes and connections.
    clearRenderingSequence();
    clear();
}
  1008. const String AudioProcessorGraph::getName() const
  1009. {
  1010. return "Audio Graph";
  1011. }
  1012. //==============================================================================
// Removes every node and connection; the rendering sequence is rebuilt
// lazily via the needsReorder flag.
void AudioProcessorGraph::clear()
{
    nodes.clear();
    connections.clear();
    needsReorder = true;
}
  1019. AudioProcessorGraph::Node* AudioProcessorGraph::getNodeForId (const uint32 nodeId) const
  1020. {
  1021. for (int i = nodes.size(); --i >= 0;)
  1022. if (nodes.getUnchecked(i)->nodeId == nodeId)
  1023. return nodes.getUnchecked(i);
  1024. return nullptr;
  1025. }
// Adds a processor to the graph. nodeId == 0 auto-assigns the next free id;
// a non-zero id must not already be in use. Returns the new node, or nullptr
// on failure (in which case the graph does NOT take ownership of the
// processor).
AudioProcessorGraph::Node* AudioProcessorGraph::addNode (AudioProcessor* const newProcessor, uint32 nodeId)
{
    CARLA_SAFE_ASSERT_RETURN (newProcessor != nullptr && newProcessor != this, nullptr);

    // The same processor instance may not appear in the graph twice.
    for (int i = nodes.size(); --i >= 0;)
    {
        CARLA_SAFE_ASSERT_RETURN (nodes.getUnchecked(i)->getProcessor() != newProcessor, nullptr);
    }

    if (nodeId == 0)
    {
        nodeId = ++lastNodeId;
    }
    else
    {
        // you can't add a node with an id that already exists in the graph..
        CARLA_SAFE_ASSERT_RETURN (getNodeForId (nodeId) == nullptr, nullptr);
        // No node exists for this id at this point (asserted above), so this
        // only clears out any stale connections still referencing the id.
        removeNode (nodeId);

        if (nodeId > lastNodeId)
            lastNodeId = nodeId;
    }

    Node* const n = new Node (nodeId, newProcessor);
    nodes.add (n);

    if (isPrepared)
        needsReorder = true;

    n->setParentGraph (this);
    return n;
}
  1052. bool AudioProcessorGraph::removeNode (const uint32 nodeId)
  1053. {
  1054. disconnectNode (nodeId);
  1055. for (int i = nodes.size(); --i >= 0;)
  1056. {
  1057. if (nodes.getUnchecked(i)->nodeId == nodeId)
  1058. {
  1059. nodes.remove (i);
  1060. if (isPrepared)
  1061. needsReorder = true;
  1062. return true;
  1063. }
  1064. }
  1065. return false;
  1066. }
// Convenience overload; the node pointer must be non-null.
bool AudioProcessorGraph::removeNode (Node* node)
{
    CARLA_SAFE_ASSERT_RETURN(node != nullptr, false);

    return removeNode (node->nodeId);
}
  1072. //==============================================================================
// Returns the connection with these endpoints, or nullptr if none exists.
// Note: the sort order (see ConnectionSorter) ignores channelType, so the
// match is on node ids and channel indices only.
const AudioProcessorGraph::Connection* AudioProcessorGraph::getConnectionBetween (const ChannelType ct,
                                                                                  const uint32 sourceNodeId,
                                                                                  const uint sourceChannelIndex,
                                                                                  const uint32 destNodeId,
                                                                                  const uint destChannelIndex) const
{
    const Connection c (ct, sourceNodeId, sourceChannelIndex, destNodeId, destChannelIndex);
    GraphRenderingOps::ConnectionSorter sorter;

    // A "not found" index of -1 yields nullptr through the bounds-checked
    // operator[].
    return connections [connections.indexOfSorted (sorter, &c)];
}
  1083. bool AudioProcessorGraph::isConnected (const uint32 possibleSourceNodeId,
  1084. const uint32 possibleDestNodeId) const
  1085. {
  1086. for (int i = connections.size(); --i >= 0;)
  1087. {
  1088. const Connection* const c = connections.getUnchecked(i);
  1089. if (c->sourceNodeId == possibleSourceNodeId
  1090. && c->destNodeId == possibleDestNodeId)
  1091. {
  1092. return true;
  1093. }
  1094. }
  1095. return false;
  1096. }
// Returns true if a new connection with these endpoints would be valid:
// distinct nodes, matching endpoint kinds (MIDI only connects to MIDI),
// in-range channel indices, MIDI capability on both ends, and no identical
// connection already present.
bool AudioProcessorGraph::canConnect (ChannelType ct,
                                      const uint32 sourceNodeId,
                                      const uint sourceChannelIndex,
                                      const uint32 destNodeId,
                                      const uint destChannelIndex) const
{
    // No self-connections.
    if (sourceNodeId == destNodeId)
        return false;

    // midiChannelIndex marks a MIDI endpoint; both ends must agree.
    if ((destChannelIndex == midiChannelIndex) != (sourceChannelIndex == midiChannelIndex))
        return false;

    // Source must exist, and either have the output channel or produce MIDI.
    const Node* const source = getNodeForId (sourceNodeId);

    if (source == nullptr
         || (sourceChannelIndex != midiChannelIndex && sourceChannelIndex >= source->processor->getTotalNumOutputChannels(ct))
         || (sourceChannelIndex == midiChannelIndex && ! source->processor->producesMidi()))
        return false;

    // Destination must exist, and either have the input channel or accept MIDI.
    const Node* const dest = getNodeForId (destNodeId);

    if (dest == nullptr
         || (destChannelIndex != midiChannelIndex && destChannelIndex >= dest->processor->getTotalNumInputChannels(ct))
         || (destChannelIndex == midiChannelIndex && ! dest->processor->acceptsMidi()))
        return false;

    // Finally, reject duplicates.
    return getConnectionBetween (ct,
                                 sourceNodeId, sourceChannelIndex,
                                 destNodeId, destChannelIndex) == nullptr;
}
  1121. bool AudioProcessorGraph::addConnection (const ChannelType ct,
  1122. const uint32 sourceNodeId,
  1123. const uint sourceChannelIndex,
  1124. const uint32 destNodeId,
  1125. const uint destChannelIndex)
  1126. {
  1127. if (! canConnect (ct, sourceNodeId, sourceChannelIndex, destNodeId, destChannelIndex))
  1128. return false;
  1129. GraphRenderingOps::ConnectionSorter sorter;
  1130. connections.addSorted (sorter, new Connection (ct,
  1131. sourceNodeId, sourceChannelIndex,
  1132. destNodeId, destChannelIndex));
  1133. if (isPrepared)
  1134. needsReorder = true;
  1135. return true;
  1136. }
// Removes the connection at the given index in the sorted connection list.
void AudioProcessorGraph::removeConnection (const int index)
{
    connections.remove (index);

    if (isPrepared)
        needsReorder = true;
}
  1143. bool AudioProcessorGraph::removeConnection (const ChannelType ct,
  1144. const uint32 sourceNodeId, const uint sourceChannelIndex,
  1145. const uint32 destNodeId, const uint destChannelIndex)
  1146. {
  1147. bool doneAnything = false;
  1148. for (int i = connections.size(); --i >= 0;)
  1149. {
  1150. const Connection* const c = connections.getUnchecked(i);
  1151. if (c->channelType == ct
  1152. && c->sourceNodeId == sourceNodeId
  1153. && c->destNodeId == destNodeId
  1154. && c->sourceChannelIndex == sourceChannelIndex
  1155. && c->destChannelIndex == destChannelIndex)
  1156. {
  1157. removeConnection (i);
  1158. doneAnything = true;
  1159. }
  1160. }
  1161. return doneAnything;
  1162. }
  1163. bool AudioProcessorGraph::disconnectNode (const uint32 nodeId)
  1164. {
  1165. bool doneAnything = false;
  1166. for (int i = connections.size(); --i >= 0;)
  1167. {
  1168. const Connection* const c = connections.getUnchecked(i);
  1169. if (c->sourceNodeId == nodeId || c->destNodeId == nodeId)
  1170. {
  1171. removeConnection (i);
  1172. doneAnything = true;
  1173. }
  1174. }
  1175. return doneAnything;
  1176. }
// A connection is legal when both endpoints still exist and either the
// channel index is in range, or (for midiChannelIndex endpoints) the
// processor has the corresponding MIDI capability.
bool AudioProcessorGraph::isConnectionLegal (const Connection* const c) const
{
    CARLA_SAFE_ASSERT_RETURN (c != nullptr, false);

    const Node* const source = getNodeForId (c->sourceNodeId);
    const Node* const dest = getNodeForId (c->destNodeId);

    return source != nullptr
        && dest != nullptr
        && (c->sourceChannelIndex != midiChannelIndex ? (c->sourceChannelIndex < source->processor->getTotalNumOutputChannels(c->channelType))
                                                      : source->processor->producesMidi())
        && (c->destChannelIndex != midiChannelIndex ? (c->destChannelIndex < dest->processor->getTotalNumInputChannels(c->channelType))
                                                    : dest->processor->acceptsMidi());
}
  1189. bool AudioProcessorGraph::removeIllegalConnections()
  1190. {
  1191. bool doneAnything = false;
  1192. for (int i = connections.size(); --i >= 0;)
  1193. {
  1194. if (! isConnectionLegal (connections.getUnchecked(i)))
  1195. {
  1196. removeConnection (i);
  1197. doneAnything = true;
  1198. }
  1199. }
  1200. return doneAnything;
  1201. }
  1202. //==============================================================================
  1203. static void deleteRenderOpArray (Array<void*>& ops)
  1204. {
  1205. for (int i = ops.size(); --i >= 0;)
  1206. delete static_cast<GraphRenderingOps::AudioGraphRenderingOpBase*> (ops.getUnchecked(i));
  1207. }
void AudioProcessorGraph::clearRenderingSequence()
{
    Array<void*> oldOps;

    {
        // Swap the sequence out under the callback lock so the audio thread
        // never sees a half-cleared list...
        const CarlaRecursiveMutexLocker cml (getCallbackLock());
        renderingOps.swapWith (oldOps);
    }

    // ...then delete the old ops outside the lock.
    deleteRenderOpArray (oldOps);
}
  1217. bool AudioProcessorGraph::isAnInputTo (const uint32 possibleInputId,
  1218. const uint32 possibleDestinationId,
  1219. const int recursionCheck) const
  1220. {
  1221. if (recursionCheck > 0)
  1222. {
  1223. for (int i = connections.size(); --i >= 0;)
  1224. {
  1225. const AudioProcessorGraph::Connection* const c = connections.getUnchecked (i);
  1226. if (c->destNodeId == possibleDestinationId
  1227. && (c->sourceNodeId == possibleInputId
  1228. || isAnInputTo (possibleInputId, c->sourceNodeId, recursionCheck - 1)))
  1229. return true;
  1230. }
  1231. }
  1232. return false;
  1233. }
// Rebuilds the list of rendering operations from the current nodes and
// connections, then swaps it in under the callback lock so the audio thread
// always sees a complete sequence.
void AudioProcessorGraph::buildRenderingSequence()
{
    Array<void*> newRenderingOps;
    int numAudioRenderingBuffersNeeded = 2;
    int numCVRenderingBuffersNeeded = 0;
    int numMidiBuffersNeeded = 1;

    {
        const CarlaRecursiveMutexLocker cml (reorderMutex);

        Array<Node*> orderedNodes;

        {
            // Topologically sort the nodes: each node is inserted just before
            // the first already-placed node that it feeds into.
            const GraphRenderingOps::ConnectionLookupTable table (connections);

            for (int i = 0; i < nodes.size(); ++i)
            {
                Node* const node = nodes.getUnchecked(i);

                node->prepare (getSampleRate(), getBlockSize(), this);

                int j = 0;
                for (; j < orderedNodes.size(); ++j)
                    if (table.isAnInputTo (node->nodeId, ((Node*) orderedNodes.getUnchecked(j))->nodeId))
                        break;

                orderedNodes.insert (j, node);
            }
        }

        // Generate the ops, and learn how many shared buffers they will need.
        GraphRenderingOps::RenderingOpSequenceCalculator calculator (*this, orderedNodes, newRenderingOps);

        numAudioRenderingBuffersNeeded = calculator.getNumAudioBuffersNeeded();
        numCVRenderingBuffersNeeded = calculator.getNumCVBuffersNeeded();
        numMidiBuffersNeeded = calculator.getNumMidiBuffersNeeded();
    }

    {
        // swap over to the new rendering sequence..
        const CarlaRecursiveMutexLocker cml (getCallbackLock());

        audioAndCVBuffers->setRenderingBufferSize (numAudioRenderingBuffersNeeded,
                                                   numCVRenderingBuffersNeeded,
                                                   getBlockSize());

        // Reuse existing MIDI buffers, growing the pool only if required.
        for (int i = static_cast<int>(midiBuffers.size()); --i >= 0;)
            midiBuffers.getUnchecked(i)->clear();

        while (static_cast<int>(midiBuffers.size()) < numMidiBuffersNeeded)
            midiBuffers.add (new MidiBuffer());

        renderingOps.swapWith (newRenderingOps);
    }

    // delete the old ones..
    deleteRenderOpArray (newRenderingOps);
}
  1276. //==============================================================================
void AudioProcessorGraph::prepareToPlay (double sampleRate, int estimatedSamplesPerBlock)
{
    setRateAndBufferSizeDetails(sampleRate, estimatedSamplesPerBlock);

    // Output buffers get at least one channel each, so the rendering path
    // never has to special-case a zero-channel graph.
    audioAndCVBuffers->prepareInOutBuffers(jmax(1U, getTotalNumOutputChannels(AudioProcessor::ChannelTypeAudio)),
                                           jmax(1U, getTotalNumOutputChannels(AudioProcessor::ChannelTypeCV)),
                                           estimatedSamplesPerBlock);

    currentMidiInputBuffer = nullptr;
    currentMidiOutputBuffer.clear();

    clearRenderingSequence();
    buildRenderingSequence();

    isPrepared = true;
}
void AudioProcessorGraph::releaseResources()
{
    // Back to the unprepared state before touching anything else.
    isPrepared = false;

    // Let each node release its processor's resources.
    for (int i = 0; i < nodes.size(); ++i)
        nodes.getUnchecked(i)->unprepare();

    // Shrink the shared buffers and drop all block-scoped pointers.
    audioAndCVBuffers->release();
    midiBuffers.clear();

    currentMidiInputBuffer = nullptr;
    currentMidiOutputBuffer.clear();
}
  1299. void AudioProcessorGraph::reset()
  1300. {
  1301. const CarlaRecursiveMutexLocker cml (getCallbackLock());
  1302. for (int i = 0; i < nodes.size(); ++i)
  1303. nodes.getUnchecked(i)->getProcessor()->reset();
  1304. }
  1305. void AudioProcessorGraph::setNonRealtime (bool isProcessingNonRealtime) noexcept
  1306. {
  1307. const CarlaRecursiveMutexLocker cml (getCallbackLock());
  1308. AudioProcessor::setNonRealtime (isProcessingNonRealtime);
  1309. for (int i = 0; i < nodes.size(); ++i)
  1310. nodes.getUnchecked(i)->getProcessor()->setNonRealtime (isProcessingNonRealtime);
  1311. }
  1312. /*
  1313. void AudioProcessorGraph::processAudio (AudioSampleBuffer& audioBuffer, MidiBuffer& midiMessages)
  1314. {
  1315. AudioSampleBuffer*& currentAudioInputBuffer = audioAndCVBuffers->currentAudioInputBuffer;
  1316. AudioSampleBuffer& currentAudioOutputBuffer = audioAndCVBuffers->currentAudioOutputBuffer;
  1317. AudioSampleBuffer& renderingAudioBuffers = audioAndCVBuffers->renderingAudioBuffers;
  1318. AudioSampleBuffer& renderingCVBuffers = audioAndCVBuffers->renderingCVBuffers;
  1319. const int numSamples = audioBuffer.getNumSamples();
  1320. if (! audioAndCVBuffers->currentAudioOutputBuffer.setSizeRT(numSamples))
  1321. return;
  1322. if (! audioAndCVBuffers->renderingAudioBuffers.setSizeRT(numSamples))
  1323. return;
  1324. if (! audioAndCVBuffers->renderingCVBuffers.setSizeRT(numSamples))
  1325. return;
  1326. currentAudioInputBuffer = &audioBuffer;
  1327. currentAudioOutputBuffer.clear();
  1328. currentMidiInputBuffer = &midiMessages;
  1329. currentMidiOutputBuffer.clear();
  1330. for (int i = 0; i < renderingOps.size(); ++i)
  1331. {
  1332. GraphRenderingOps::AudioGraphRenderingOpBase* const op
  1333. = (GraphRenderingOps::AudioGraphRenderingOpBase*) renderingOps.getUnchecked(i);
  1334. op->perform (renderingAudioBuffers, renderingCVBuffers, midiBuffers, numSamples);
  1335. }
  1336. for (uint32_t i = 0; i < audioBuffer.getNumChannels(); ++i)
  1337. audioBuffer.copyFrom (i, 0, currentAudioOutputBuffer, i, 0, numSamples);
  1338. midiMessages.clear();
  1339. midiMessages.addEvents (currentMidiOutputBuffer, 0, audioBuffer.getNumSamples(), 0);
  1340. }
  1341. */
// Renders one block: runs the pre-compiled rendering ops over the shared
// buffers, then copies the accumulated graph outputs into the caller's
// buffers. Buffer resizes use setSizeRT(); if any of them fails
// (presumably because it would have to allocate — confirm against
// AudioSampleBuffer), the whole block is skipped and outputs are untouched.
void AudioProcessorGraph::processAudioAndCV (AudioSampleBuffer& audioBuffer,
                                             const AudioSampleBuffer& cvInBuffer,
                                             AudioSampleBuffer& cvOutBuffer,
                                             MidiBuffer& midiMessages)
{
    AudioSampleBuffer*& currentAudioInputBuffer = audioAndCVBuffers->currentAudioInputBuffer;
    const AudioSampleBuffer*& currentCVInputBuffer = audioAndCVBuffers->currentCVInputBuffer;
    AudioSampleBuffer& currentAudioOutputBuffer = audioAndCVBuffers->currentAudioOutputBuffer;
    AudioSampleBuffer& currentCVOutputBuffer = audioAndCVBuffers->currentCVOutputBuffer;
    AudioSampleBuffer& renderingAudioBuffers = audioAndCVBuffers->renderingAudioBuffers;
    AudioSampleBuffer& renderingCVBuffers = audioAndCVBuffers->renderingCVBuffers;

    const int numSamples = audioBuffer.getNumSamples();

    if (! audioAndCVBuffers->currentAudioOutputBuffer.setSizeRT(numSamples))
        return;
    if (! audioAndCVBuffers->currentCVOutputBuffer.setSizeRT(numSamples))
        return;
    if (! audioAndCVBuffers->renderingAudioBuffers.setSizeRT(numSamples))
        return;
    if (! audioAndCVBuffers->renderingCVBuffers.setSizeRT(numSamples))
        return;

    // Make the host buffers visible to the IO nodes, and silence the output
    // accumulators so the output nodes can mix into them.
    currentAudioInputBuffer = &audioBuffer;
    currentCVInputBuffer = &cvInBuffer;
    currentAudioOutputBuffer.clear();
    currentCVOutputBuffer.clear();
    currentMidiInputBuffer = &midiMessages;
    currentMidiOutputBuffer.clear();

    // Execute the compiled sequence of rendering operations.
    for (int i = 0; i < renderingOps.size(); ++i)
    {
        GraphRenderingOps::AudioGraphRenderingOpBase* const op
            = (GraphRenderingOps::AudioGraphRenderingOpBase*) renderingOps.getUnchecked(i);

        op->perform (renderingAudioBuffers, renderingCVBuffers, midiBuffers, numSamples);
    }

    // Copy the accumulated outputs back into the caller's buffers.
    for (uint32_t i = 0; i < audioBuffer.getNumChannels(); ++i)
        audioBuffer.copyFrom (i, 0, currentAudioOutputBuffer, i, 0, numSamples);

    for (uint32_t i = 0; i < cvOutBuffer.getNumChannels(); ++i)
        cvOutBuffer.copyFrom (i, 0, currentCVOutputBuffer, i, 0, numSamples);

    midiMessages.clear();
    midiMessages.addEvents (currentMidiOutputBuffer, 0, audioBuffer.getNumSamples(), 0);
}
// The graph as a whole always routes MIDI in from the host.
bool AudioProcessorGraph::acceptsMidi() const { return true; }
// The graph as a whole always routes MIDI out to the host.
bool AudioProcessorGraph::producesMidi() const { return true; }
  1383. /*
  1384. void AudioProcessorGraph::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
  1385. {
  1386. processAudio (buffer, midiMessages);
  1387. }
  1388. */
// Public block-processing entry point with CV support; forwards straight to
// processAudioAndCV().
void AudioProcessorGraph::processBlockWithCV (AudioSampleBuffer& audioBuffer,
                                              const AudioSampleBuffer& cvInBuffer,
                                              AudioSampleBuffer& cvOutBuffer,
                                              MidiBuffer& midiMessages)
{
    processAudioAndCV (audioBuffer, cvInBuffer, cvOutBuffer, midiMessages);
}
  1396. void AudioProcessorGraph::reorderNowIfNeeded()
  1397. {
  1398. if (needsReorder)
  1399. {
  1400. needsReorder = false;
  1401. buildRenderingSequence();
  1402. }
  1403. }
// Exposes the mutex held while the rendering sequence is being rebuilt.
const CarlaRecursiveMutex& AudioProcessorGraph::getReorderMutex() const
{
    return reorderMutex;
}
  1408. //==============================================================================
// The owning graph is attached later via setParentGraph().
AudioProcessorGraph::AudioGraphIOProcessor::AudioGraphIOProcessor (const IODeviceType deviceType)
    : type (deviceType), graph (nullptr)
{
}
// Nothing to clean up: the graph pointer is not owned.
AudioProcessorGraph::AudioGraphIOProcessor::~AudioGraphIOProcessor()
{
}
  1416. const String AudioProcessorGraph::AudioGraphIOProcessor::getName() const
  1417. {
  1418. switch (type)
  1419. {
  1420. case audioOutputNode: return "Audio Output";
  1421. case audioInputNode: return "Audio Input";
  1422. case cvOutputNode: return "CV Output";
  1423. case cvInputNode: return "CV Input";
  1424. case midiOutputNode: return "Midi Output";
  1425. case midiInputNode: return "Midi Input";
  1426. default: break;
  1427. }
  1428. return String();
  1429. }
void AudioProcessorGraph::AudioGraphIOProcessor::prepareToPlay (double, int)
{
    // IO nodes allocate nothing of their own; just check we're attached.
    CARLA_SAFE_ASSERT (graph != nullptr);
}
// Nothing to release — IO nodes own no buffers.
void AudioProcessorGraph::AudioGraphIOProcessor::releaseResources()
{
}
// Bridges between the graph's block-scoped IO buffers (held by the parent
// graph) and this node's own buffers, in the direction implied by the node
// type. Output nodes accumulate (addFrom); input nodes overwrite (copyFrom).
void AudioProcessorGraph::AudioGraphIOProcessor::processAudioAndCV (AudioSampleBuffer& audioBuffer,
                                                                    const AudioSampleBuffer& cvInBuffer,
                                                                    AudioSampleBuffer& cvOutBuffer,
                                                                    MidiBuffer& midiMessages)
{
    CARLA_SAFE_ASSERT_RETURN(graph != nullptr,);

    switch (type)
    {
    case audioOutputNode:
    {
        // Mix this node's incoming audio into the graph's final output.
        AudioSampleBuffer& currentAudioOutputBuffer =
            graph->audioAndCVBuffers->currentAudioOutputBuffer;

        for (int i = jmin (currentAudioOutputBuffer.getNumChannels(),
                           audioBuffer.getNumChannels()); --i >= 0;)
        {
            currentAudioOutputBuffer.addFrom (i, 0, audioBuffer, i, 0, audioBuffer.getNumSamples());
        }

        break;
    }
    case audioInputNode:
    {
        // Feed the graph's host-provided audio input to this node's output.
        AudioSampleBuffer*& currentAudioInputBuffer =
            graph->audioAndCVBuffers->currentAudioInputBuffer;

        for (int i = jmin (currentAudioInputBuffer->getNumChannels(),
                           audioBuffer.getNumChannels()); --i >= 0;)
        {
            audioBuffer.copyFrom (i, 0, *currentAudioInputBuffer, i, 0, audioBuffer.getNumSamples());
        }

        break;
    }
    case cvOutputNode:
    {
        // Mix this node's incoming CV into the graph's final CV output.
        AudioSampleBuffer& currentCVOutputBuffer =
            graph->audioAndCVBuffers->currentCVOutputBuffer;

        for (int i = jmin (currentCVOutputBuffer.getNumChannels(),
                           cvInBuffer.getNumChannels()); --i >= 0;)
        {
            currentCVOutputBuffer.addFrom (i, 0, cvInBuffer, i, 0, cvInBuffer.getNumSamples());
        }

        break;
    }
    case cvInputNode:
    {
        // Feed the graph's host-provided CV input to this node's CV output.
        const AudioSampleBuffer*& currentCVInputBuffer =
            graph->audioAndCVBuffers->currentCVInputBuffer;

        for (int i = jmin (currentCVInputBuffer->getNumChannels(),
                           cvOutBuffer.getNumChannels()); --i >= 0;)
        {
            cvOutBuffer.copyFrom (i, 0, *currentCVInputBuffer, i, 0, cvOutBuffer.getNumSamples());
        }

        break;
    }
    case midiOutputNode:
        // Collect MIDI destined for the outside world.
        graph->currentMidiOutputBuffer.addEvents (midiMessages, 0, audioBuffer.getNumSamples(), 0);
        break;
    case midiInputNode:
        // Emit the graph's incoming MIDI into this node's buffer.
        midiMessages.addEvents (*graph->currentMidiInputBuffer, 0, audioBuffer.getNumSamples(), 0);
        break;
    default:
        break;
    }
}
// Block-processing entry point; forwards straight to processAudioAndCV().
void AudioProcessorGraph::AudioGraphIOProcessor::processBlockWithCV (AudioSampleBuffer& audioBuffer,
                                                                     const AudioSampleBuffer& cvInBuffer,
                                                                     AudioSampleBuffer& cvOutBuffer,
                                                                     MidiBuffer& midiMessages)
{
    processAudioAndCV (audioBuffer, cvInBuffer, cvOutBuffer, midiMessages);
}
bool AudioProcessorGraph::AudioGraphIOProcessor::acceptsMidi() const
{
    // Looks inverted but is correct: the MIDI *output* node is the one that
    // consumes MIDI from inside the graph.
    return type == midiOutputNode;
}
bool AudioProcessorGraph::AudioGraphIOProcessor::producesMidi() const
{
    // The MIDI *input* node is the one that emits MIDI into the graph.
    return type == midiInputNode;
}
  1514. bool AudioProcessorGraph::AudioGraphIOProcessor::isInput() const noexcept
  1515. {
  1516. return type == audioInputNode || type == cvInputNode || type == midiInputNode;
  1517. }
  1518. bool AudioProcessorGraph::AudioGraphIOProcessor::isOutput() const noexcept
  1519. {
  1520. return type == audioOutputNode || type == cvOutputNode || type == midiOutputNode;
  1521. }
// Attaches this IO node to its owning graph and sizes its channel config to
// mirror the graph's own IO: an output node exposes inputs matching the
// graph's outputs, an input node exposes outputs matching the graph's
// inputs, and every other count is zero.
void AudioProcessorGraph::AudioGraphIOProcessor::setParentGraph (AudioProcessorGraph* const newGraph)
{
    graph = newGraph;

    if (graph != nullptr)
    {
        setPlayConfigDetails (type == audioOutputNode
                                ? graph->getTotalNumOutputChannels(AudioProcessor::ChannelTypeAudio)
                                : 0,
                              type == audioInputNode
                                ? graph->getTotalNumInputChannels(AudioProcessor::ChannelTypeAudio)
                                : 0,
                              type == cvOutputNode
                                ? graph->getTotalNumOutputChannels(AudioProcessor::ChannelTypeCV)
                                : 0,
                              type == cvInputNode
                                ? graph->getTotalNumInputChannels(AudioProcessor::ChannelTypeCV)
                                : 0,
                              type == midiOutputNode
                                ? graph->getTotalNumOutputChannels(AudioProcessor::ChannelTypeMIDI)
                                : 0,
                              type == midiInputNode
                                ? graph->getTotalNumInputChannels(AudioProcessor::ChannelTypeMIDI)
                                : 0,
                              getSampleRate(),
                              getBlockSize());
    }
}
  1549. }