/*
  ==============================================================================
  This file was auto-generated!
  It contains the basic framework code for a JUCE plugin processor.
  ==============================================================================
*/

#include "PluginProcessor.h"
#include "PluginEditor.h"
#include <set>

#ifdef WIN32
#undef min
#undef max
#endif
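
// Global registry of live processor instances, plus a small helper that runs a
// lambda on the plugin's active editor (directly, or asynchronously on the
// message thread when async == true) and does nothing if no editor is open.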
std::set<PaulstretchpluginAudioProcessor*> g_activeprocessors;

template<typename F>
void callGUI(AudioProcessor* ap, F&& f, bool async)
{
    auto ed = dynamic_cast<PaulstretchpluginAudioProcessorEditor*>(ap->getActiveEditor());
    if (ed)
    {
        if (async == false)
            f(ed);
        else
            MessageManager::callAsync([ed, f]() { f(ed); });
    }
}
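
// Helpers that nudge a requested size up or down until its only prime factors
// are 2, 3, 5, 7 or 11, then pick whichever direction lands closer to the
// original request. This keeps FFT/buffer sizes friendly for the stretch engine.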
int get_optimized_updown(int n, bool up)
{
    int orig_n = n;
    while (true)
    {
        n = orig_n;
        while (!(n % 11)) n /= 11;
        while (!(n % 7)) n /= 7;
        while (!(n % 5)) n /= 5;
        while (!(n % 3)) n /= 3;
        while (!(n % 2)) n /= 2;
        if (n < 2) break;
        if (up) orig_n++;
        else orig_n--;
        if (orig_n < 4) return 4;
    }
    return orig_n;
}

int optimizebufsize(int n)
{
    int n1 = get_optimized_updown(n, false);
    int n2 = get_optimized_updown(n, true);
    if ((n - n1) < (n2 - n)) return n1;
    else return n2;
}
//==============================================================================
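// Constructor: sets up the stretch source and registers the plugin parameters.
// The trailing "// N" comments record each parameter's index, which presumably
// must stay in sync with the cpi_* index constants used elsewhere in this file.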
PaulstretchpluginAudioProcessor::PaulstretchpluginAudioProcessor()
#ifndef JucePlugin_PreferredChannelConfigurations
    : AudioProcessor (BusesProperties()
#if ! JucePlugin_IsMidiEffect
#if ! JucePlugin_IsSynth
        .withInput ("Input", AudioChannelSet::stereo(), true)
#endif
        .withOutput ("Output", AudioChannelSet::stereo(), true)
#endif
        ),
    m_bufferingthread("pspluginprebufferthread")
#else
    : m_bufferingthread("pspluginprebufferthread")
#endif
{
    g_activeprocessors.insert(this);
    m_recbuffer.setSize(2, 44100);
    m_recbuffer.clear();
    if (m_afm->getNumKnownFormats() == 0)
        m_afm->registerBasicFormats();
    m_stretch_source = std::make_unique<StretchAudioSource>(2, m_afm);
    setPreBufferAmount(2);
    m_ppar.pitch_shift.enabled = true;
    m_ppar.freq_shift.enabled = true;
    m_ppar.filter.enabled = true;
    m_ppar.compressor.enabled = true;
    m_stretch_source->setOnsetDetection(0.0);
    m_stretch_source->setLoopingEnabled(true);
    m_stretch_source->setFFTWindowingType(1);
    addParameter(new AudioParameterFloat("mainvolume0", "Main volume", -24.0f, 12.0f, -3.0f)); // 0
    addParameter(new AudioParameterFloat("stretchamount0", "Stretch amount",
        NormalisableRange<float>(0.1f, 128.0f, 0.01f, 0.5f), 1.0f)); // 1
    addParameter(new AudioParameterFloat("fftsize0", "FFT size", 0.0f, 1.0f, 0.7f)); // 2
    addParameter(new AudioParameterFloat("pitchshift0", "Pitch shift", -24.0f, 24.0f, 0.0f)); // 3
    addParameter(new AudioParameterFloat("freqshift0", "Frequency shift", -1000.0f, 1000.0f, 0.0f)); // 4
    addParameter(new AudioParameterFloat("playrange_start0", "Sound start", 0.0f, 1.0f, 0.0f)); // 5
    addParameter(new AudioParameterFloat("playrange_end0", "Sound end", 0.0f, 1.0f, 1.0f)); // 6
    addParameter(new AudioParameterBool("freeze0", "Freeze", false)); // 7
    addParameter(new AudioParameterFloat("spread0", "Frequency spread", 0.0f, 1.0f, 0.0f)); // 8
    addParameter(new AudioParameterFloat("compress0", "Compress", 0.0f, 1.0f, 0.0f)); // 9
    addParameter(new AudioParameterFloat("loopxfadelen0", "Loop xfade length", 0.0f, 1.0f, 0.0f)); // 10
    addParameter(new AudioParameterFloat("numharmonics0", "Num harmonics", 0.0f, 100.0f, 0.0f)); // 11
    addParameter(new AudioParameterFloat("harmonicsfreq0", "Harmonics base freq",
        NormalisableRange<float>(1.0f, 5000.0f, 1.00f, 0.5f), 128.0f)); // 12
    addParameter(new AudioParameterFloat("harmonicsbw0", "Harmonics bandwidth", 0.1f, 200.0f, 25.0f)); // 13
    addParameter(new AudioParameterBool("harmonicsgauss0", "Gaussian harmonics", false)); // 14
    addParameter(new AudioParameterFloat("octavemixm2_0", "2 octaves down level", 0.0f, 1.0f, 0.0f)); // 15
    addParameter(new AudioParameterFloat("octavemixm1_0", "Octave down level", 0.0f, 1.0f, 0.0f)); // 16
    addParameter(new AudioParameterFloat("octavemix0_0", "Normal pitch level", 0.0f, 1.0f, 1.0f)); // 17
    addParameter(new AudioParameterFloat("octavemix1_0", "1 octave up level", 0.0f, 1.0f, 0.0f)); // 18
    addParameter(new AudioParameterFloat("octavemix15_0", "1 octave and fifth up level", 0.0f, 1.0f, 0.0f)); // 19
    addParameter(new AudioParameterFloat("octavemix2_0", "2 octaves up level", 0.0f, 1.0f, 0.0f)); // 20
    addParameter(new AudioParameterFloat("tonalvsnoisebw_0", "Tonal vs Noise BW", 0.74f, 1.0f, 0.74f)); // 21
    addParameter(new AudioParameterFloat("tonalvsnoisepreserve_0", "Tonal vs Noise preserve", -1.0f, 1.0f, 0.5f)); // 22
    auto filt_convertFrom0To1Func = [](float rangemin, float rangemax, float value)
    {
        if (value < 0.5f)
            return jmap<float>(value, 0.0f, 0.5f, 20.0f, 1000.0f);
        return jmap<float>(value, 0.5f, 1.0f, 1000.0f, 20000.0f);
    };
    auto filt_convertTo0To1Func = [](float rangemin, float rangemax, float value)
    {
        if (value < 1000.0f)
            return jmap<float>(value, 20.0f, 1000.0f, 0.0f, 0.5f);
        return jmap<float>(value, 1000.0f, 20000.0f, 0.5f, 1.0f);
    };
    addParameter(new AudioParameterFloat("filter_low_0", "Filter low",
        NormalisableRange<float>(20.0f, 20000.0f,
            filt_convertFrom0To1Func, filt_convertTo0To1Func), 20.0f)); // 23
    addParameter(new AudioParameterFloat("filter_high_0", "Filter high",
        NormalisableRange<float>(20.0f, 20000.0f,
            filt_convertFrom0To1Func, filt_convertTo0To1Func), 20000.0f)); // 24
    addParameter(new AudioParameterFloat("onsetdetect_0", "Onset detection", 0.0f, 1.0f, 0.0f)); // 25
    addParameter(new AudioParameterBool("capture_enabled0", "Capture", false)); // 26
    m_outchansparam = new AudioParameterInt("numoutchans0", "Num output channels", 2, 8, 2); // 27
    addParameter(m_outchansparam); // 27
    addParameter(new AudioParameterBool("pause_enabled0", "Pause", false)); // 28
    startTimer(1, 50);
}
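
// Deregister this instance and give the prebuffering thread up to a second to stop.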
PaulstretchpluginAudioProcessor::~PaulstretchpluginAudioProcessor()
{
    g_activeprocessors.erase(this);
    m_bufferingthread.stopThread(1000);
}

void PaulstretchpluginAudioProcessor::setPreBufferAmount(int x)
{
    int temp = jlimit(0, 5, x);
    if (temp != m_prebuffer_amount)
    {
        m_prebuffer_amount = temp;
        m_recreate_buffering_source = true;
    }
}
//==============================================================================
const String PaulstretchpluginAudioProcessor::getName() const
{
    return JucePlugin_Name;
}

bool PaulstretchpluginAudioProcessor::acceptsMidi() const
{
#if JucePlugin_WantsMidiInput
    return true;
#else
    return false;
#endif
}

bool PaulstretchpluginAudioProcessor::producesMidi() const
{
#if JucePlugin_ProducesMidiOutput
    return true;
#else
    return false;
#endif
}

bool PaulstretchpluginAudioProcessor::isMidiEffect() const
{
#if JucePlugin_IsMidiEffect
    return true;
#else
    return false;
#endif
}

double PaulstretchpluginAudioProcessor::getTailLengthSeconds() const
{
    return (double)m_bufamounts[m_prebuffer_amount] / getSampleRate();
}

int PaulstretchpluginAudioProcessor::getNumPrograms()
{
    return 1; // NB: some hosts don't cope very well if you tell them there are 0 programs,
              // so this should be at least 1, even if you're not really implementing programs.
}

int PaulstretchpluginAudioProcessor::getCurrentProgram()
{
    return 0;
}

void PaulstretchpluginAudioProcessor::setCurrentProgram (int index)
{
}

const String PaulstretchpluginAudioProcessor::getProgramName (int index)
{
    return {};
}

void PaulstretchpluginAudioProcessor::changeProgramName (int index, const String& newName)
{
}
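
// Maps the normalized 0..1 FFT size parameter to an actual size (a larger range
// is only allowed at the maximum prebuffer setting) and rounds it to an
// optimized length before handing it to the stretch source.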
void PaulstretchpluginAudioProcessor::setFFTSize(double size)
{
    if (m_prebuffer_amount == 5)
        m_fft_size_to_use = pow(2, 7.0 + size * 14.5);
    else
        m_fft_size_to_use = pow(2, 7.0 + size * 10.0); // chicken out from allowing huge FFT sizes if not enough prebuffering
    int optim = optimizebufsize(m_fft_size_to_use);
    m_fft_size_to_use = optim;
    m_stretch_source->setFFTSize(optim);
    //Logger::writeToLog(String(m_fft_size_to_use));
}
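
// (Re)creates the buffering source if needed, starts the prebuffer thread and
// primes the stretch source for playback over the given play range.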
void PaulstretchpluginAudioProcessor::startplay(Range<double> playrange, int numoutchans, int maxBlockSize, String& err)
{
    m_stretch_source->setPlayRange(playrange, true);
    int bufamt = m_bufamounts[m_prebuffer_amount];
    if (m_buffering_source != nullptr && numoutchans != m_buffering_source->getNumberOfChannels())
        m_recreate_buffering_source = true;
    if (m_recreate_buffering_source == true)
    {
        m_buffering_source = std::make_unique<MyBufferingAudioSource>(m_stretch_source.get(),
            m_bufferingthread, false, bufamt, numoutchans, false);
        m_recreate_buffering_source = false;
    }
    if (m_bufferingthread.isThreadRunning() == false)
        m_bufferingthread.startThread();
    m_stretch_source->setNumOutChannels(numoutchans);
    m_stretch_source->setFFTSize(m_fft_size_to_use);
    m_stretch_source->setProcessParameters(&m_ppar);
    m_last_outpos_pos = 0.0;
    m_last_in_pos = playrange.getStart() * m_stretch_source->getInfileLengthSeconds();
    m_buffering_source->prepareToPlay(maxBlockSize, getSampleRate());
}
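
// Re-attaches the capture buffer when playing from memory and restarts playback
// if the processor is not ready, for example after an output channel count change.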
void PaulstretchpluginAudioProcessor::prepareToPlay(double sampleRate, int samplesPerBlock)
{
    ScopedLock locker(m_cs);
    m_curmaxblocksize = samplesPerBlock;
    int numoutchans = *m_outchansparam;
    if (numoutchans != m_cur_num_out_chans)
        m_ready_to_play = false;
    if (m_using_memory_buffer == true)
    {
        int len = jlimit(100, m_recbuffer.getNumSamples(), m_rec_pos);
        m_stretch_source->setAudioBufferAsInputSource(&m_recbuffer,
            getSampleRate(),
            len);
        callGUI(this, [this, len](auto ed) { ed->setAudioBuffer(&m_recbuffer, getSampleRate(), len); }, false);
    }
    if (m_ready_to_play == false)
    {
        setFFTSize(*getFloatParameter(cpi_fftsize));
        m_stretch_source->setProcessParameters(&m_ppar);
        m_stretch_source->setFFTWindowingType(1);
        String err;
        startplay({ *getFloatParameter(cpi_soundstart), *getFloatParameter(cpi_soundend) },
            numoutchans, samplesPerBlock, err);
        m_cur_num_out_chans = numoutchans;
        m_ready_to_play = true;
    }
}

void PaulstretchpluginAudioProcessor::releaseResources()
{
    //m_control->stopplay();
    //m_ready_to_play = false;
}
#ifndef JucePlugin_PreferredChannelConfigurations
bool PaulstretchpluginAudioProcessor::isBusesLayoutSupported (const BusesLayout& layouts) const
{
#if JucePlugin_IsMidiEffect
    ignoreUnused (layouts);
    return true;
#else
    // This is the place where you check if the layout is supported.
    // In this template code we only support mono or stereo.
    if (layouts.getMainOutputChannelSet() != AudioChannelSet::mono()
        && layouts.getMainOutputChannelSet() != AudioChannelSet::stereo())
        return false;
    // This checks if the input layout matches the output layout
#if ! JucePlugin_IsSynth
    if (layouts.getMainOutputChannelSet() != layouts.getMainInputChannelSet())
        return false;
#endif
    return true;
#endif
}
#endif
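
// Copies src into dest starting at destbufpos, treating dest as a circular
// buffer that wraps at maxdestpos; extra channels in dest reuse src channels.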
void copyAudioBufferWrappingPosition(const AudioBuffer<float>& src, AudioBuffer<float>& dest, int destbufpos, int maxdestpos)
{
    for (int i = 0; i < dest.getNumChannels(); ++i)
    {
        int channel_to_copy = i % src.getNumChannels();
        if (destbufpos + src.getNumSamples() > maxdestpos)
        {
            int wrappos = (destbufpos + src.getNumSamples()) % maxdestpos;
            int partial_len = src.getNumSamples() - wrappos;
            // Copy the part that still fits before the end of the destination...
            dest.copyFrom(channel_to_copy, destbufpos, src, channel_to_copy, 0, partial_len);
            // ...and wrap the remaining samples back to the start of the destination.
            dest.copyFrom(channel_to_copy, 0, src, channel_to_copy, partial_len, wrappos);
        }
        else
        {
            dest.copyFrom(channel_to_copy, destbufpos, src, channel_to_copy, 0, src.getNumSamples());
        }
    }
}
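
// Audio callback: while capturing, the input is written into the circular record
// buffer and forwarded to the editor; otherwise the current parameter values are
// pushed to the stretch engine and the prebuffered output is rendered.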
void PaulstretchpluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    ScopedLock locker(m_cs);
    ScopedNoDenormals noDenormals;
    const int totalNumInputChannels = getTotalNumInputChannels();
    const int totalNumOutputChannels = getTotalNumOutputChannels();
    for (int i = totalNumInputChannels; i < totalNumOutputChannels; ++i)
        buffer.clear(i, 0, buffer.getNumSamples());
    if (m_ready_to_play == false)
        return;
    if (m_is_recording == true)
    {
        int recbuflenframes = m_max_reclen * getSampleRate();
        copyAudioBufferWrappingPosition(buffer, m_recbuffer, m_rec_pos, recbuflenframes);
        callGUI(this, [this, &buffer](PaulstretchpluginAudioProcessorEditor* ed)
        {
            ed->addAudioBlock(buffer, getSampleRate(), m_rec_pos);
        }, false);
        m_rec_pos = (m_rec_pos + buffer.getNumSamples()) % recbuflenframes;
        return;
    }
    jassert(m_buffering_source != nullptr);
    jassert(m_bufferingthread.isThreadRunning());
    m_stretch_source->setMainVolume(*getFloatParameter(cpi_main_volume));
    m_stretch_source->setRate(*getFloatParameter(cpi_stretchamount));
    setFFTSize(*getFloatParameter(cpi_fftsize));
    m_ppar.pitch_shift.cents = *getFloatParameter(cpi_pitchshift) * 100.0;
    m_ppar.freq_shift.Hz = *getFloatParameter(cpi_frequencyshift);
    m_ppar.spread.enabled = *getFloatParameter(cpi_spreadamount) > 0.0f;
    m_ppar.spread.bandwidth = *getFloatParameter(cpi_spreadamount);
    m_ppar.compressor.enabled = *getFloatParameter(cpi_compress) > 0.0f;
    m_ppar.compressor.power = *getFloatParameter(cpi_compress);
    m_ppar.harmonics.enabled = *getFloatParameter(cpi_numharmonics) >= 1.0;
    m_ppar.harmonics.nharmonics = *getFloatParameter(cpi_numharmonics);
    m_ppar.harmonics.freq = *getFloatParameter(cpi_harmonicsfreq);
    m_ppar.harmonics.bandwidth = *getFloatParameter(cpi_harmonicsbw);
    m_ppar.harmonics.gauss = getParameter(cpi_harmonicsgauss);
    m_ppar.octave.om2 = *getFloatParameter(cpi_octavesm2);
    m_ppar.octave.om1 = *getFloatParameter(cpi_octavesm1);
    m_ppar.octave.o0 = *getFloatParameter(cpi_octaves0);
    m_ppar.octave.o1 = *getFloatParameter(cpi_octaves1);
    m_ppar.octave.o15 = *getFloatParameter(cpi_octaves15);
    m_ppar.octave.o2 = *getFloatParameter(cpi_octaves2);
    m_ppar.octave.enabled = true;
    m_ppar.filter.low = *getFloatParameter(cpi_filter_low);
    m_ppar.filter.high = *getFloatParameter(cpi_filter_high);
    m_ppar.tonal_vs_noise.enabled = (*getFloatParameter(cpi_tonalvsnoisebw)) > 0.75;
    m_ppar.tonal_vs_noise.bandwidth = *getFloatParameter(cpi_tonalvsnoisebw);
    m_ppar.tonal_vs_noise.preserve = *getFloatParameter(cpi_tonalvsnoisepreserve);
    m_stretch_source->setOnsetDetection(*getFloatParameter(cpi_onsetdetection));
    m_stretch_source->setLoopXFadeLength(*getFloatParameter(cpi_loopxfadelen));
    double t0 = *getFloatParameter(cpi_soundstart);
    double t1 = *getFloatParameter(cpi_soundend);
    if (t0 > t1)
        std::swap(t0, t1);
    if (t1 - t0 < 0.001)
        t1 = t0 + 0.001;
    m_stretch_source->setPlayRange({ t0, t1 }, true);
    m_stretch_source->setFreezing(getParameter(cpi_freeze));
    m_stretch_source->setPaused(getParameter(cpi_pause_enabled));
    m_stretch_source->setProcessParameters(&m_ppar);
    AudioSourceChannelInfo aif(buffer);
    m_buffering_source->getNextAudioBlock(aif);
}
//==============================================================================
bool PaulstretchpluginAudioProcessor::hasEditor() const
{
    return true; // (change this to false if you choose to not supply an editor)
}

AudioProcessorEditor* PaulstretchpluginAudioProcessor::createEditor()
{
    return new PaulstretchpluginAudioProcessorEditor (*this);
}

//==============================================================================
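// State save/restore: parameter values and the imported file path are stored in
// a ValueTree that is streamed to and from the host's state block.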
void PaulstretchpluginAudioProcessor::getStateInformation (MemoryBlock& destData)
{
    ValueTree paramtree("paulstretch3pluginstate");
    for (int i = 0; i < getNumParameters(); ++i)
    {
        auto par = getFloatParameter(i);
        if (par != nullptr)
        {
            paramtree.setProperty(par->paramID, (double)*par, nullptr);
        }
    }
    paramtree.setProperty(m_outchansparam->paramID, (int)*m_outchansparam, nullptr);
    if (m_current_file != File())
    {
        paramtree.setProperty("importedfile", m_current_file.getFullPathName(), nullptr);
    }
    MemoryOutputStream stream(destData, true);
    paramtree.writeToStream(stream);
}
void PaulstretchpluginAudioProcessor::setStateInformation (const void* data, int sizeInBytes)
{
    ValueTree tree = ValueTree::readFromData(data, sizeInBytes);
    if (tree.isValid())
    {
        {
            ScopedLock locker(m_cs);
            for (int i = 0; i < getNumParameters(); ++i)
            {
                auto par = getFloatParameter(i);
                if (par != nullptr)
                {
                    double parval = tree.getProperty(par->paramID, (double)*par);
                    *par = parval;
                }
            }
            if (tree.hasProperty(m_outchansparam->paramID))
                *m_outchansparam = tree.getProperty(m_outchansparam->paramID, 2);
        }
        String fn = tree.getProperty("importedfile");
        if (fn.isEmpty() == false)
        {
            File f(fn);
            setAudioFile(f);
        }
    }
}
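
// Toggles capture mode: prepares the circular record buffer and notifies the
// editor when enabling, and finalizes the recording when disabling.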
void PaulstretchpluginAudioProcessor::setRecordingEnabled(bool b)
{
    ScopedLock locker(m_cs);
    int lenbufframes = getSampleRate() * m_max_reclen;
    if (b == true)
    {
        m_using_memory_buffer = true;
        m_current_file = File();
        m_recbuffer.setSize(2, m_max_reclen * getSampleRate() + 4096, false, false, true);
        m_recbuffer.clear();
        m_rec_pos = 0;
        callGUI(this, [this, lenbufframes](PaulstretchpluginAudioProcessorEditor* ed)
        {
            ed->beginAddingAudioBlocks(2, getSampleRate(), lenbufframes);
        }, false);
        m_is_recording = true;
    }
    else
    {
        if (m_is_recording == true)
        {
            finishRecording(lenbufframes);
        }
    }
}
double PaulstretchpluginAudioProcessor::getRecordingPositionPercent()
{
    if (m_is_recording == false)
        return 0.0;
    return 1.0 / m_recbuffer.getNumSamples() * m_rec_pos;
}
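
// Loads an audio file into the stretch source after basic sanity checks;
// returns an empty String on success or an error message on failure.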
String PaulstretchpluginAudioProcessor::setAudioFile(File f)
{
    //if (f==File())
    //    return String();
    //if (f==m_current_file && f.getLastModificationTime()==m_current_file_date)
    //    return String();
    auto ai = unique_from_raw(m_afm->createReaderFor(f));
    if (ai != nullptr)
    {
        if (ai->numChannels > 32)
        {
            //MessageManager::callAsync([cb, file]() { cb("Too many channels in file " + file.getFullPathName()); });
            return "Too many channels in file " + f.getFullPathName();
        }
        if (ai->bitsPerSample > 32)
        {
            //MessageManager::callAsync([cb, file]() { cb("Too high bit depth in file " + file.getFullPathName()); });
            return "Too high bit depth in file " + f.getFullPathName();
        }
        ScopedLock locker(m_cs);
        m_stretch_source->setAudioFile(f);
        m_current_file = f;
        m_current_file_date = m_current_file.getLastModificationTime();
        m_using_memory_buffer = false;
        return String();
        //MessageManager::callAsync([cb, file]() { cb(String()); });
    }
    return "Could not open file " + f.getFullPathName();
}
Range<double> PaulstretchpluginAudioProcessor::getTimeSelection()
{
    return { *getFloatParameter(cpi_soundstart), *getFloatParameter(cpi_soundend) };
}

double PaulstretchpluginAudioProcessor::getPreBufferingPercent()
{
    if (m_buffering_source == nullptr)
        return 0.0;
    return m_buffering_source->getPercentReady();
}
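
// Timer id 1 (started with a 50 ms interval in the constructor) polls the
// capture parameter and the output channel count and reacts to changes.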
void PaulstretchpluginAudioProcessor::timerCallback(int id)
{
    if (id == 1)
    {
        bool capture = getParameter(cpi_capture_enabled);
        if (capture == true && m_is_recording == false)
        {
            setRecordingEnabled(true);
            return;
        }
        if (capture == false && m_is_recording == true)
        {
            setRecordingEnabled(false);
            return;
        }
        if (m_cur_num_out_chans != *m_outchansparam)
        {
            jassert(m_curmaxblocksize > 0);
            ScopedLock locker(m_cs);
            m_ready_to_play = false;
            m_cur_num_out_chans = *m_outchansparam;
            //Logger::writeToLog("Switching to use " + String(m_cur_num_out_chans) + " out channels");
            String err;
            startplay({ *getFloatParameter(cpi_soundstart), *getFloatParameter(cpi_soundend) },
                m_cur_num_out_chans, m_curmaxblocksize, err);
            m_ready_to_play = true;
        }
    }
}
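
// Switches the stretch source over to the captured buffer once recording stops.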
void PaulstretchpluginAudioProcessor::finishRecording(int lenrecording)
{
    m_is_recording = false;
    m_stretch_source->setAudioBufferAsInputSource(&m_recbuffer, getSampleRate(), lenrecording);
    m_stretch_source->setPlayRange({ *getFloatParameter(cpi_soundstart), *getFloatParameter(cpi_soundend) }, true);
    auto ed = dynamic_cast<PaulstretchpluginAudioProcessorEditor*>(getActiveEditor());
    if (ed)
    {
        //ed->setAudioBuffer(&m_recbuffer, getSampleRate(), lenrecording);
    }
}

//==============================================================================
// This creates new instances of the plugin..
AudioProcessor* JUCE_CALLTYPE createPluginFilter()
{
    return new PaulstretchpluginAudioProcessor();
}