/*
Copyright (C) 2006-2011 Nasca Octavian Paul
Author: Nasca Octavian Paul
Copyright (C) 2017 Xenakios

This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License
as published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License (version 2) for more details.

You should have received a copy of the GNU General Public License (version 2)
along with this program; if not, write to the Free Software Foundation,
Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/

#include "PluginProcessor.h"
#include "PluginEditor.h"
#include <set>

#ifdef WIN32
#undef min
#undef max
#endif

std::set<PaulstretchpluginAudioProcessor*> g_activeprocessors;

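// Helper that runs a callable on the plugin's active editor, if one exists.
// When async is true the call is deferred to the message thread via
// MessageManager::callAsync, so it can be used from the audio thread.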
template<typename F>
void callGUI(AudioProcessor* ap, F&& f, bool async)
{
    auto ed = dynamic_cast<PaulstretchpluginAudioProcessorEditor*>(ap->getActiveEditor());
    if (ed)
    {
        if (async == false)
            f(ed);
        else
            MessageManager::callAsync([ed, f]() { f(ed); });
    }
}

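// Walks from n upward or downward until it finds a length whose only prime
// factors are 2, 3, 5, 7 and 11, which the FFT code handles efficiently.
// The downward search is clamped to a minimum of 4.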
int get_optimized_updown(int n, bool up)
{
    int orig_n = n;
    while (true)
    {
        n = orig_n;
        while (!(n % 11)) n /= 11;
        while (!(n % 7)) n /= 7;
        while (!(n % 5)) n /= 5;
        while (!(n % 3)) n /= 3;
        while (!(n % 2)) n /= 2;
        if (n < 2) break;
        if (up) orig_n++;
        else orig_n--;
        if (orig_n < 4) return 4;
    }
    return orig_n;
}

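// Rounds n to whichever of the two nearest FFT-friendly sizes (searching
// downward and upward) is closer to n.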
int optimizebufsize(int n)
{
    int n1 = get_optimized_updown(n, false);
    int n2 = get_optimized_updown(n, true);
    if ((n - n1) < (n2 - n)) return n1;
    else return n2;
}

//==============================================================================
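// The constructor registers all host-visible parameters. The trailing "// N"
// comments record each parameter's index, which the cpi_* parameter index
// constants used elsewhere in this file are expected to match.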
PaulstretchpluginAudioProcessor::PaulstretchpluginAudioProcessor()
#ifndef JucePlugin_PreferredChannelConfigurations
    : AudioProcessor (BusesProperties()
#if ! JucePlugin_IsMidiEffect
#if ! JucePlugin_IsSynth
        .withInput ("Input", AudioChannelSet::stereo(), true)
#endif
        .withOutput ("Output", AudioChannelSet::stereo(), true)
#endif
        ),
      m_bufferingthread("pspluginprebufferthread")
#else
    : m_bufferingthread("pspluginprebufferthread")
#endif
{
    g_activeprocessors.insert(this);
    m_recbuffer.setSize(2, 44100);
    m_recbuffer.clear();
    if (m_afm->getNumKnownFormats() == 0)
        m_afm->registerBasicFormats();
    m_stretch_source = std::make_unique<StretchAudioSource>(2, m_afm);
    m_ppar.pitch_shift.enabled = true;
    m_ppar.freq_shift.enabled = true;
    m_ppar.filter.enabled = true;
    m_ppar.compressor.enabled = true;
    m_stretch_source->setOnsetDetection(0.0);
    m_stretch_source->setLoopingEnabled(true);
    m_stretch_source->setFFTWindowingType(1);
    addParameter(new AudioParameterFloat("mainvolume0", "Main volume", -24.0f, 12.0f, -3.0f)); // 0
    addParameter(new AudioParameterFloat("stretchamount0", "Stretch amount",
        NormalisableRange<float>(0.1f, 1024.0f, 0.01f, 0.25), 1.0f)); // 1
    addParameter(new AudioParameterFloat("fftsize0", "FFT size", 0.0f, 1.0f, 0.7f)); // 2
    addParameter(new AudioParameterFloat("pitchshift0", "Pitch shift", -24.0f, 24.0f, 0.0f)); // 3
    addParameter(new AudioParameterFloat("freqshift0", "Frequency shift", -1000.0f, 1000.0f, 0.0f)); // 4
    addParameter(new AudioParameterFloat("playrange_start0", "Sound start", 0.0f, 1.0f, 0.0f)); // 5
    addParameter(new AudioParameterFloat("playrange_end0", "Sound end", 0.0f, 1.0f, 1.0f)); // 6
    addParameter(new AudioParameterBool("freeze0", "Freeze", false)); // 7
    addParameter(new AudioParameterFloat("spread0", "Frequency spread", 0.0f, 1.0f, 0.0f)); // 8
    addParameter(new AudioParameterFloat("compress0", "Compress", 0.0f, 1.0f, 0.0f)); // 9
    addParameter(new AudioParameterFloat("loopxfadelen0", "Loop xfade length", 0.0f, 1.0f, 0.0f)); // 10
    addParameter(new AudioParameterFloat("numharmonics0", "Num harmonics", 0.0f, 100.0f, 0.0f)); // 11
    addParameter(new AudioParameterFloat("harmonicsfreq0", "Harmonics base freq",
        NormalisableRange<float>(1.0f, 5000.0f, 1.00f, 0.5), 128.0f)); // 12
    addParameter(new AudioParameterFloat("harmonicsbw0", "Harmonics bandwidth", 0.1f, 200.0f, 25.0f)); // 13
    addParameter(new AudioParameterBool("harmonicsgauss0", "Gaussian harmonics", false)); // 14
    addParameter(new AudioParameterFloat("octavemixm2_0", "2 octaves down level", 0.0f, 1.0f, 0.0f)); // 15
    addParameter(new AudioParameterFloat("octavemixm1_0", "Octave down level", 0.0f, 1.0f, 0.0f)); // 16
    addParameter(new AudioParameterFloat("octavemix0_0", "Normal pitch level", 0.0f, 1.0f, 1.0f)); // 17
    addParameter(new AudioParameterFloat("octavemix1_0", "1 octave up level", 0.0f, 1.0f, 0.0f)); // 18
    addParameter(new AudioParameterFloat("octavemix15_0", "1 octave and fifth up level", 0.0f, 1.0f, 0.0f)); // 19
    addParameter(new AudioParameterFloat("octavemix2_0", "2 octaves up level", 0.0f, 1.0f, 0.0f)); // 20
    addParameter(new AudioParameterFloat("tonalvsnoisebw_0", "Tonal vs Noise BW", 0.74f, 1.0f, 0.74f)); // 21
    addParameter(new AudioParameterFloat("tonalvsnoisepreserve_0", "Tonal vs Noise preserve", -1.0f, 1.0f, 0.5f)); // 22
    auto filt_convertFrom0To1Func = [](float rangemin, float rangemax, float value)
    {
        if (value < 0.5f)
            return jmap<float>(value, 0.0f, 0.5f, 20.0f, 1000.0f);
        return jmap<float>(value, 0.5f, 1.0f, 1000.0f, 20000.0f);
    };
    auto filt_convertTo0To1Func = [](float rangemin, float rangemax, float value)
    {
        if (value < 1000.0f)
            return jmap<float>(value, 20.0f, 1000.0f, 0.0f, 0.5f);
        return jmap<float>(value, 1000.0f, 20000.0f, 0.5f, 1.0f);
    };
    addParameter(new AudioParameterFloat("filter_low_0", "Filter low",
        NormalisableRange<float>(20.0f, 20000.0f,
            filt_convertFrom0To1Func, filt_convertTo0To1Func), 20.0f)); // 23
    addParameter(new AudioParameterFloat("filter_high_0", "Filter high",
        NormalisableRange<float>(20.0f, 20000.0f,
            filt_convertFrom0To1Func, filt_convertTo0To1Func), 20000.0f)); // 24
    addParameter(new AudioParameterFloat("onsetdetect_0", "Onset detection", 0.0f, 1.0f, 0.0f)); // 25
    addParameter(new AudioParameterBool("capture_enabled0", "Capture", false)); // 26
    m_outchansparam = new AudioParameterInt("numoutchans0", "Num output channels", 2, 8, 2); // 27
    addParameter(m_outchansparam); // 27
    addParameter(new AudioParameterBool("pause_enabled0", "Pause", false)); // 28
    addParameter(new AudioParameterFloat("maxcapturelen_0", "Max capture length", 1.0f, 120.0f, 10.0f)); // 29
    addParameter(new AudioParameterBool("passthrough0", "Pass input through", false)); // 30
    setPreBufferAmount(2);
    startTimer(1, 50);
}

PaulstretchpluginAudioProcessor::~PaulstretchpluginAudioProcessor()
{
    g_activeprocessors.erase(this);
    m_bufferingthread.stopThread(1000);
}

void PaulstretchpluginAudioProcessor::setPreBufferAmount(int x)
{
    int temp = jlimit(0, 5, x);
    if (temp != m_prebuffer_amount || m_use_backgroundbuffering == false)
    {
        m_use_backgroundbuffering = true;
        m_prebuffer_amount = temp;
        m_recreate_buffering_source = true;
        ScopedLock locker(m_cs);
        m_ready_to_play = false;
        m_cur_num_out_chans = *m_outchansparam;
        //Logger::writeToLog("Switching to use " + String(m_cur_num_out_chans) + " out channels");
        String err;
        startplay({ *getFloatParameter(cpi_soundstart), *getFloatParameter(cpi_soundend) },
            m_cur_num_out_chans, m_curmaxblocksize, err);
        m_ready_to_play = true;
    }
}

int PaulstretchpluginAudioProcessor::getPreBufferAmount()
{
    if (m_use_backgroundbuffering == false)
        return -1;
    return m_prebuffer_amount;
}

//==============================================================================
const String PaulstretchpluginAudioProcessor::getName() const
{
    return JucePlugin_Name;
}

bool PaulstretchpluginAudioProcessor::acceptsMidi() const
{
#if JucePlugin_WantsMidiInput
    return true;
#else
    return false;
#endif
}

bool PaulstretchpluginAudioProcessor::producesMidi() const
{
#if JucePlugin_ProducesMidiOutput
    return true;
#else
    return false;
#endif
}

bool PaulstretchpluginAudioProcessor::isMidiEffect() const
{
#if JucePlugin_IsMidiEffect
    return true;
#else
    return false;
#endif
}

double PaulstretchpluginAudioProcessor::getTailLengthSeconds() const
{
    return 0.0;
    //return (double)m_bufamounts[m_prebuffer_amount]/getSampleRate();
}

int PaulstretchpluginAudioProcessor::getNumPrograms()
{
    return 1; // NB: some hosts don't cope very well if you tell them there are 0 programs,
              // so this should be at least 1, even if you're not really implementing programs.
}

int PaulstretchpluginAudioProcessor::getCurrentProgram()
{
    return 0;
}

void PaulstretchpluginAudioProcessor::setCurrentProgram (int index)
{
}

const String PaulstretchpluginAudioProcessor::getProgramName (int index)
{
    return {};
}

void PaulstretchpluginAudioProcessor::changeProgramName (int index, const String& newName)
{
}

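// Maps the normalized 0..1 FFT size parameter onto an exponential range
// starting at roughly 128 samples and rounds the result to an FFT-friendly
// length. The largest sizes are only allowed when the maximum prebuffering
// amount is in use.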
void PaulstretchpluginAudioProcessor::setFFTSize(double size)
{
    if (m_prebuffer_amount == 5)
        m_fft_size_to_use = pow(2, 7.0 + size * 14.5);
    else m_fft_size_to_use = pow(2, 7.0 + size * 10.0); // chicken out from allowing huge FFT sizes if not enough prebuffering
    int optim = optimizebufsize(m_fft_size_to_use);
    m_fft_size_to_use = optim;
    m_stretch_source->setFFTSize(optim);
    //Logger::writeToLog(String(m_fft_size_to_use));
}

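// (Re)creates the background buffering source when needed, makes sure the
// buffering thread is running, and prepares the stretch source for playback
// of the given play range.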
void PaulstretchpluginAudioProcessor::startplay(Range<double> playrange, int numoutchans, int maxBlockSize, String& err)
{
    m_stretch_source->setPlayRange(playrange, true);
    int bufamt = m_bufamounts[m_prebuffer_amount];
    if (m_buffering_source != nullptr && numoutchans != m_buffering_source->getNumberOfChannels())
        m_recreate_buffering_source = true;
    if (m_recreate_buffering_source == true)
    {
        m_buffering_source = std::make_unique<MyBufferingAudioSource>(m_stretch_source.get(),
            m_bufferingthread, false, bufamt, numoutchans, false);
        m_recreate_buffering_source = false;
    }
    if (m_bufferingthread.isThreadRunning() == false)
        m_bufferingthread.startThread();
    m_stretch_source->setNumOutChannels(numoutchans);
    m_stretch_source->setFFTSize(m_fft_size_to_use);
    m_stretch_source->setProcessParameters(&m_ppar);
    m_last_outpos_pos = 0.0;
    m_last_in_pos = playrange.getStart() * m_stretch_source->getInfileLengthSeconds();
    m_buffering_source->prepareToPlay(maxBlockSize, getSampleRateChecked());
}

double PaulstretchpluginAudioProcessor::getSampleRateChecked()
{
    if (m_cur_sr < 1.0)
        return 44100.0;
    return m_cur_sr;
}

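// Called by the host before playback starts. Caches the sample rate and block
// size, resizes the input pass-through buffer, reattaches a captured buffer
// if one is in use, and (re)starts playback when the processor is not yet
// ready to play (for example after the output channel count changed).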
void PaulstretchpluginAudioProcessor::prepareToPlay(double sampleRate, int samplesPerBlock)
{
    ScopedLock locker(m_cs);
    m_cur_sr = sampleRate;
    m_curmaxblocksize = samplesPerBlock;
    m_input_buffer.setSize(2, samplesPerBlock);
    int numoutchans = *m_outchansparam;
    if (numoutchans != m_cur_num_out_chans)
        m_ready_to_play = false;
    if (m_using_memory_buffer == true)
    {
        int len = jlimit(100, m_recbuffer.getNumSamples(),
            int(getSampleRateChecked() * (*getFloatParameter(cpi_max_capture_len))));
        m_stretch_source->setAudioBufferAsInputSource(&m_recbuffer,
            getSampleRateChecked(),
            len);
        callGUI(this, [this, len](auto ed) { ed->setAudioBuffer(&m_recbuffer, getSampleRateChecked(), len); }, false);
    }
    if (m_ready_to_play == false)
    {
        setFFTSize(*getFloatParameter(cpi_fftsize));
        m_stretch_source->setProcessParameters(&m_ppar);
        m_stretch_source->setFFTWindowingType(1);
        String err;
        startplay({ *getFloatParameter(cpi_soundstart), *getFloatParameter(cpi_soundend) },
            numoutchans, samplesPerBlock, err);
        m_cur_num_out_chans = numoutchans;
        m_ready_to_play = true;
    }
}

void PaulstretchpluginAudioProcessor::releaseResources()
{
    //m_control->stopplay();
    //m_ready_to_play = false;
}

#ifndef JucePlugin_PreferredChannelConfigurations
bool PaulstretchpluginAudioProcessor::isBusesLayoutSupported (const BusesLayout& layouts) const
{
#if JucePlugin_IsMidiEffect
    ignoreUnused (layouts);
    return true;
#else
    // This is the place where you check if the layout is supported.
    // In this template code we only support mono or stereo.
    if (layouts.getMainOutputChannelSet() != AudioChannelSet::mono()
        && layouts.getMainOutputChannelSet() != AudioChannelSet::stereo())
        return false;
    // This checks if the input layout matches the output layout
#if ! JucePlugin_IsSynth
    if (layouts.getMainOutputChannelSet() != layouts.getMainInputChannelSet())
        return false;
#endif
    return true;
#endif
}
#endif

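// Copies src into dest starting at destbufpos, wrapping around at maxdestpos
// so the destination behaves as a circular capture buffer. If src has fewer
// channels than dest, the source channels are reused in round-robin order.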
void copyAudioBufferWrappingPosition(const AudioBuffer<float>& src, AudioBuffer<float>& dest, int destbufpos, int maxdestpos)
{
    for (int i = 0; i < dest.getNumChannels(); ++i)
    {
        int channel_to_copy = i % src.getNumChannels();
        if (destbufpos + src.getNumSamples() > maxdestpos)
        {
            // The block does not fit before the end of the destination buffer:
            // copy the part that fits, then wrap the remainder to the start.
            int wrappos = (destbufpos + src.getNumSamples()) % maxdestpos;
            int partial_len = src.getNumSamples() - wrappos;
            dest.copyFrom(channel_to_copy, destbufpos, src, channel_to_copy, 0, partial_len);
            dest.copyFrom(channel_to_copy, 0, src, channel_to_copy, partial_len, wrappos);
        }
        else
        {
            dest.copyFrom(channel_to_copy, destbufpos, src, channel_to_copy, 0, src.getNumSamples());
        }
    }
}

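// Audio callback: optionally records the incoming audio into the capture
// buffer, updates the stretch processing parameters from the host-visible
// parameters, renders the stretched audio (directly or via the background
// buffering source), optionally mixes the dry input back in, and counts
// NaN/inf samples in the output for diagnostics.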
void PaulstretchpluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    ScopedLock locker(m_cs);
    AudioPlayHead* phead = getPlayHead();
    if (phead != nullptr)
    {
        phead->getCurrentPosition(m_playposinfo);
    }
    ScopedNoDenormals noDenormals;
    double srtemp = getSampleRate();
    if (srtemp != m_cur_sr)
        m_cur_sr = srtemp;
    const int totalNumInputChannels = getTotalNumInputChannels();
    const int totalNumOutputChannels = getTotalNumOutputChannels();
    for (int i = 0; i < totalNumInputChannels; ++i)
        m_input_buffer.copyFrom(i, 0, buffer, i, 0, buffer.getNumSamples());
    for (int i = totalNumInputChannels; i < totalNumOutputChannels; ++i)
        buffer.clear (i, 0, buffer.getNumSamples());
    if (m_ready_to_play == false)
        return;
    if (m_is_recording == true)
    {
        if (m_playposinfo.isPlaying == false && m_capture_when_host_plays == true)
            return;
        int recbuflenframes = m_max_reclen * getSampleRate();
        copyAudioBufferWrappingPosition(buffer, m_recbuffer, m_rec_pos, recbuflenframes);
        callGUI(this, [this, &buffer](PaulstretchpluginAudioProcessorEditor* ed)
        {
            ed->addAudioBlock(buffer, getSampleRate(), m_rec_pos);
        }, false);
        m_rec_pos = (m_rec_pos + buffer.getNumSamples()) % recbuflenframes;
        return;
    }
    jassert(m_buffering_source != nullptr);
    jassert(m_bufferingthread.isThreadRunning());
    if (m_last_host_playing == false && m_playposinfo.isPlaying)
    {
        m_stretch_source->seekPercent(*getFloatParameter(cpi_soundstart));
        m_last_host_playing = true;
    }
    else if (m_last_host_playing == true && m_playposinfo.isPlaying == false)
    {
        m_last_host_playing = false;
    }
    if (m_play_when_host_plays == true && m_playposinfo.isPlaying == false)
        return;
    m_stretch_source->setMainVolume(*getFloatParameter(cpi_main_volume));
    m_stretch_source->setRate(*getFloatParameter(cpi_stretchamount));
    setFFTSize(*getFloatParameter(cpi_fftsize));
    m_ppar.pitch_shift.cents = *getFloatParameter(cpi_pitchshift) * 100.0;
    m_ppar.freq_shift.Hz = *getFloatParameter(cpi_frequencyshift);
    m_ppar.spread.enabled = *getFloatParameter(cpi_spreadamount) > 0.0f;
    m_ppar.spread.bandwidth = *getFloatParameter(cpi_spreadamount);
    m_ppar.compressor.enabled = *getFloatParameter(cpi_compress) > 0.0f;
    m_ppar.compressor.power = *getFloatParameter(cpi_compress);
    m_ppar.harmonics.enabled = *getFloatParameter(cpi_numharmonics) >= 1.0;
    m_ppar.harmonics.nharmonics = *getFloatParameter(cpi_numharmonics);
    m_ppar.harmonics.freq = *getFloatParameter(cpi_harmonicsfreq);
    m_ppar.harmonics.bandwidth = *getFloatParameter(cpi_harmonicsbw);
    m_ppar.harmonics.gauss = getParameter(cpi_harmonicsgauss);
    m_ppar.octave.om2 = *getFloatParameter(cpi_octavesm2);
    m_ppar.octave.om1 = *getFloatParameter(cpi_octavesm1);
    m_ppar.octave.o0 = *getFloatParameter(cpi_octaves0);
    m_ppar.octave.o1 = *getFloatParameter(cpi_octaves1);
    m_ppar.octave.o15 = *getFloatParameter(cpi_octaves15);
    m_ppar.octave.o2 = *getFloatParameter(cpi_octaves2);
    m_ppar.octave.enabled = true;
    m_ppar.filter.low = *getFloatParameter(cpi_filter_low);
    m_ppar.filter.high = *getFloatParameter(cpi_filter_high);
    m_ppar.tonal_vs_noise.enabled = (*getFloatParameter(cpi_tonalvsnoisebw)) > 0.75;
    m_ppar.tonal_vs_noise.bandwidth = *getFloatParameter(cpi_tonalvsnoisebw);
    m_ppar.tonal_vs_noise.preserve = *getFloatParameter(cpi_tonalvsnoisepreserve);
    m_stretch_source->setOnsetDetection(*getFloatParameter(cpi_onsetdetection));
    m_stretch_source->setLoopXFadeLength(*getFloatParameter(cpi_loopxfadelen));
    double t0 = *getFloatParameter(cpi_soundstart);
    double t1 = *getFloatParameter(cpi_soundend);
    if (t0 > t1)
        std::swap(t0, t1);
    if (t1 - t0 < 0.001)
        t1 = t0 + 0.001;
    m_stretch_source->setPlayRange({ t0, t1 }, true);
    m_stretch_source->setFreezing(getParameter(cpi_freeze));
    m_stretch_source->setPaused(getParameter(cpi_pause_enabled));
    m_stretch_source->setProcessParameters(&m_ppar);
    AudioSourceChannelInfo aif(buffer);
    if (isNonRealtime() || m_use_backgroundbuffering == false)
    {
        m_stretch_source->getNextAudioBlock(aif);
    }
    else
    {
        m_buffering_source->getNextAudioBlock(aif);
    }
    if (m_pass_input_through == true)
    {
        for (int i = 0; i < totalNumInputChannels; ++i)
        {
            buffer.addFrom(i, 0, m_input_buffer, i, 0, buffer.getNumSamples());
        }
    }
    for (int i = 0; i < buffer.getNumChannels(); ++i)
    {
        for (int j = 0; j < buffer.getNumSamples(); ++j)
        {
            float sample = buffer.getSample(i, j);
            if (std::isnan(sample) || std::isinf(sample))
                ++m_abnormal_output_samples;
        }
    }
}

//==============================================================================
bool PaulstretchpluginAudioProcessor::hasEditor() const
{
    return true; // (change this to false if you choose to not supply an editor)
}

AudioProcessorEditor* PaulstretchpluginAudioProcessor::createEditor()
{
    return new PaulstretchpluginAudioProcessorEditor (*this);
}

//==============================================================================
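// Plugin state is stored as a ValueTree: the float parameter values keyed by
// their parameter IDs, the output channel count, the imported file path, the
// spectral processing stage order, and the prebuffering amount (-1 when
// background buffering is disabled).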
void PaulstretchpluginAudioProcessor::getStateInformation (MemoryBlock& destData)
{
    ValueTree paramtree("paulstretch3pluginstate");
    for (int i = 0; i < getNumParameters(); ++i)
    {
        auto par = getFloatParameter(i);
        if (par != nullptr)
        {
            paramtree.setProperty(par->paramID, (double)*par, nullptr);
        }
    }
    paramtree.setProperty(m_outchansparam->paramID, (int)*m_outchansparam, nullptr);
    if (m_current_file != File())
    {
        paramtree.setProperty("importedfile", m_current_file.getFullPathName(), nullptr);
    }
    auto specorder = m_stretch_source->getSpectrumProcessOrder();
    paramtree.setProperty("numspectralstages", (int)specorder.size(), nullptr);
    for (int i = 0; i < specorder.size(); ++i)
    {
        paramtree.setProperty("specorder" + String(i), specorder[i], nullptr);
    }
    if (m_use_backgroundbuffering)
        paramtree.setProperty("prebufamount", m_prebuffer_amount, nullptr);
    else
        paramtree.setProperty("prebufamount", -1, nullptr);
    MemoryOutputStream stream(destData, true);
    paramtree.writeToStream(stream);
}

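// Restores the state written by getStateInformation. The spectral stage order
// and parameter values are applied under the processing lock; the prebuffer
// amount and the imported file (which may trigger a file load) are handled
// after releasing it.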
void PaulstretchpluginAudioProcessor::setStateInformation (const void* data, int sizeInBytes)
{
    ValueTree tree = ValueTree::readFromData(data, sizeInBytes);
    if (tree.isValid())
    {
        {
            ScopedLock locker(m_cs);
            if (tree.hasProperty("numspectralstages"))
            {
                std::vector<int> order;
                int ordersize = tree.getProperty("numspectralstages");
                for (int i = 0; i < ordersize; ++i)
                {
                    order.push_back((int)tree.getProperty("specorder" + String(i)));
                }
                m_stretch_source->setSpectrumProcessOrder(order);
            }
            for (int i = 0; i < getNumParameters(); ++i)
            {
                auto par = getFloatParameter(i);
                if (par != nullptr)
                {
                    double parval = tree.getProperty(par->paramID, (double)*par);
                    *par = parval;
                }
            }
            if (tree.hasProperty(m_outchansparam->paramID))
                *m_outchansparam = tree.getProperty(m_outchansparam->paramID, 2);
        }
        int prebufamt = tree.getProperty("prebufamount", 2);
        if (prebufamt == -1)
            m_use_backgroundbuffering = false;
        else
            setPreBufferAmount(prebufamt);
        String fn = tree.getProperty("importedfile");
        if (fn.isEmpty() == false)
        {
            File f(fn);
            setAudioFile(f);
        }
    }
}

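// Starting a capture allocates and clears the circular capture buffer and
// tells the editor to prepare for incoming audio blocks; stopping it hands
// the captured audio to the stretch source via finishRecording().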
void PaulstretchpluginAudioProcessor::setRecordingEnabled(bool b)
{
    ScopedLock locker(m_cs);
    int lenbufframes = getSampleRateChecked() * m_max_reclen;
    if (b == true)
    {
        m_using_memory_buffer = true;
        m_current_file = File();
        m_recbuffer.setSize(2, m_max_reclen * getSampleRateChecked() + 4096, false, false, true);
        m_recbuffer.clear();
        m_rec_pos = 0;
        callGUI(this, [this, lenbufframes](PaulstretchpluginAudioProcessorEditor* ed)
        {
            ed->beginAddingAudioBlocks(2, getSampleRateChecked(), lenbufframes);
        }, false);
        m_is_recording = true;
    }
    else
    {
        if (m_is_recording == true)
        {
            finishRecording(lenbufframes);
        }
    }
}

double PaulstretchpluginAudioProcessor::getRecordingPositionPercent()
{
    if (m_is_recording == false)
        return 0.0;
    return 1.0 / m_recbuffer.getNumSamples() * m_rec_pos;
}

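// Tries to open the given file with the audio format manager and, if it is
// usable (at most 32 channels and at most 32 bits per sample), makes it the
// stretch source's input. Returns an empty string on success, otherwise an
// error message.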
String PaulstretchpluginAudioProcessor::setAudioFile(File f)
{
    //if (f==File())
    //	return String();
    //if (f==m_current_file && f.getLastModificationTime()==m_current_file_date)
    //	return String();
    auto ai = unique_from_raw(m_afm->createReaderFor(f));
    if (ai != nullptr)
    {
        if (ai->numChannels > 32)
        {
            //MessageManager::callAsync([cb, file]() { cb("Too many channels in file " + file.getFullPathName()); });
            return "Too many channels in file " + f.getFullPathName();
        }
        if (ai->bitsPerSample > 32)
        {
            //MessageManager::callAsync([cb, file]() { cb("Too high bit depth in file " + file.getFullPathName()); });
            return "Too high bit depth in file " + f.getFullPathName();
        }
        ScopedLock locker(m_cs);
        m_stretch_source->setAudioFile(f);
        m_current_file = f;
        m_current_file_date = m_current_file.getLastModificationTime();
        m_using_memory_buffer = false;
        return String();
        //MessageManager::callAsync([cb, file]() { cb(String()); });
    }
    return "Could not open file " + f.getFullPathName();
}

Range<double> PaulstretchpluginAudioProcessor::getTimeSelection()
{
    return { *getFloatParameter(cpi_soundstart), *getFloatParameter(cpi_soundend) };
}

double PaulstretchpluginAudioProcessor::getPreBufferingPercent()
{
    if (m_buffering_source == nullptr)
        return 0.0;
    return m_buffering_source->getPercentReady();
}

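// Timer 1, started in the constructor with a 50 ms interval, polls the
// capture enable, maximum capture length and output channel count parameters
// on the message thread and applies changes that are not safe to make
// directly from the audio thread.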
void PaulstretchpluginAudioProcessor::timerCallback(int id)
{
    if (id == 1)
    {
        bool capture = getParameter(cpi_capture_enabled);
        if (capture == false && m_max_reclen != *getFloatParameter(cpi_max_capture_len))
        {
            m_max_reclen = *getFloatParameter(cpi_max_capture_len);
            //Logger::writeToLog("Changing max capture len to " + String(m_max_reclen));
        }
        if (capture == true && m_is_recording == false)
        {
            setRecordingEnabled(true);
            return;
        }
        if (capture == false && m_is_recording == true)
        {
            setRecordingEnabled(false);
            return;
        }
        if (m_cur_num_out_chans != *m_outchansparam)
        {
            jassert(m_curmaxblocksize > 0);
            ScopedLock locker(m_cs);
            m_ready_to_play = false;
            m_cur_num_out_chans = *m_outchansparam;
            //Logger::writeToLog("Switching to use " + String(m_cur_num_out_chans) + " out channels");
            String err;
            startplay({ *getFloatParameter(cpi_soundstart), *getFloatParameter(cpi_soundend) },
                m_cur_num_out_chans, m_curmaxblocksize, err);
            m_ready_to_play = true;
        }
    }
}

void PaulstretchpluginAudioProcessor::finishRecording(int lenrecording)
{
    m_is_recording = false;
    m_stretch_source->setAudioBufferAsInputSource(&m_recbuffer, getSampleRateChecked(), lenrecording);
    m_stretch_source->setPlayRange({ *getFloatParameter(cpi_soundstart), *getFloatParameter(cpi_soundend) }, true);
    auto ed = dynamic_cast<PaulstretchpluginAudioProcessorEditor*>(getActiveEditor());
    if (ed)
    {
        //ed->setAudioBuffer(&m_recbuffer, getSampleRate(), lenrecording);
    }
}

//==============================================================================
// This creates new instances of the plugin..
AudioProcessor* JUCE_CALLTYPE createPluginFilter()
{
    return new PaulstretchpluginAudioProcessor();
}