/*
  ==============================================================================
    This file was auto-generated!
    It contains the basic framework code for a JUCE plugin processor.
  ==============================================================================
*/

#include "PluginProcessor.h"
#include "PluginEditor.h"
#include <set>

#ifdef WIN32
#undef min
#undef max
#endif

std::set<PaulstretchpluginAudioProcessor*> g_activeprocessors;

template<typename F>
void callGUI(AudioProcessor* ap, F&& f, bool async)
{
    auto ed = dynamic_cast<PaulstretchpluginAudioProcessorEditor*>(ap->getActiveEditor());
    if (ed)
    {
        if (async == false)
            f(ed);
        else
            MessageManager::callAsync([ed, f]() { f(ed); });
    }
}
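
// Helper for choosing FFT/buffer sizes: searches down or up from n for the nearest
// integer whose only prime factors are 2, 3, 5, 7 and 11 (sizes the FFT handles
// efficiently), never returning less than 4.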
int get_optimized_updown(int n, bool up)
{
    int orig_n = n;
    while (true)
    {
        n = orig_n;
        while (!(n % 11)) n /= 11;
        while (!(n % 7)) n /= 7;
        while (!(n % 5)) n /= 5;
        while (!(n % 3)) n /= 3;
        while (!(n % 2)) n /= 2;
        if (n < 2) break;
        if (up) orig_n++;
        else orig_n--;
        if (orig_n < 4) return 4;
    }
    return orig_n;
}
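
// Round n to whichever of the downward/upward "FFT friendly" candidates is closer.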
int optimizebufsize(int n)
{
    int n1 = get_optimized_updown(n, false);
    int n2 = get_optimized_updown(n, true);
    if ((n - n1) < (n2 - n)) return n1;
    else return n2;
}
//==============================================================================
PaulstretchpluginAudioProcessor::PaulstretchpluginAudioProcessor()
#ifndef JucePlugin_PreferredChannelConfigurations
    : AudioProcessor (BusesProperties()
#if ! JucePlugin_IsMidiEffect
 #if ! JucePlugin_IsSynth
                      .withInput ("Input", AudioChannelSet::stereo(), true)
 #endif
                      .withOutput ("Output", AudioChannelSet::stereo(), true)
#endif
                      ),
      m_bufferingthread("pspluginprebufferthread")
#else
    : m_bufferingthread("pspluginprebufferthread")
#endif
{
    g_activeprocessors.insert(this);
    m_recbuffer.setSize(2, 44100);
    m_recbuffer.clear();
    if (m_afm->getNumKnownFormats() == 0)
        m_afm->registerBasicFormats();
    m_stretch_source = std::make_unique<StretchAudioSource>(2, m_afm);
    setPreBufferAmount(2);
    m_ppar.pitch_shift.enabled = true;
    m_ppar.freq_shift.enabled = true;
    m_ppar.filter.enabled = true;
    m_ppar.compressor.enabled = true;
    m_stretch_source->setOnsetDetection(0.0);
    m_stretch_source->setLoopingEnabled(true);
    m_stretch_source->setFFTWindowingType(1);
    addParameter(new AudioParameterFloat("mainvolume0", "Main volume", -24.0f, 12.0f, -3.0f)); // 0
    addParameter(new AudioParameterFloat("stretchamount0", "Stretch amount",
        NormalisableRange<float>(0.1f, 1024.0f, 0.01f, 0.25), 1.0f)); // 1
    addParameter(new AudioParameterFloat("fftsize0", "FFT size", 0.0f, 1.0f, 0.7f)); // 2
    addParameter(new AudioParameterFloat("pitchshift0", "Pitch shift", -24.0f, 24.0f, 0.0f)); // 3
    addParameter(new AudioParameterFloat("freqshift0", "Frequency shift", -1000.0f, 1000.0f, 0.0f)); // 4
    addParameter(new AudioParameterFloat("playrange_start0", "Sound start", 0.0f, 1.0f, 0.0f)); // 5
    addParameter(new AudioParameterFloat("playrange_end0", "Sound end", 0.0f, 1.0f, 1.0f)); // 6
    addParameter(new AudioParameterBool("freeze0", "Freeze", false)); // 7
    addParameter(new AudioParameterFloat("spread0", "Frequency spread", 0.0f, 1.0f, 0.0f)); // 8
    addParameter(new AudioParameterFloat("compress0", "Compress", 0.0f, 1.0f, 0.0f)); // 9
    addParameter(new AudioParameterFloat("loopxfadelen0", "Loop xfade length", 0.0f, 1.0f, 0.0f)); // 10
    addParameter(new AudioParameterFloat("numharmonics0", "Num harmonics", 0.0f, 100.0f, 0.0f)); // 11
    addParameter(new AudioParameterFloat("harmonicsfreq0", "Harmonics base freq",
        NormalisableRange<float>(1.0f, 5000.0f, 1.00f, 0.5), 128.0f)); // 12
    addParameter(new AudioParameterFloat("harmonicsbw0", "Harmonics bandwidth", 0.1f, 200.0f, 25.0f)); // 13
    addParameter(new AudioParameterBool("harmonicsgauss0", "Gaussian harmonics", false)); // 14
    addParameter(new AudioParameterFloat("octavemixm2_0", "2 octaves down level", 0.0f, 1.0f, 0.0f)); // 15
    addParameter(new AudioParameterFloat("octavemixm1_0", "Octave down level", 0.0f, 1.0f, 0.0f)); // 16
    addParameter(new AudioParameterFloat("octavemix0_0", "Normal pitch level", 0.0f, 1.0f, 1.0f)); // 17
    addParameter(new AudioParameterFloat("octavemix1_0", "1 octave up level", 0.0f, 1.0f, 0.0f)); // 18
    addParameter(new AudioParameterFloat("octavemix15_0", "1 octave and fifth up level", 0.0f, 1.0f, 0.0f)); // 19
    addParameter(new AudioParameterFloat("octavemix2_0", "2 octaves up level", 0.0f, 1.0f, 0.0f)); // 20
    addParameter(new AudioParameterFloat("tonalvsnoisebw_0", "Tonal vs Noise BW", 0.74f, 1.0f, 0.74f)); // 21
    addParameter(new AudioParameterFloat("tonalvsnoisepreserve_0", "Tonal vs Noise preserve", -1.0f, 1.0f, 0.5f)); // 22
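    // The filter cutoff parameters use a piecewise 0..1 mapping: the lower half of the
    // range covers 20 Hz to 1 kHz, the upper half 1 kHz to 20 kHz.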
    auto filt_convertFrom0To1Func = [](float rangemin, float rangemax, float value)
    {
        if (value < 0.5f)
            return jmap<float>(value, 0.0f, 0.5f, 20.0f, 1000.0f);
        return jmap<float>(value, 0.5f, 1.0f, 1000.0f, 20000.0f);
    };
    auto filt_convertTo0To1Func = [](float rangemin, float rangemax, float value)
    {
        if (value < 1000.0f)
            return jmap<float>(value, 20.0f, 1000.0f, 0.0f, 0.5f);
        return jmap<float>(value, 1000.0f, 20000.0f, 0.5f, 1.0f);
    };
    addParameter(new AudioParameterFloat("filter_low_0", "Filter low",
        NormalisableRange<float>(20.0f, 20000.0f,
            filt_convertFrom0To1Func, filt_convertTo0To1Func), 20.0f)); // 23
    addParameter(new AudioParameterFloat("filter_high_0", "Filter high",
        NormalisableRange<float>(20.0f, 20000.0f,
            filt_convertFrom0To1Func, filt_convertTo0To1Func), 20000.0f)); // 24
    addParameter(new AudioParameterFloat("onsetdetect_0", "Onset detection", 0.0f, 1.0f, 0.0f)); // 25
    addParameter(new AudioParameterBool("capture_enabled0", "Capture", false)); // 26
    m_outchansparam = new AudioParameterInt("numoutchans0", "Num output channels", 2, 8, 2); // 27
    addParameter(m_outchansparam); // 27
    addParameter(new AudioParameterBool("pause_enabled0", "Pause", false)); // 28
    startTimer(1, 50);
}
PaulstretchpluginAudioProcessor::~PaulstretchpluginAudioProcessor()
{
    g_activeprocessors.erase(this);
    m_bufferingthread.stopThread(1000);
}

void PaulstretchpluginAudioProcessor::setPreBufferAmount(int x)
{
    int temp = jlimit(0, 5, x);
    if (temp != m_prebuffer_amount)
    {
        m_prebuffer_amount = temp;
        m_recreate_buffering_source = true;
    }
}

//==============================================================================
const String PaulstretchpluginAudioProcessor::getName() const
{
    return JucePlugin_Name;
}

bool PaulstretchpluginAudioProcessor::acceptsMidi() const
{
#if JucePlugin_WantsMidiInput
    return true;
#else
    return false;
#endif
}

bool PaulstretchpluginAudioProcessor::producesMidi() const
{
#if JucePlugin_ProducesMidiOutput
    return true;
#else
    return false;
#endif
}

bool PaulstretchpluginAudioProcessor::isMidiEffect() const
{
#if JucePlugin_IsMidiEffect
    return true;
#else
    return false;
#endif
}

double PaulstretchpluginAudioProcessor::getTailLengthSeconds() const
{
    return 0.0;
    //return (double)m_bufamounts[m_prebuffer_amount]/getSampleRate();
}

int PaulstretchpluginAudioProcessor::getNumPrograms()
{
    return 1; // NB: some hosts don't cope very well if you tell them there are 0 programs,
              // so this should be at least 1, even if you're not really implementing programs.
}

int PaulstretchpluginAudioProcessor::getCurrentProgram()
{
    return 0;
}

void PaulstretchpluginAudioProcessor::setCurrentProgram (int index)
{
}

const String PaulstretchpluginAudioProcessor::getProgramName (int index)
{
    return {};
}

void PaulstretchpluginAudioProcessor::changeProgramName (int index, const String& newName)
{
}
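
// Maps the normalized 0..1 FFT size parameter exponentially to a sample count and rounds
// it to an FFT-friendly size; the largest sizes are only allowed at the maximum
// prebuffering setting.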
void PaulstretchpluginAudioProcessor::setFFTSize(double size)
{
    if (m_prebuffer_amount == 5)
        m_fft_size_to_use = pow(2, 7.0 + size * 14.5);
    else
        m_fft_size_to_use = pow(2, 7.0 + size * 10.0); // chicken out from allowing huge FFT sizes if not enough prebuffering
    int optim = optimizebufsize(m_fft_size_to_use);
    m_fft_size_to_use = optim;
    m_stretch_source->setFFTSize(optim);
    //Logger::writeToLog(String(m_fft_size_to_use));
}
void PaulstretchpluginAudioProcessor::startplay(Range<double> playrange, int numoutchans, int maxBlockSize, String& err)
{
    m_stretch_source->setPlayRange(playrange, true);
    int bufamt = m_bufamounts[m_prebuffer_amount];
    if (m_buffering_source != nullptr && numoutchans != m_buffering_source->getNumberOfChannels())
        m_recreate_buffering_source = true;
    if (m_recreate_buffering_source == true)
    {
        m_buffering_source = std::make_unique<MyBufferingAudioSource>(m_stretch_source.get(),
            m_bufferingthread, false, bufamt, numoutchans, false);
        m_recreate_buffering_source = false;
    }
    if (m_bufferingthread.isThreadRunning() == false)
        m_bufferingthread.startThread();
    m_stretch_source->setNumOutChannels(numoutchans);
    m_stretch_source->setFFTSize(m_fft_size_to_use);
    m_stretch_source->setProcessParameters(&m_ppar);
    m_last_outpos_pos = 0.0;
    m_last_in_pos = playrange.getStart() * m_stretch_source->getInfileLengthSeconds();
    m_buffering_source->prepareToPlay(maxBlockSize, getSampleRateChecked());
}

double PaulstretchpluginAudioProcessor::getSampleRateChecked()
{
    if (m_cur_sr < 1.0)
        return 44100.0;
    return m_cur_sr;
}
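
// Called by the host before playback starts: re-applies the in-memory capture buffer if
// one is in use and rebuilds the stretch/buffering chain when the processor is not ready
// to play (for example after the output channel count has changed).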
void PaulstretchpluginAudioProcessor::prepareToPlay(double sampleRate, int samplesPerBlock)
{
    ScopedLock locker(m_cs);
    m_cur_sr = sampleRate;
    m_curmaxblocksize = samplesPerBlock;
    int numoutchans = *m_outchansparam;
    if (numoutchans != m_cur_num_out_chans)
        m_ready_to_play = false;
    if (m_using_memory_buffer == true)
    {
        int len = jlimit(100, m_recbuffer.getNumSamples(), m_rec_pos);
        m_stretch_source->setAudioBufferAsInputSource(&m_recbuffer,
            getSampleRateChecked(),
            len);
        callGUI(this, [this, len](auto ed) { ed->setAudioBuffer(&m_recbuffer, getSampleRateChecked(), len); }, false);
    }
    if (m_ready_to_play == false)
    {
        setFFTSize(*getFloatParameter(cpi_fftsize));
        m_stretch_source->setProcessParameters(&m_ppar);
        m_stretch_source->setFFTWindowingType(1);
        String err;
        startplay({ *getFloatParameter(cpi_soundstart), *getFloatParameter(cpi_soundend) },
            numoutchans, samplesPerBlock, err);
        m_cur_num_out_chans = numoutchans;
        m_ready_to_play = true;
    }
}

void PaulstretchpluginAudioProcessor::releaseResources()
{
    //m_control->stopplay();
    //m_ready_to_play = false;
}
#ifndef JucePlugin_PreferredChannelConfigurations
bool PaulstretchpluginAudioProcessor::isBusesLayoutSupported (const BusesLayout& layouts) const
{
#if JucePlugin_IsMidiEffect
    ignoreUnused (layouts);
    return true;
#else
    // This is the place where you check if the layout is supported.
    // In this template code we only support mono or stereo.
    if (layouts.getMainOutputChannelSet() != AudioChannelSet::mono()
        && layouts.getMainOutputChannelSet() != AudioChannelSet::stereo())
        return false;
    // This checks if the input layout matches the output layout
#if ! JucePlugin_IsSynth
    if (layouts.getMainOutputChannelSet() != layouts.getMainInputChannelSet())
        return false;
#endif
    return true;
#endif
}
#endif
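
// Copies a block of audio into a circular destination buffer, splitting the copy in two
// when it would run past the end of the destination.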
void copyAudioBufferWrappingPosition(const AudioBuffer<float>& src, AudioBuffer<float>& dest, int destbufpos, int maxdestpos)
{
    for (int i = 0; i < dest.getNumChannels(); ++i)
    {
        int channel_to_copy = i % src.getNumChannels();
        if (destbufpos + src.getNumSamples() > maxdestpos)
        {
            int wrappos = (destbufpos + src.getNumSamples()) % maxdestpos;
            int partial_len = src.getNumSamples() - wrappos;
            // copy the part that still fits before the end of the destination...
            dest.copyFrom(channel_to_copy, destbufpos, src, channel_to_copy, 0, partial_len);
            // ...and wrap the remaining samples back to the start of the destination
            dest.copyFrom(channel_to_copy, 0, src, channel_to_copy, partial_len, wrappos);
        }
        else
        {
            dest.copyFrom(channel_to_copy, destbufpos, src, channel_to_copy, 0, src.getNumSamples());
        }
    }
}
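
// Audio thread entry point: while capturing, incoming audio is written into the record
// buffer; otherwise the current parameter values are pushed to the stretch engine and a
// block is pulled from the prebuffering source.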
void PaulstretchpluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    ScopedLock locker(m_cs);
    ScopedNoDenormals noDenormals;
    double srtemp = getSampleRate();
    if (srtemp != m_cur_sr)
        m_cur_sr = srtemp;
    const int totalNumInputChannels = getTotalNumInputChannels();
    const int totalNumOutputChannels = getTotalNumOutputChannels();
    for (int i = totalNumInputChannels; i < totalNumOutputChannels; ++i)
        buffer.clear (i, 0, buffer.getNumSamples());
    if (m_ready_to_play == false)
        return;
    if (m_is_recording == true)
    {
        int recbuflenframes = m_max_reclen * getSampleRate();
        copyAudioBufferWrappingPosition(buffer, m_recbuffer, m_rec_pos, recbuflenframes);
        callGUI(this, [this, &buffer](PaulstretchpluginAudioProcessorEditor* ed)
        {
            ed->addAudioBlock(buffer, getSampleRate(), m_rec_pos);
        }, false);
        m_rec_pos = (m_rec_pos + buffer.getNumSamples()) % recbuflenframes;
        return;
    }
    jassert(m_buffering_source != nullptr);
    jassert(m_bufferingthread.isThreadRunning());
    m_stretch_source->setMainVolume(*getFloatParameter(cpi_main_volume));
    m_stretch_source->setRate(*getFloatParameter(cpi_stretchamount));
    setFFTSize(*getFloatParameter(cpi_fftsize));
    m_ppar.pitch_shift.cents = *getFloatParameter(cpi_pitchshift) * 100.0;
    m_ppar.freq_shift.Hz = *getFloatParameter(cpi_frequencyshift);
    m_ppar.spread.enabled = *getFloatParameter(cpi_spreadamount) > 0.0f;
    m_ppar.spread.bandwidth = *getFloatParameter(cpi_spreadamount);
    m_ppar.compressor.enabled = *getFloatParameter(cpi_compress) > 0.0f;
    m_ppar.compressor.power = *getFloatParameter(cpi_compress);
    m_ppar.harmonics.enabled = *getFloatParameter(cpi_numharmonics) >= 1.0;
    m_ppar.harmonics.nharmonics = *getFloatParameter(cpi_numharmonics);
    m_ppar.harmonics.freq = *getFloatParameter(cpi_harmonicsfreq);
    m_ppar.harmonics.bandwidth = *getFloatParameter(cpi_harmonicsbw);
    m_ppar.harmonics.gauss = getParameter(cpi_harmonicsgauss);
    m_ppar.octave.om2 = *getFloatParameter(cpi_octavesm2);
    m_ppar.octave.om1 = *getFloatParameter(cpi_octavesm1);
    m_ppar.octave.o0 = *getFloatParameter(cpi_octaves0);
    m_ppar.octave.o1 = *getFloatParameter(cpi_octaves1);
    m_ppar.octave.o15 = *getFloatParameter(cpi_octaves15);
    m_ppar.octave.o2 = *getFloatParameter(cpi_octaves2);
    m_ppar.octave.enabled = true;
    m_ppar.filter.low = *getFloatParameter(cpi_filter_low);
    m_ppar.filter.high = *getFloatParameter(cpi_filter_high);
    m_ppar.tonal_vs_noise.enabled = (*getFloatParameter(cpi_tonalvsnoisebw)) > 0.75;
    m_ppar.tonal_vs_noise.bandwidth = *getFloatParameter(cpi_tonalvsnoisebw);
    m_ppar.tonal_vs_noise.preserve = *getFloatParameter(cpi_tonalvsnoisepreserve);
    m_stretch_source->setOnsetDetection(*getFloatParameter(cpi_onsetdetection));
    m_stretch_source->setLoopXFadeLength(*getFloatParameter(cpi_loopxfadelen));
    double t0 = *getFloatParameter(cpi_soundstart);
    double t1 = *getFloatParameter(cpi_soundend);
    if (t0 > t1)
        std::swap(t0, t1);
    if (t1 - t0 < 0.001)
        t1 = t0 + 0.001;
    m_stretch_source->setPlayRange({ t0, t1 }, true);
    m_stretch_source->setFreezing(getParameter(cpi_freeze));
    m_stretch_source->setPaused(getParameter(cpi_pause_enabled));
    m_stretch_source->setProcessParameters(&m_ppar);
    AudioSourceChannelInfo aif(buffer);
    m_buffering_source->getNextAudioBlock(aif);
}

//==============================================================================
bool PaulstretchpluginAudioProcessor::hasEditor() const
{
    return true; // (change this to false if you choose to not supply an editor)
}

AudioProcessorEditor* PaulstretchpluginAudioProcessor::createEditor()
{
    return new PaulstretchpluginAudioProcessorEditor (*this);
}

//==============================================================================
void PaulstretchpluginAudioProcessor::getStateInformation (MemoryBlock& destData)
{
    ValueTree paramtree("paulstretch3pluginstate");
    for (int i = 0; i < getNumParameters(); ++i)
    {
        auto par = getFloatParameter(i);
        if (par != nullptr)
        {
            paramtree.setProperty(par->paramID, (double)*par, nullptr);
        }
    }
    paramtree.setProperty(m_outchansparam->paramID, (int)*m_outchansparam, nullptr);
    if (m_current_file != File())
    {
        paramtree.setProperty("importedfile", m_current_file.getFullPathName(), nullptr);
    }
    auto specorder = m_stretch_source->getSpectrumProcessOrder();
    paramtree.setProperty("numspectralstages", (int)specorder.size(), nullptr);
    for (int i = 0; i < specorder.size(); ++i)
    {
        paramtree.setProperty("specorder" + String(i), specorder[i], nullptr);
    }
    MemoryOutputStream stream(destData, true);
    paramtree.writeToStream(stream);
}
void PaulstretchpluginAudioProcessor::setStateInformation (const void* data, int sizeInBytes)
{
    ValueTree tree = ValueTree::readFromData(data, sizeInBytes);
    if (tree.isValid())
    {
        {
            ScopedLock locker(m_cs);
            if (tree.hasProperty("numspectralstages"))
            {
                std::vector<int> order;
                int ordersize = tree.getProperty("numspectralstages");
                for (int i = 0; i < ordersize; ++i)
                {
                    order.push_back((int)tree.getProperty("specorder" + String(i)));
                }
                m_stretch_source->setSpectrumProcessOrder(order);
            }
            for (int i = 0; i < getNumParameters(); ++i)
            {
                auto par = getFloatParameter(i);
                if (par != nullptr)
                {
                    double parval = tree.getProperty(par->paramID, (double)*par);
                    *par = parval;
                }
            }
            if (tree.hasProperty(m_outchansparam->paramID))
                *m_outchansparam = tree.getProperty(m_outchansparam->paramID, 2);
        }
        String fn = tree.getProperty("importedfile");
        if (fn.isEmpty() == false)
        {
            File f(fn);
            setAudioFile(f);
        }
    }
}
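
// Starts or stops capturing the plugin input into the circular record buffer; the buffer
// is sized for m_max_reclen seconds plus a little headroom.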
void PaulstretchpluginAudioProcessor::setRecordingEnabled(bool b)
{
    ScopedLock locker(m_cs);
    int lenbufframes = getSampleRateChecked() * m_max_reclen;
    if (b == true)
    {
        m_using_memory_buffer = true;
        m_current_file = File();
        m_recbuffer.setSize(2, m_max_reclen * getSampleRateChecked() + 4096, false, false, true);
        m_recbuffer.clear();
        m_rec_pos = 0;
        callGUI(this, [this, lenbufframes](PaulstretchpluginAudioProcessorEditor* ed)
        {
            ed->beginAddingAudioBlocks(2, getSampleRateChecked(), lenbufframes);
        }, false);
        m_is_recording = true;
    }
    else
    {
        if (m_is_recording == true)
        {
            finishRecording(lenbufframes);
        }
    }
}

double PaulstretchpluginAudioProcessor::getRecordingPositionPercent()
{
    if (m_is_recording == false)
        return 0.0;
    return (double)m_rec_pos / m_recbuffer.getNumSamples();
}

String PaulstretchpluginAudioProcessor::setAudioFile(File f)
{
    //if (f==File())
    //	return String();
    //if (f==m_current_file && f.getLastModificationTime()==m_current_file_date)
    //	return String();
    auto ai = unique_from_raw(m_afm->createReaderFor(f));
    if (ai != nullptr)
    {
        if (ai->numChannels > 32)
        {
            //MessageManager::callAsync([cb, file]() { cb("Too many channels in file " + file.getFullPathName()); });
            return "Too many channels in file " + f.getFullPathName();
        }
        if (ai->bitsPerSample > 32)
        {
            //MessageManager::callAsync([cb, file]() { cb("Too high bit depth in file " + file.getFullPathName()); });
            return "Too high bit depth in file " + f.getFullPathName();
        }
        ScopedLock locker(m_cs);
        m_stretch_source->setAudioFile(f);
        m_current_file = f;
        m_current_file_date = m_current_file.getLastModificationTime();
        m_using_memory_buffer = false;
        return String();
        //MessageManager::callAsync([cb, file]() { cb(String()); });
    }
    return "Could not open file " + f.getFullPathName();
}

Range<double> PaulstretchpluginAudioProcessor::getTimeSelection()
{
    return { *getFloatParameter(cpi_soundstart), *getFloatParameter(cpi_soundend) };
}

double PaulstretchpluginAudioProcessor::getPreBufferingPercent()
{
    if (m_buffering_source == nullptr)
        return 0.0;
    return m_buffering_source->getPercentReady();
}
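
// Message-thread timer: keeps the capture state in sync with the Capture parameter and
// rebuilds playback when the requested output channel count changes.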
void PaulstretchpluginAudioProcessor::timerCallback(int id)
{
    if (id == 1)
    {
        bool capture = getParameter(cpi_capture_enabled);
        if (capture == true && m_is_recording == false)
        {
            setRecordingEnabled(true);
            return;
        }
        if (capture == false && m_is_recording == true)
        {
            setRecordingEnabled(false);
            return;
        }
        if (m_cur_num_out_chans != *m_outchansparam)
        {
            jassert(m_curmaxblocksize > 0);
            ScopedLock locker(m_cs);
            m_ready_to_play = false;
            m_cur_num_out_chans = *m_outchansparam;
            //Logger::writeToLog("Switching to use " + String(m_cur_num_out_chans) + " out channels");
            String err;
            startplay({ *getFloatParameter(cpi_soundstart), *getFloatParameter(cpi_soundend) },
                m_cur_num_out_chans, m_curmaxblocksize, err);
            m_ready_to_play = true;
        }
    }
}

void PaulstretchpluginAudioProcessor::finishRecording(int lenrecording)
{
    m_is_recording = false;
    m_stretch_source->setAudioBufferAsInputSource(&m_recbuffer, getSampleRateChecked(), lenrecording);
    m_stretch_source->setPlayRange({ *getFloatParameter(cpi_soundstart), *getFloatParameter(cpi_soundend) }, true);
    auto ed = dynamic_cast<PaulstretchpluginAudioProcessorEditor*>(getActiveEditor());
    if (ed)
    {
        //ed->setAudioBuffer(&m_recbuffer, getSampleRate(), lenrecording);
    }
}

//==============================================================================
// This creates new instances of the plugin..
AudioProcessor* JUCE_CALLTYPE createPluginFilter()
{
    return new PaulstretchpluginAudioProcessor();
}