

/*
Copyright (C) 2006-2011 Nasca Octavian Paul
Author: Nasca Octavian Paul
Copyright (C) 2017 Xenakios

This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License
as published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License (version 2) for more details.

You should have received a copy of the GNU General Public License (version 2)
along with this program; if not, write to the Free Software Foundation,
Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/

#include "PluginProcessor.h"
#include "PluginEditor.h"
#include <set>

#ifdef WIN32
#undef min
#undef max
#endif
String g_plugintitle{ "PaulXStretch 1.0.0 preview 4" };

std::set<PaulstretchpluginAudioProcessor*> g_activeprocessors;

struct PresetEntry
{
    PresetEntry(String name, String data) : m_name(name), m_data(data) {}
    String m_name;
    String m_data;
};

static const PresetEntry g_presets[] =
{
  33. {"Factory reset","cGF1bHN0cmV0Y2gzcGx1Z2luc3RhdGUAASZtYWludm9sdW1lMAABCQQAAACAi1cewHN0cmV0Y2hhbW91bnQwAAEJBAAAACAAAPA/ZmZ0c2l6ZTAAAQkEAAAAYGZm5j9waXRjaHNoaWZ0MAABCQQAAAAAAAAAAGZyZXFzaGlmdDAAAQkEAAAAAAAAAABwbGF5cmFuZ2Vfc3RhcnQwAAEJBAAAAAAAAAAAcGxheXJhbmdlX2VuZDAAAQkEAAAAAAAA8D9zcHJlYWQwAAEJBAAAAAAAAAAAY29tcHJlc3MwAAEJBAAAAAAAAAAAbG9vcHhmYWRlbGVuMAABCQQAAABA4XqEP251bWhhcm1vbmljczAAAQkEAAAAAABAWUBoYXJtb25pY3NmcmVxMAABCQQAAAAAAABgQGhhcm1vbmljc2J3MAABCQQAAAAAAAA5QG9jdGF2ZW1peG0yXzAAAQkEAAAAAAAAAABvY3RhdmVtaXhtMV8wAAEJBAAAAAAAAAAAb2N0YXZlbWl4MF8wAAEJBAAAAAAAAPA/b2N0YXZlbWl4MV8wAAEJBAAAAAAAAAAAb2N0YXZlbWl4MTVfMAABCQQAAAAAAAAAAG9jdGF2ZW1peDJfMAABCQQAAAAAAAAAAHRvbmFsdnNub2lzZWJ3XzAAAQkEAAAAgBSu5z90b25hbHZzbm9pc2VwcmVzZXJ2ZV8wAAEJBAAAAAAAAOA/ZmlsdGVyX2xvd18wAAEJBAAAAAAAADRAZmlsdGVyX2hpZ2hfMAABCQQAAAAAAIjTQG9uc2V0ZGV0ZWN0XzAAAQkEAAAAAAAAAABtYXhjYXB0dXJlbGVuXzAAAQkEAAAAAAAAJEBudW1vdXRjaGFuczAAAQUBAgAAAGltcG9ydGVkZmlsZQABKAVDOlxNdXNpY0F1ZGlvXHNvdXJjZXNhbXBsZXNcc2hlaWxhLndhdgBudW1zcGVjdHJhbHN0YWdlcwABBQEIAAAAc3BlY29yZGVyMAABBQEDAAAAc3BlY29yZGVyMQABBQEAAAAAc3BlY29yZGVyMgABBQEBAAAAc3BlY29yZGVyMwABBQECAAAAc3BlY29yZGVyNAABBQEEAAAAc3BlY29yZGVyNQABBQEFAAAAc3BlY29yZGVyNgABBQEGAAAAc3BlY29yZGVyNwABBQEHAAAAcHJlYnVmYW1vdW50AAEFAQIAAABsb2FkZmlsZXdpdGhzdGF0ZQABAQIAzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3NzQ=="},
  34. {"Chipmunk","cGF1bHN0cmV0Y2gzcGx1Z2luc3RhdGUAASZtYWludm9sdW1lMAABCQQAAACAi1cewHN0cmV0Y2hhbW91bnQwAAEJBAAAAAAAAOA/ZmZ0c2l6ZTAAAQkEAAAAAAAA4D9waXRjaHNoaWZ0MAABCQQAAAAAAAAoQGZyZXFzaGlmdDAAAQkEAAAAAAAAAABwbGF5cmFuZ2Vfc3RhcnQwAAEJBAAAAAAAAAAAcGxheXJhbmdlX2VuZDAAAQkEAAAAAAAA8D9zcHJlYWQwAAEJBAAAAAAAAAAAY29tcHJlc3MwAAEJBAAAAAAAAAAAbG9vcHhmYWRlbGVuMAABCQQAAABA4XqUP251bWhhcm1vbmljczAAAQkEAAAAAABAWUBoYXJtb25pY3NmcmVxMAABCQQAAAAAAABgQGhhcm1vbmljc2J3MAABCQQAAAAAAAA5QG9jdGF2ZW1peG0yXzAAAQkEAAAAAAAAAABvY3RhdmVtaXhtMV8wAAEJBAAAAAAAAAAAb2N0YXZlbWl4MF8wAAEJBAAAAAAAAPA/b2N0YXZlbWl4MV8wAAEJBAAAAAAAAAAAb2N0YXZlbWl4MTVfMAABCQQAAAAAAAAAAG9jdGF2ZW1peDJfMAABCQQAAAAAAAAAAHRvbmFsdnNub2lzZWJ3XzAAAQkEAAAAgBSu5z90b25hbHZzbm9pc2VwcmVzZXJ2ZV8wAAEJBAAAAAAAAOA/ZmlsdGVyX2xvd18wAAEJBAAAAAAAADRAZmlsdGVyX2hpZ2hfMAABCQQAAAAAAIjTQG9uc2V0ZGV0ZWN0XzAAAQkEAAAAAAAAAABtYXhjYXB0dXJlbGVuXzAAAQkEAAAAAAAAJEBudW1vdXRjaGFuczAAAQUBAgAAAGltcG9ydGVkZmlsZQABKAVDOlxNdXNpY0F1ZGlvXHNvdXJjZXNhbXBsZXNcc2hlaWxhLndhdgBudW1zcGVjdHJhbHN0YWdlcwABBQEIAAAAc3BlY29yZGVyMAABBQEAAAAAc3BlY29yZGVyMQABBQEBAAAAc3BlY29yZGVyMgABBQECAAAAc3BlY29yZGVyMwABBQEDAAAAc3BlY29yZGVyNAABBQEEAAAAc3BlY29yZGVyNQABBQEFAAAAc3BlY29yZGVyNgABBQEGAAAAc3BlY29yZGVyNwABBQEHAAAAcHJlYnVmYW1vdW50AAEFAQIAAABsb2FkZmlsZXdpdGhzdGF0ZQABAQIAzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3NzQ=="},
  35. {"Chipmunk harmonic series","cGF1bHN0cmV0Y2gzcGx1Z2luc3RhdGUAASZtYWludm9sdW1lMAABCQQAAACAi1cewHN0cmV0Y2hhbW91bnQwAAEJBAAAAAAAAOA/ZmZ0c2l6ZTAAAQkEAAAAoJmZ2T9waXRjaHNoaWZ0MAABCQQAAAAAAAAoQGZyZXFzaGlmdDAAAQkEAAAAAAAAAABwbGF5cmFuZ2Vfc3RhcnQwAAEJBAAAAAAAAAAAcGxheXJhbmdlX2VuZDAAAQkEAAAAAAAA8D9zcHJlYWQwAAEJBAAAAAAAAAAAY29tcHJlc3MwAAEJBAAAAAAAAAAAbG9vcHhmYWRlbGVuMAABCQQAAABA4XqUP251bWhhcm1vbmljczAAAQkEAAAAQMTsSkBoYXJtb25pY3NmcmVxMAABCQQAAAAAAABQQGhhcm1vbmljc2J3MAABCQQAAAAAAAA5QG9jdGF2ZW1peG0yXzAAAQkEAAAAAAAAAABvY3RhdmVtaXhtMV8wAAEJBAAAAAAAAAAAb2N0YXZlbWl4MF8wAAEJBAAAAAAAAPA/b2N0YXZlbWl4MV8wAAEJBAAAAAAAAAAAb2N0YXZlbWl4MTVfMAABCQQAAAAAAAAAAG9jdGF2ZW1peDJfMAABCQQAAAAAAAAAAHRvbmFsdnNub2lzZWJ3XzAAAQkEAAAAgBSu5z90b25hbHZzbm9pc2VwcmVzZXJ2ZV8wAAEJBAAAAAAAAOA/ZmlsdGVyX2xvd18wAAEJBAAAAAAAADRAZmlsdGVyX2hpZ2hfMAABCQQAAAAAAIjTQG9uc2V0ZGV0ZWN0XzAAAQkEAAAAAAAAAABtYXhjYXB0dXJlbGVuXzAAAQkEAAAAAAAAJEBudW1vdXRjaGFuczAAAQUBAgAAAGltcG9ydGVkZmlsZQABKAVDOlxNdXNpY0F1ZGlvXHNvdXJjZXNhbXBsZXNcc2hlaWxhLndhdgBudW1zcGVjdHJhbHN0YWdlcwABBQEIAAAAc3BlY29yZGVyMAABBQEDAAAAc3BlY29yZGVyMQABBQEAAAAAc3BlY29yZGVyMgABBQEBAAAAc3BlY29yZGVyMwABBQECAAAAc3BlY29yZGVyNAABBQEEAAAAc3BlY29yZGVyNQABBQEFAAAAc3BlY29yZGVyNgABBQEGAAAAc3BlY29yZGVyNwABBQEHAAAAcHJlYnVmYW1vdW50AAEFAQIAAABsb2FkZmlsZXdpdGhzdGF0ZQABAQIAzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3Nzc3NzQ=="}
};
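
// callGUI: run the given callable on the plugin's active editor, if one exists.
// When async is true the call is queued onto the JUCE message thread instead of
// being executed immediately from the calling (possibly audio) thread.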
template<typename F>
void callGUI(AudioProcessor* ap, F&& f, bool async)
{
    auto ed = dynamic_cast<PaulstretchpluginAudioProcessorEditor*>(ap->getActiveEditor());
    if (ed)
    {
        if (async == false)
            f(ed);
        else
            MessageManager::callAsync([ed, f]() { f(ed); });
    }
}
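
// get_optimized_updown / optimizebufsize: search up or down from n for the nearest
// length whose prime factors are all in {2, 3, 5, 7, 11}, which keeps the FFT
// implementation efficient, then pick whichever direction is closer.
// For example, optimizebufsize(1021) returns 1024 (1008 is 13 below, 1024 only 3 above).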
int get_optimized_updown(int n, bool up)
{
    int orig_n = n;
    while (true)
    {
        n = orig_n;
        while (!(n % 11)) n /= 11;
        while (!(n % 7)) n /= 7;
        while (!(n % 5)) n /= 5;
        while (!(n % 3)) n /= 3;
        while (!(n % 2)) n /= 2;
        if (n < 2) break;
        if (up) orig_n++;
        else orig_n--;
        if (orig_n < 4) return 4;
    }
    return orig_n;
}

int optimizebufsize(int n)
{
    int n1 = get_optimized_updown(n, false);
    int n2 = get_optimized_updown(n, true);
    if ((n - n1) < (n2 - n)) return n1;
    else return n2;
}
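
// Note: the "// 0" ... "// 30" comments after the addParameter() calls in the constructor
// track the parameter indices; they are assumed to correspond to the cpi_* constants used
// elsewhere in this file (presumably declared in PluginProcessor.h).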
//==============================================================================
PaulstretchpluginAudioProcessor::PaulstretchpluginAudioProcessor()
    : m_bufferingthread("pspluginprebufferthread")
#ifndef JucePlugin_PreferredChannelConfigurations
    : AudioProcessor (BusesProperties()
#if ! JucePlugin_IsMidiEffect
#if ! JucePlugin_IsSynth
                      .withInput ("Input", AudioChannelSet::stereo(), true)
#endif
                      .withOutput ("Output", AudioChannelSet::stereo(), true)
#endif
                      )
#endif
{
    g_activeprocessors.insert(this);
    m_recbuffer.setSize(2, 44100);
    m_recbuffer.clear();
    if (m_afm->getNumKnownFormats() == 0)
        m_afm->registerBasicFormats();
    m_stretch_source = std::make_unique<StretchAudioSource>(2, m_afm);
    m_ppar.pitch_shift.enabled = true;
    m_ppar.freq_shift.enabled = true;
    m_ppar.filter.enabled = true;
    m_ppar.compressor.enabled = true;
    m_stretch_source->setOnsetDetection(0.0);
    m_stretch_source->setLoopingEnabled(true);
    m_stretch_source->setFFTWindowingType(1);
    addParameter(new AudioParameterFloat("mainvolume0", "Main volume", -24.0f, 12.0f, -3.0f)); // 0
    addParameter(new AudioParameterFloat("stretchamount0", "Stretch amount",
        NormalisableRange<float>(0.1f, 1024.0f, 0.01f, 0.25), 1.0f)); // 1
    addParameter(new AudioParameterFloat("fftsize0", "FFT size", 0.0f, 1.0f, 0.7f)); // 2
    addParameter(new AudioParameterFloat("pitchshift0", "Pitch shift", -24.0f, 24.0f, 0.0f)); // 3
    addParameter(new AudioParameterFloat("freqshift0", "Frequency shift", -1000.0f, 1000.0f, 0.0f)); // 4
    addParameter(new AudioParameterFloat("playrange_start0", "Sound start", 0.0f, 1.0f, 0.0f)); // 5
    addParameter(new AudioParameterFloat("playrange_end0", "Sound end", 0.0f, 1.0f, 1.0f)); // 6
    addParameter(new AudioParameterBool("freeze0", "Freeze", false)); // 7
    addParameter(new AudioParameterFloat("spread0", "Frequency spread", 0.0f, 1.0f, 0.0f)); // 8
    addParameter(new AudioParameterFloat("compress0", "Compress", 0.0f, 1.0f, 0.0f)); // 9
    addParameter(new AudioParameterFloat("loopxfadelen0", "Loop xfade length", 0.0f, 1.0f, 0.01f)); // 10
    auto numhar_convertFrom0To1Func = [](float rangemin, float rangemax, float value)
    {
        return jmap<float>(value, 0.0f, 1.0f, 101.0f, 1.0f);
    };
    auto numhar_convertTo0To1Func = [](float rangemin, float rangemax, float value)
    {
        return jmap<float>(value, 101.0f, 1.0f, 0.0f, 1.0f);
    };
    addParameter(new AudioParameterFloat("numharmonics0", "Num harmonics",
        NormalisableRange<float>(1.0f, 101.0f,
            numhar_convertFrom0To1Func, numhar_convertTo0To1Func), 101.0f)); // 11
    addParameter(new AudioParameterFloat("harmonicsfreq0", "Harmonics base freq",
        NormalisableRange<float>(1.0f, 5000.0f, 1.00f, 0.5), 128.0f)); // 12
    addParameter(new AudioParameterFloat("harmonicsbw0", "Harmonics bandwidth", 0.1f, 200.0f, 25.0f)); // 13
    addParameter(new AudioParameterBool("harmonicsgauss0", "Gaussian harmonics", false)); // 14
    addParameter(new AudioParameterFloat("octavemixm2_0", "2 octaves down level", 0.0f, 1.0f, 0.0f)); // 15
    addParameter(new AudioParameterFloat("octavemixm1_0", "Octave down level", 0.0f, 1.0f, 0.0f)); // 16
    addParameter(new AudioParameterFloat("octavemix0_0", "Normal pitch level", 0.0f, 1.0f, 1.0f)); // 17
    addParameter(new AudioParameterFloat("octavemix1_0", "1 octave up level", 0.0f, 1.0f, 0.0f)); // 18
    addParameter(new AudioParameterFloat("octavemix15_0", "1 octave and fifth up level", 0.0f, 1.0f, 0.0f)); // 19
    addParameter(new AudioParameterFloat("octavemix2_0", "2 octaves up level", 0.0f, 1.0f, 0.0f)); // 20
    addParameter(new AudioParameterFloat("tonalvsnoisebw_0", "Tonal vs Noise BW", 0.74f, 1.0f, 0.74f)); // 21
    addParameter(new AudioParameterFloat("tonalvsnoisepreserve_0", "Tonal vs Noise preserve", -1.0f, 1.0f, 0.5f)); // 22
    auto filt_convertFrom0To1Func = [](float rangemin, float rangemax, float value)
    {
        if (value < 0.5f)
            return jmap<float>(value, 0.0f, 0.5f, 20.0f, 1000.0f);
        return jmap<float>(value, 0.5f, 1.0f, 1000.0f, 20000.0f);
    };
    auto filt_convertTo0To1Func = [](float rangemin, float rangemax, float value)
    {
        if (value < 1000.0f)
            return jmap<float>(value, 20.0f, 1000.0f, 0.0f, 0.5f);
        return jmap<float>(value, 1000.0f, 20000.0f, 0.5f, 1.0f);
    };
    addParameter(new AudioParameterFloat("filter_low_0", "Filter low",
        NormalisableRange<float>(20.0f, 20000.0f,
            filt_convertFrom0To1Func, filt_convertTo0To1Func), 20.0f)); // 23
    addParameter(new AudioParameterFloat("filter_high_0", "Filter high",
        NormalisableRange<float>(20.0f, 20000.0f,
            filt_convertFrom0To1Func, filt_convertTo0To1Func), 20000.0f)); // 24
    addParameter(new AudioParameterFloat("onsetdetect_0", "Onset detection", 0.0f, 1.0f, 0.0f)); // 25
    addParameter(new AudioParameterBool("capture_enabled0", "Capture", false)); // 26
    m_outchansparam = new AudioParameterInt("numoutchans0", "Num output channels", 2, 8, 2); // 27
    addParameter(m_outchansparam); // 27
    addParameter(new AudioParameterBool("pause_enabled0", "Pause", false)); // 28
    addParameter(new AudioParameterFloat("maxcapturelen_0", "Max capture length", 1.0f, 120.0f, 10.0f)); // 29
    addParameter(new AudioParameterBool("passthrough0", "Pass input through", false)); // 30
    auto& pars = getParameters();
    for (const auto& p : pars)
        m_reset_pars.push_back(p->getValue());
    setPreBufferAmount(2);
    startTimer(1, 50);
}

PaulstretchpluginAudioProcessor::~PaulstretchpluginAudioProcessor()
{
    g_activeprocessors.erase(this);
    m_bufferingthread.stopThread(1000);
}

void PaulstretchpluginAudioProcessor::resetParameters()
{
    ScopedLock locker(m_cs);
    for (int i = 0; i < m_reset_pars.size(); ++i)
    {
        if (i != cpi_main_volume && i != cpi_passthrough)
            setParameter(i, m_reset_pars[i]);
    }
}

void PaulstretchpluginAudioProcessor::setPreBufferAmount(int x)
{
    int temp = jlimit(0, 5, x);
    if (temp != m_prebuffer_amount || m_use_backgroundbuffering == false)
    {
        m_use_backgroundbuffering = true;
        m_prebuffer_amount = temp;
        m_recreate_buffering_source = true;
        ScopedLock locker(m_cs);
        m_prebuffering_inited = false;
        m_cur_num_out_chans = *m_outchansparam;
        //Logger::writeToLog("Switching to use " + String(m_cur_num_out_chans) + " out channels");
        String err;
        startplay({ *getFloatParameter(cpi_soundstart), *getFloatParameter(cpi_soundend) },
                  m_cur_num_out_chans, m_curmaxblocksize, err);
        m_prebuffering_inited = true;
    }
}

int PaulstretchpluginAudioProcessor::getPreBufferAmount()
{
    if (m_use_backgroundbuffering == false)
        return -1;
    return m_prebuffer_amount;
}
ValueTree PaulstretchpluginAudioProcessor::getStateTree()
{
    ValueTree paramtree("paulstretch3pluginstate");
    for (int i = 0; i < getNumParameters(); ++i)
    {
        auto par = getFloatParameter(i);
        if (par != nullptr)
        {
            paramtree.setProperty(par->paramID, (double)*par, nullptr);
        }
    }
    paramtree.setProperty(m_outchansparam->paramID, (int)*m_outchansparam, nullptr);
    if (m_current_file != File())
    {
        paramtree.setProperty("importedfile", m_current_file.getFullPathName(), nullptr);
    }
    auto specorder = m_stretch_source->getSpectrumProcessOrder();
    paramtree.setProperty("numspectralstages", (int)specorder.size(), nullptr);
    for (int i = 0; i < specorder.size(); ++i)
    {
        paramtree.setProperty("specorder" + String(i), specorder[i], nullptr);
    }
    if (m_use_backgroundbuffering)
        paramtree.setProperty("prebufamount", m_prebuffer_amount, nullptr);
    else
        paramtree.setProperty("prebufamount", -1, nullptr);
    paramtree.setProperty("loadfilewithstate", m_load_file_with_state, nullptr);
    return paramtree;
}

void PaulstretchpluginAudioProcessor::setStateFromTree(ValueTree tree)
{
    if (tree.isValid())
    {
        {
            ScopedLock locker(m_cs);
            m_load_file_with_state = tree.getProperty("loadfilewithstate", true);
            if (tree.hasProperty("numspectralstages"))
            {
                std::vector<int> order;
                int ordersize = tree.getProperty("numspectralstages");
                for (int i = 0; i < ordersize; ++i)
                {
                    order.push_back((int)tree.getProperty("specorder" + String(i)));
                }
                m_stretch_source->setSpectrumProcessOrder(order);
            }
            for (int i = 0; i < getNumParameters(); ++i)
            {
                auto par = getFloatParameter(i);
                if (par != nullptr)
                {
                    double parval = tree.getProperty(par->paramID, (double)*par);
                    *par = parval;
                }
            }
            if (tree.hasProperty(m_outchansparam->paramID))
                *m_outchansparam = tree.getProperty(m_outchansparam->paramID, 2);
        }
        int prebufamt = tree.getProperty("prebufamount", 2);
        if (prebufamt == -1)
            m_use_backgroundbuffering = false;
        else
            setPreBufferAmount(prebufamt);
        if (m_load_file_with_state == true)
        {
            String fn = tree.getProperty("importedfile");
            if (fn.isEmpty() == false)
            {
                File f(fn);
                setAudioFile(f);
            }
        }
    }
}

//==============================================================================
const String PaulstretchpluginAudioProcessor::getName() const
{
    return JucePlugin_Name;
}

bool PaulstretchpluginAudioProcessor::acceptsMidi() const
{
#if JucePlugin_WantsMidiInput
    return true;
#else
    return false;
#endif
}

bool PaulstretchpluginAudioProcessor::producesMidi() const
{
#if JucePlugin_ProducesMidiOutput
    return true;
#else
    return false;
#endif
}

bool PaulstretchpluginAudioProcessor::isMidiEffect() const
{
#if JucePlugin_IsMidiEffect
    return true;
#else
    return false;
#endif
}

double PaulstretchpluginAudioProcessor::getTailLengthSeconds() const
{
    return 0.0;
    //return (double)m_bufamounts[m_prebuffer_amount]/getSampleRate();
}
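
// The three host-visible "programs" map directly onto the g_presets table above;
// selecting one decodes the stored base64 state and applies it without reloading
// the audio file referenced in the preset.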
int PaulstretchpluginAudioProcessor::getNumPrograms()
{
    return 3; // NB: some hosts don't cope very well if you tell them there are 0 programs,
              // so this should be at least 1, even if you're not really implementing programs.
}

int PaulstretchpluginAudioProcessor::getCurrentProgram()
{
    return m_cur_program;
}

void PaulstretchpluginAudioProcessor::setCurrentProgram (int index)
{
    index = jlimit(0, 2, index);
    m_cur_program = index;
    bool temp = m_load_file_with_state;
    m_load_file_with_state = false;
    MemoryBlock mb;
    MemoryOutputStream stream(mb, true);
    if (Base64::convertFromBase64(stream, g_presets[index].m_data) == true)
    {
        ValueTree tree = ValueTree::readFromData(mb.getData(), mb.getSize());
        tree.setProperty("loadfilewithstate", false, nullptr);
        setStateFromTree(tree);
    }
    m_load_file_with_state = temp;
}

const String PaulstretchpluginAudioProcessor::getProgramName (int index)
{
    index = jlimit(0, 2, index);
    return g_presets[index].m_name;
}

void PaulstretchpluginAudioProcessor::changeProgramName (int index, const String& newName)
{
}

void PaulstretchpluginAudioProcessor::setFFTSize(double size)
{
    if (m_prebuffer_amount == 5)
        m_fft_size_to_use = pow(2, 7.0 + size * 14.5);
    else
        m_fft_size_to_use = pow(2, 7.0 + size * 10.0); // chicken out from allowing huge FFT sizes if not enough prebuffering
    int optim = optimizebufsize(m_fft_size_to_use);
    m_fft_size_to_use = optim;
    m_stretch_source->setFFTSize(optim);
    //Logger::writeToLog(String(m_fft_size_to_use));
}
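
// The normalized 0..1 FFT size parameter maps to 2^(7 + 10*x) samples (128 .. ~131k),
// or up to 2^(7 + 14.5*x) when the largest prebuffer amount is selected, and the result
// is then rounded to a nearby 11-smooth length by optimizebufsize. For instance, the
// default value 0.7 gives 2^14 = 16384.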
void PaulstretchpluginAudioProcessor::startplay(Range<double> playrange, int numoutchans, int maxBlockSize, String& err)
{
    m_stretch_source->setPlayRange(playrange, true);
    int bufamt = m_bufamounts[m_prebuffer_amount];
    if (m_buffering_source != nullptr && numoutchans != m_buffering_source->getNumberOfChannels())
        m_recreate_buffering_source = true;
    if (m_recreate_buffering_source == true)
    {
        m_buffering_source = std::make_unique<MyBufferingAudioSource>(m_stretch_source.get(),
            m_bufferingthread, false, bufamt, numoutchans, false);
        m_recreate_buffering_source = false;
    }
    if (m_bufferingthread.isThreadRunning() == false)
        m_bufferingthread.startThread();
    m_stretch_source->setNumOutChannels(numoutchans);
    m_stretch_source->setFFTSize(m_fft_size_to_use);
    m_stretch_source->setProcessParameters(&m_ppar);
    m_last_outpos_pos = 0.0;
    m_last_in_pos = playrange.getStart() * m_stretch_source->getInfileLengthSeconds();
    m_buffering_source->prepareToPlay(maxBlockSize, getSampleRateChecked());
}

void PaulstretchpluginAudioProcessor::setParameters(const std::vector<double>& pars)
{
    ScopedLock locker(m_cs);
    for (int i = 0; i < getNumParameters(); ++i)
    {
        if (i < pars.size())
            setParameter(i, pars[i]);
    }
}

double PaulstretchpluginAudioProcessor::getSampleRateChecked()
{
    if (m_cur_sr < 1.0)
        return 44100.0;
    return m_cur_sr;
}
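
// prepareToPlay (re)initializes playback: it reapplies the captured buffer as the input
// source when one is in use, and (re)creates the buffering source whenever the output
// channel count or the buffering setup has changed since the last initialization.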
void PaulstretchpluginAudioProcessor::prepareToPlay(double sampleRate, int samplesPerBlock)
{
    ScopedLock locker(m_cs);
    m_cur_sr = sampleRate;
    m_curmaxblocksize = samplesPerBlock;
    m_input_buffer.setSize(2, samplesPerBlock);
    int numoutchans = *m_outchansparam;
    if (numoutchans != m_cur_num_out_chans)
        m_prebuffering_inited = false;
    if (m_using_memory_buffer == true)
    {
        int len = jlimit(100, m_recbuffer.getNumSamples(),
            int(getSampleRateChecked() * (*getFloatParameter(cpi_max_capture_len))));
        m_stretch_source->setAudioBufferAsInputSource(&m_recbuffer,
            getSampleRateChecked(),
            len);
        callGUI(this, [this, len](auto ed) { ed->setAudioBuffer(&m_recbuffer, getSampleRateChecked(), len); }, false);
    }
    if (m_prebuffering_inited == false)
    {
        setFFTSize(*getFloatParameter(cpi_fftsize));
        m_stretch_source->setProcessParameters(&m_ppar);
        m_stretch_source->setFFTWindowingType(1);
        String err;
        startplay({ *getFloatParameter(cpi_soundstart), *getFloatParameter(cpi_soundend) },
                  numoutchans, samplesPerBlock, err);
        m_cur_num_out_chans = numoutchans;
        m_prebuffering_inited = true;
    }
    else
    {
        m_buffering_source->prepareToPlay(samplesPerBlock, getSampleRateChecked());
    }
}

void PaulstretchpluginAudioProcessor::releaseResources()
{
    //m_control->stopplay();
    //m_ready_to_play = false;
}

#ifndef JucePlugin_PreferredChannelConfigurations
bool PaulstretchpluginAudioProcessor::isBusesLayoutSupported (const BusesLayout& layouts) const
{
#if JucePlugin_IsMidiEffect
    ignoreUnused (layouts);
    return true;
#else
    // This is the place where you check if the layout is supported.
    // In this template code we only support mono or stereo.
    if (layouts.getMainOutputChannelSet() != AudioChannelSet::mono()
     && layouts.getMainOutputChannelSet() != AudioChannelSet::stereo())
        return false;
    // This checks if the input layout matches the output layout
#if ! JucePlugin_IsSynth
    if (layouts.getMainOutputChannelSet() != layouts.getMainInputChannelSet())
        return false;
#endif
    return true;
#endif
}
#endif
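
// copyAudioBufferWrappingPosition treats dest as a circular buffer: it copies the source
// block starting at destbufpos and, if the block would run past maxdestpos, splits the copy
// and writes the remainder back at the start of the buffer. Used below to record live input
// into the fixed-size capture buffer.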
void copyAudioBufferWrappingPosition(const AudioBuffer<float>& src, AudioBuffer<float>& dest, int destbufpos, int maxdestpos)
{
    for (int i = 0; i < dest.getNumChannels(); ++i)
    {
        int channel_to_copy = i % src.getNumChannels();
        if (destbufpos + src.getNumSamples() > maxdestpos)
        {
            int wrappos = (destbufpos + src.getNumSamples()) % maxdestpos;
            int partial_len = src.getNumSamples() - wrappos;
            dest.copyFrom(channel_to_copy, destbufpos, src, channel_to_copy, 0, partial_len);
            // Write the remaining samples to the beginning of the destination, continuing
            // from where the first partial copy left off in the source.
            dest.copyFrom(channel_to_copy, 0, src, channel_to_copy, partial_len, wrappos);
        }
        else
        {
            dest.copyFrom(channel_to_copy, destbufpos, src, channel_to_copy, 0, src.getNumSamples());
        }
    }
}
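
// processBlock overview: snapshot the host playhead, keep a copy of the input for optional
// pass-through, and either (a) append the input to the capture ring buffer while recording,
// or (b) push the current parameter values into m_ppar / the stretch source and pull audio
// from the background buffering source (or directly from the stretch source when rendering
// offline or when background buffering is disabled). Non-finite output samples are counted
// in m_abnormal_output_samples.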
void PaulstretchpluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, MidiBuffer& midiMessages)
{
    ScopedLock locker(m_cs);
    AudioPlayHead* phead = getPlayHead();
    if (phead != nullptr)
    {
        phead->getCurrentPosition(m_playposinfo);
    }
    ScopedNoDenormals noDenormals;
    double srtemp = getSampleRate();
    if (srtemp != m_cur_sr)
        m_cur_sr = srtemp;
    const int totalNumInputChannels  = getTotalNumInputChannels();
    const int totalNumOutputChannels = getTotalNumOutputChannels();
    for (int i = 0; i < totalNumInputChannels; ++i)
        m_input_buffer.copyFrom(i, 0, buffer, i, 0, buffer.getNumSamples());
    for (int i = totalNumInputChannels; i < totalNumOutputChannels; ++i)
        buffer.clear (i, 0, buffer.getNumSamples());
    if (m_prebuffering_inited == false)
        return;
    if (m_is_recording == true)
    {
        if (m_playposinfo.isPlaying == false && m_capture_when_host_plays == true)
            return;
        int recbuflenframes = m_max_reclen * getSampleRate();
        copyAudioBufferWrappingPosition(buffer, m_recbuffer, m_rec_pos, recbuflenframes);
        callGUI(this, [this, &buffer](PaulstretchpluginAudioProcessorEditor* ed)
        {
            ed->addAudioBlock(buffer, getSampleRate(), m_rec_pos);
        }, false);
        m_rec_pos = (m_rec_pos + buffer.getNumSamples()) % recbuflenframes;
        return;
    }
    jassert(m_buffering_source != nullptr);
    jassert(m_bufferingthread.isThreadRunning());
    if (m_last_host_playing == false && m_playposinfo.isPlaying)
    {
        m_stretch_source->seekPercent(*getFloatParameter(cpi_soundstart));
        m_last_host_playing = true;
    }
    else if (m_last_host_playing == true && m_playposinfo.isPlaying == false)
    {
        m_last_host_playing = false;
    }
    if (m_play_when_host_plays == true && m_playposinfo.isPlaying == false)
        return;
    m_stretch_source->setMainVolume(*getFloatParameter(cpi_main_volume));
    m_stretch_source->setRate(*getFloatParameter(cpi_stretchamount));
    setFFTSize(*getFloatParameter(cpi_fftsize));
    m_ppar.pitch_shift.cents = *getFloatParameter(cpi_pitchshift) * 100.0;
    m_ppar.freq_shift.Hz = *getFloatParameter(cpi_frequencyshift);
    m_ppar.spread.enabled = *getFloatParameter(cpi_spreadamount) > 0.0f;
    m_ppar.spread.bandwidth = *getFloatParameter(cpi_spreadamount);
    m_ppar.compressor.enabled = *getFloatParameter(cpi_compress) > 0.0f;
    m_ppar.compressor.power = *getFloatParameter(cpi_compress);
    m_ppar.harmonics.enabled = *getFloatParameter(cpi_numharmonics) < 101.0;
    m_ppar.harmonics.nharmonics = *getFloatParameter(cpi_numharmonics);
    m_ppar.harmonics.freq = *getFloatParameter(cpi_harmonicsfreq);
    m_ppar.harmonics.bandwidth = *getFloatParameter(cpi_harmonicsbw);
    m_ppar.harmonics.gauss = getParameter(cpi_harmonicsgauss);
    m_ppar.octave.om2 = *getFloatParameter(cpi_octavesm2);
    m_ppar.octave.om1 = *getFloatParameter(cpi_octavesm1);
    m_ppar.octave.o0 = *getFloatParameter(cpi_octaves0);
    m_ppar.octave.o1 = *getFloatParameter(cpi_octaves1);
    m_ppar.octave.o15 = *getFloatParameter(cpi_octaves15);
    m_ppar.octave.o2 = *getFloatParameter(cpi_octaves2);
    m_ppar.octave.enabled = true;
    m_ppar.filter.low = *getFloatParameter(cpi_filter_low);
    m_ppar.filter.high = *getFloatParameter(cpi_filter_high);
    m_ppar.tonal_vs_noise.enabled = (*getFloatParameter(cpi_tonalvsnoisebw)) > 0.75;
    m_ppar.tonal_vs_noise.bandwidth = *getFloatParameter(cpi_tonalvsnoisebw);
    m_ppar.tonal_vs_noise.preserve = *getFloatParameter(cpi_tonalvsnoisepreserve);
    m_stretch_source->setOnsetDetection(*getFloatParameter(cpi_onsetdetection));
    m_stretch_source->setLoopXFadeLength(*getFloatParameter(cpi_loopxfadelen));
    double t0 = *getFloatParameter(cpi_soundstart);
    double t1 = *getFloatParameter(cpi_soundend);
    if (t0 > t1)
        std::swap(t0, t1);
    if (t1 - t0 < 0.001)
        t1 = t0 + 0.001;
    m_stretch_source->setPlayRange({ t0, t1 }, true);
    m_stretch_source->setFreezing(getParameter(cpi_freeze));
    m_stretch_source->setPaused(getParameter(cpi_pause_enabled));
    m_stretch_source->setProcessParameters(&m_ppar);
    AudioSourceChannelInfo aif(buffer);
    if (isNonRealtime() || m_use_backgroundbuffering == false)
    {
        m_stretch_source->getNextAudioBlock(aif);
    }
    else
    {
        m_buffering_source->getNextAudioBlock(aif);
    }
    if (getParameter(cpi_passthrough) > 0.5f)
    {
        for (int i = 0; i < totalNumInputChannels; ++i)
        {
            buffer.addFrom(i, 0, m_input_buffer, i, 0, buffer.getNumSamples());
        }
    }
    for (int i = 0; i < buffer.getNumChannels(); ++i)
    {
        for (int j = 0; j < buffer.getNumSamples(); ++j)
        {
            float sample = buffer.getSample(i, j);
            if (std::isnan(sample) || std::isinf(sample))
                ++m_abnormal_output_samples;
        }
    }
}

//==============================================================================
bool PaulstretchpluginAudioProcessor::hasEditor() const
{
    return true; // (change this to false if you choose to not supply an editor)
}

AudioProcessorEditor* PaulstretchpluginAudioProcessor::createEditor()
{
    return new PaulstretchpluginAudioProcessorEditor (*this);
}

//==============================================================================
void PaulstretchpluginAudioProcessor::getStateInformation (MemoryBlock& destData)
{
    ValueTree paramtree = getStateTree();
    MemoryOutputStream stream(destData, true);
    paramtree.writeToStream(stream);
}

void PaulstretchpluginAudioProcessor::setStateInformation (const void* data, int sizeInBytes)
{
    ValueTree tree = ValueTree::readFromData(data, sizeInBytes);
    setStateFromTree(tree);
}
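
// setRecordingEnabled(true) switches the plugin to capture mode: the capture buffer is sized
// for the maximum capture length (plus a little slack), cleared, and the editor is told to
// start displaying incoming blocks; setRecordingEnabled(false) finalizes the recording and
// hands the captured buffer to the stretch source via finishRecording().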
void PaulstretchpluginAudioProcessor::setRecordingEnabled(bool b)
{
    ScopedLock locker(m_cs);
    int lenbufframes = getSampleRateChecked() * m_max_reclen;
    if (b == true)
    {
        m_using_memory_buffer = true;
        m_current_file = File();
        m_recbuffer.setSize(2, m_max_reclen * getSampleRateChecked() + 4096, false, false, true);
        m_recbuffer.clear();
        m_rec_pos = 0;
        callGUI(this, [this, lenbufframes](PaulstretchpluginAudioProcessorEditor* ed)
        {
            ed->beginAddingAudioBlocks(2, getSampleRateChecked(), lenbufframes);
        }, false);
        m_is_recording = true;
    }
    else
    {
        if (m_is_recording == true)
        {
            finishRecording(lenbufframes);
        }
    }
}

double PaulstretchpluginAudioProcessor::getRecordingPositionPercent()
{
    if (m_is_recording == false)
        return 0.0;
    return 1.0 / m_recbuffer.getNumSamples() * m_rec_pos;
}
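
// setAudioFile validates the file (channel count and bit depth) before handing it to the
// stretch source; it returns an empty String on success or a human-readable error message.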
String PaulstretchpluginAudioProcessor::setAudioFile(File f)
{
    //if (f==File())
    //  return String();
    //if (f==m_current_file && f.getLastModificationTime()==m_current_file_date)
    //  return String();
    auto ai = unique_from_raw(m_afm->createReaderFor(f));
    if (ai != nullptr)
    {
        if (ai->numChannels > 32)
        {
            //MessageManager::callAsync([cb, file]() { cb("Too many channels in file " + file.getFullPathName()); });
            return "Too many channels in file " + f.getFullPathName();
        }
        if (ai->bitsPerSample > 32)
        {
            //MessageManager::callAsync([cb, file]() { cb("Too high bit depth in file " + file.getFullPathName()); });
            return "Too high bit depth in file " + f.getFullPathName();
        }
        ScopedLock locker(m_cs);
        m_stretch_source->setAudioFile(f);
        m_current_file = f;
        m_current_file_date = m_current_file.getLastModificationTime();
        m_using_memory_buffer = false;
        return String();
        //MessageManager::callAsync([cb, file]() { cb(String()); });
    }
    return "Could not open file " + f.getFullPathName();
}

Range<double> PaulstretchpluginAudioProcessor::getTimeSelection()
{
    return { *getFloatParameter(cpi_soundstart), *getFloatParameter(cpi_soundend) };
}

double PaulstretchpluginAudioProcessor::getPreBufferingPercent()
{
    if (m_buffering_source == nullptr)
        return 0.0;
    return m_buffering_source->getPercentReady();
}
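
// timerCallback (timer id 1, started at 50 ms intervals in the constructor) polls parameter
// state that cannot be handled safely from the audio thread: it picks up changes to the max
// capture length, toggles recording when the Capture parameter changes, and reinitializes
// playback when the output channel count parameter changes.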
void PaulstretchpluginAudioProcessor::timerCallback(int id)
{
    if (id == 1)
    {
        bool capture = getParameter(cpi_capture_enabled);
        if (capture == false && m_max_reclen != *getFloatParameter(cpi_max_capture_len))
        {
            m_max_reclen = *getFloatParameter(cpi_max_capture_len);
            //Logger::writeToLog("Changing max capture len to " + String(m_max_reclen));
        }
        if (capture == true && m_is_recording == false)
        {
            setRecordingEnabled(true);
            return;
        }
        if (capture == false && m_is_recording == true)
        {
            setRecordingEnabled(false);
            return;
        }
        if (m_cur_num_out_chans != *m_outchansparam)
        {
            jassert(m_curmaxblocksize > 0);
            ScopedLock locker(m_cs);
            m_prebuffering_inited = false;
            m_cur_num_out_chans = *m_outchansparam;
            //Logger::writeToLog("Switching to use " + String(m_cur_num_out_chans) + " out channels");
            String err;
            startplay({ *getFloatParameter(cpi_soundstart), *getFloatParameter(cpi_soundend) },
                      m_cur_num_out_chans, m_curmaxblocksize, err);
            m_prebuffering_inited = true;
        }
    }
}

void PaulstretchpluginAudioProcessor::finishRecording(int lenrecording)
{
    m_is_recording = false;
    m_stretch_source->setAudioBufferAsInputSource(&m_recbuffer, getSampleRateChecked(), lenrecording);
    m_stretch_source->setPlayRange({ *getFloatParameter(cpi_soundstart), *getFloatParameter(cpi_soundend) }, true);
    auto ed = dynamic_cast<PaulstretchpluginAudioProcessorEditor*>(getActiveEditor());
    if (ed)
    {
        //ed->setAudioBuffer(&m_recbuffer, getSampleRate(), lenrecording);
    }
}

//==============================================================================
// This creates new instances of the plugin..
AudioProcessor* JUCE_CALLTYPE createPluginFilter()
{
    return new PaulstretchpluginAudioProcessor();
}