juuhtags/v100_p5
@@ -74,7 +74,9 @@ public:
    std::lock_guard<std::mutex> locker(m_mutex);
    m_using_memory_buffer = false;
    m_afreader = std::unique_ptr<AudioFormatReader>(reader);
    m_currentsample = 0;
    if (m_activerange.isEmpty())
        m_activerange = { 0.0,1.0 };
    m_currentsample = m_activerange.getStart()*info.nsamples;
    info.samplerate = (int)m_afreader->sampleRate;
    info.nchannels = m_afreader->numChannels;
    info.nsamples = m_afreader->lengthInSamples;
@@ -83,6 +85,7 @@ public:
        m_readbuf.setSize(info.nchannels, m_readbuf.getNumSamples());
        m_crossfadebuf.setSize(info.nchannels, m_crossfadebuf.getNumSamples());
    }
    updateXFadeCache();
    m_readbuf.clear();
    return true;
}
@@ -104,9 +104,9 @@ void ProcessedStretch::process_spectrum(REALTYPE *freq)
{
    for (auto& e : m_spectrum_processes)
    {
        copy(freq, infreq.data());
        spectrum_copy(nfreq, freq, infreq.data());
        if (e == 0 && pars.harmonics.enabled)
            do_harmonics(infreq.data(), freq);
            spectrum_do_harmonics(pars, tmpfreq1, nfreq, samplerate, infreq.data(), freq);
        if (e == 1 && pars.tonal_vs_noise.enabled)
            do_tonal_vs_noise(infreq.data(), freq);
        if (e == 2 && pars.freq_shift.enabled)
@@ -174,15 +174,6 @@ void ProcessedStretch::process_spectrum(REALTYPE *freq)
//void ProcessedStretch::process_output(REALTYPE *smps,int nsmps){
//};
REALTYPE profile(REALTYPE fi, REALTYPE bwi){
    REALTYPE x=fi/bwi;
    x*=x;
    if (x>14.71280603) return 0.0;
    return exp(-x);///bwi;
};
void ProcessedStretch::do_harmonics(REALTYPE *freq1,REALTYPE *freq2){
    REALTYPE freq=pars.harmonics.freq;
    REALTYPE bandwidth=pars.harmonics.bandwidth;
@@ -180,6 +180,263 @@ struct ProcessParameters
    }
};
inline REALTYPE profile(REALTYPE fi, REALTYPE bwi) {
    REALTYPE x = fi / bwi;
    x *= x;
    if (x>14.71280603) return 0.0;
    return exp(-x);///bwi;
};
inline void spectrum_copy(int nfreq, REALTYPE* freq1, REALTYPE* freq2)
{
    for (int i = 0; i<nfreq; i++) freq2[i] = freq1[i];
};
inline void spectrum_spread(int nfreq, double samplerate,
    std::vector<REALTYPE>& tmpfreq1,
    REALTYPE *freq1, REALTYPE *freq2, REALTYPE spread_bandwidth) {
    //convert to log spectrum
    REALTYPE minfreq = 20.0f;
    REALTYPE maxfreq = 0.5f*samplerate;
    REALTYPE log_minfreq = log(minfreq);
    REALTYPE log_maxfreq = log(maxfreq);
    for (int i = 0; i<nfreq; i++) {
        REALTYPE freqx = i / (REALTYPE)nfreq;
        REALTYPE x = exp(log_minfreq + freqx * (log_maxfreq - log_minfreq)) / maxfreq * nfreq;
        REALTYPE y = 0.0f;
        int x0 = (int)floor(x); if (x0 >= nfreq) x0 = nfreq - 1;
        int x1 = x0 + 1; if (x1 >= nfreq) x1 = nfreq - 1;
        REALTYPE xp = x - x0;
        if (x<nfreq) {
            y = freq1[x0] * (1.0f - xp) + freq1[x1] * xp;
        };
        tmpfreq1[i] = y;
    };
    //increase the bandwidth of each harmonic (by smoothing the log spectrum)
    int n = 2;
    REALTYPE bandwidth = spread_bandwidth;
    REALTYPE a = 1.0f - pow(2.0f, -bandwidth * bandwidth*10.0f);
    a = pow(a, 8192.0f / nfreq * n);
    for (int k = 0; k<n; k++) {
        tmpfreq1[0] = 0.0f;
        for (int i = 1; i<nfreq; i++) {
            tmpfreq1[i] = tmpfreq1[i - 1] * a + tmpfreq1[i] * (1.0f - a);
        };
        tmpfreq1[nfreq - 1] = 0.0f;
        for (int i = nfreq - 2; i>0; i--) {
            tmpfreq1[i] = tmpfreq1[i + 1] * a + tmpfreq1[i] * (1.0f - a);
        };
    };
    freq2[0] = 0;
    REALTYPE log_maxfreq_d_minfreq = log(maxfreq / minfreq);
    for (int i = 1; i<nfreq; i++) {
        REALTYPE freqx = i / (REALTYPE)nfreq;
        REALTYPE x = log((freqx*maxfreq) / minfreq) / log_maxfreq_d_minfreq * nfreq;
        REALTYPE y = 0.0;
        if ((x>0.0) && (x<nfreq)) {
            int x0 = (int)floor(x); if (x0 >= nfreq) x0 = nfreq - 1;
            int x1 = x0 + 1; if (x1 >= nfreq) x1 = nfreq - 1;
            REALTYPE xp = x - x0;
            y = tmpfreq1[x0] * (1.0f - xp) + tmpfreq1[x1] * xp;
        };
        freq2[i] = y;
    };
};
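// A minimal usage sketch (illustrative only, assuming REALTYPE is float and the
// buffers hold nfreq magnitude bins spanning 0..samplerate/2):
//
//   std::vector<REALTYPE> in(nfreq, 0.0f), out(nfreq, 0.0f), scratch(nfreq, 0.0f);
//   in[nfreq / 8] = 1.0f;                        // a single spectral peak
//   spectrum_spread(nfreq, samplerate, scratch,  // scratch buffer for the log-domain pass
//                   in.data(), out.data(), 0.3f);
//
// The smoothing coefficient a = 1 - 2^(-10*bandwidth^2), raised to the power
// 8192/nfreq*n, keeps the amount of smearing roughly comparable across FFT sizes.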
inline void spectrum_do_compressor(const ProcessParameters& pars, int nfreq, REALTYPE *freq1, REALTYPE *freq2) {
    REALTYPE rms = 0.0;
    for (int i = 0; i<nfreq; i++) rms += freq1[i] * freq1[i];
    rms = sqrt(rms / nfreq)*0.1f;
    if (rms<1e-3f) rms = 1e-3f;
    REALTYPE _rap = pow(rms, -pars.compressor.power);
    for (int i = 0; i<nfreq; i++) freq2[i] = freq1[i] * _rap;
};
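// A worked example with illustrative values (the ProcessParameters instance and
// buffer names below are hypothetical):
//
//   ProcessParameters p;
//   p.compressor.power = 1.0f;                  // full level normalisation
//   std::vector<REALTYPE> in(nfreq, 0.01f), out(nfreq);
//   spectrum_do_compressor(p, nfreq, in.data(), out.data());
//   // rms = sqrt(nfreq*0.01^2 / nfreq)*0.1 = 0.001, so every bin is scaled by
//   // 0.001^-1 = 1000; with power = 0 the spectrum passes through unchanged.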
inline void spectrum_do_tonal_vs_noise(const ProcessParameters& pars, int nfreq, double samplerate,
    std::vector<REALTYPE>& tmpfreq1,
    REALTYPE *freq1, REALTYPE *freq2) {
    spectrum_spread(nfreq, samplerate, tmpfreq1, freq1, tmpfreq1.data(), pars.tonal_vs_noise.bandwidth);
    if (pars.tonal_vs_noise.preserve >= 0.0) {
        REALTYPE mul = (pow(10.0f, pars.tonal_vs_noise.preserve) - 1.0f);
        for (int i = 0; i<nfreq; i++) {
            REALTYPE x = freq1[i];
            REALTYPE smooth_x = tmpfreq1[i] + 1e-6f;
            REALTYPE result = 0.0f;
            result = x - smooth_x * mul;
            if (result<0.0f) result = 0.0f;
            freq2[i] = result;
        };
    }
    else {
        REALTYPE mul = (pow(5.0f, 1.0f + pars.tonal_vs_noise.preserve) - 1.0f);
        for (int i = 0; i<nfreq; i++) {
            REALTYPE x = freq1[i];
            REALTYPE smooth_x = tmpfreq1[i] + 1e-6f;
            REALTYPE result = 0.0f;
            result = x - smooth_x * mul + 0.1f*mul;
            if (result<0.0f) result = x;
            else result = 0.0f;
            freq2[i] = result;
        };
    };
};
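// Behaviour sketch: spectrum_spread() first writes a smoothed "floor" version of
// the spectrum into tmpfreq1. With preserve >= 0 the scaled floor is subtracted
// and clamped at zero, so only bins that rise well above it (the tonal part)
// survive; with preserve < 0 the test is inverted and only bins sitting near or
// below the floor (the noise part) are kept. Call sketch, buffer names
// illustrative:
//
//   spectrum_do_tonal_vs_noise(pars, nfreq, samplerate, scratch,
//                              in.data(), out.data());   // scratch.size() >= nfreq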
inline void spectrum_do_harmonics(const ProcessParameters& pars, std::vector<REALTYPE>& tmpfreq1, int nfreq, double samplerate, REALTYPE *freq1, REALTYPE *freq2) {
    REALTYPE freq = pars.harmonics.freq;
    REALTYPE bandwidth = pars.harmonics.bandwidth;
    int nharmonics = pars.harmonics.nharmonics;
    if (freq<10.0) freq = 10.0;
    REALTYPE *amp = tmpfreq1.data();
    for (int i = 0; i<nfreq; i++) amp[i] = 0.0;
    for (int nh = 1; nh <= nharmonics; nh++) {//for each harmonic
        REALTYPE bw_Hz;//bandwidth of the current harmonic measured in Hz
        REALTYPE bwi;
        REALTYPE fi;
        REALTYPE f = nh * freq;
        if (f >= samplerate / 2) break;
        bw_Hz = (pow(2.0f, bandwidth / 1200.0f) - 1.0f)*f;
        bwi = bw_Hz / (2.0f*samplerate);
        fi = f / samplerate;
        REALTYPE sum = 0.0f;
        REALTYPE max = 0.0f;
        for (int i = 1; i<nfreq; i++) {//todo: optimize here
            REALTYPE hprofile;
            hprofile = profile((i / (REALTYPE)nfreq*0.5f) - fi, bwi);
            amp[i] += hprofile;
            if (max<hprofile) max = hprofile;
            sum += hprofile;
        };
    };
    REALTYPE max = 0.0;
    for (int i = 1; i<nfreq; i++) {
        if (amp[i]>max) max = amp[i];
    };
    if (max<1e-8f) max = 1e-8f;
    for (int i = 1; i<nfreq; i++) {
        //REALTYPE c,s;
        REALTYPE a = amp[i] / max;
        if (!pars.harmonics.gauss) a = (a<0.368f ? 0.0f : 1.0f);
        freq2[i] = freq1[i] * a;
    };
};
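// The mask built above is a comb of Gaussian lobes centred on nh*freq for
// nh = 1..nharmonics. The lobe width follows from the bandwidth given in cents,
// bw_Hz = (2^(bandwidth/1200) - 1) * f, and profile(d, bwi) evaluates
// exp(-(d/bwi)^2) with a cutoff once (d/bwi)^2 exceeds ~14.71. When
// pars.harmonics.gauss is false the normalised mask is hardened to 0/1 at the
// 0.368 (roughly 1/e) threshold. Call sketch, buffer names illustrative:
//
//   spectrum_do_harmonics(pars, scratch, nfreq, samplerate,
//                         in.data(), out.data());        // scratch.size() >= nfreq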
inline void spectrum_add(int nfreq, REALTYPE *freq2, REALTYPE *freq1, REALTYPE a) {
    for (int i = 0; i<nfreq; i++) freq2[i] += freq1[i] * a;
};
inline void spectrum_zero(int nfreq, REALTYPE *freq1) {
    for (int i = 0; i<nfreq; i++) freq1[i] = 0.0;
};
inline void spectrum_do_freq_shift(const ProcessParameters& pars, int nfreq, double samplerate, REALTYPE *freq1, REALTYPE *freq2) {
    spectrum_zero(nfreq, freq2);
    int ifreq = (int)(pars.freq_shift.Hz / (samplerate*0.5)*nfreq);
    for (int i = 0; i<nfreq; i++) {
        int i2 = ifreq + i;
        if ((i2>0) && (i2<nfreq)) freq2[i2] = freq1[i];
    };
};
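// Worked example: at samplerate 44100 with nfreq = 1024 bins, each bin is about
// 21.5 Hz wide, so pars.freq_shift.Hz = 430.7 gives
// ifreq = (int)(430.7 / 22050 * 1024) = 20 and every bin moves up by 20 slots
// (down for negative Hz); bins shifted outside 0..nfreq-1 are simply dropped.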
inline void spectrum_do_pitch_shift(const ProcessParameters& pars, int nfreq, REALTYPE *freq1, REALTYPE *freq2, REALTYPE _rap) {
    spectrum_zero(nfreq, freq2);
    if (_rap<1.0) {//down
        for (int i = 0; i<nfreq; i++) {
            int i2 = (int)(i*_rap);
            if (i2 >= nfreq) break;
            freq2[i2] += freq1[i];
        };
    };
    if (_rap >= 1.0) {//up
        _rap = 1.0f / _rap;
        for (int i = 0; i<nfreq; i++) {
            freq2[i] = freq1[(int)(i*_rap)];
        };
    };
};
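// A minimal call sketch. The ratio argument follows the usual cents relation
// ratio = 2^(cents/1200) (the same formula SpectralVisualizer::setState uses
// below). Shifting down (_rap < 1) accumulates several source bins into one
// target bin; shifting up (_rap >= 1) reads each target bin i from source bin
// (int)(i/_rap). Buffer names illustrative:
//
//   REALTYPE ratio = pow(2.0f, pars.pitch_shift.cents / 1200.0f);
//   spectrum_do_pitch_shift(pars, nfreq, in.data(), out.data(), ratio);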
inline void spectrum_do_octave(const ProcessParameters& pars, int nfreq, double samplerate,
    std::vector<REALTYPE>& sumfreq,
    std::vector<REALTYPE>& tmpfreq1,
    REALTYPE *freq1, REALTYPE *freq2) {
    spectrum_zero(nfreq, sumfreq.data());
    if (pars.octave.om2>1e-3) {
        spectrum_do_pitch_shift(pars, nfreq, freq1, tmpfreq1.data(), 0.25);
        spectrum_add(nfreq, sumfreq.data(), tmpfreq1.data(), pars.octave.om2);
    };
    if (pars.octave.om1>1e-3) {
        spectrum_do_pitch_shift(pars, nfreq, freq1, tmpfreq1.data(), 0.5);
        spectrum_add(nfreq, sumfreq.data(), tmpfreq1.data(), pars.octave.om1);
    };
    if (pars.octave.o0>1e-3) {
        spectrum_add(nfreq, sumfreq.data(), freq1, pars.octave.o0);
    };
    if (pars.octave.o1>1e-3) {
        spectrum_do_pitch_shift(pars, nfreq, freq1, tmpfreq1.data(), 2.0);
        spectrum_add(nfreq, sumfreq.data(), tmpfreq1.data(), pars.octave.o1);
    };
    if (pars.octave.o15>1e-3) {
        spectrum_do_pitch_shift(pars, nfreq, freq1, tmpfreq1.data(), 3.0);
        spectrum_add(nfreq, sumfreq.data(), tmpfreq1.data(), pars.octave.o15);
    };
    if (pars.octave.o2>1e-3) {
        spectrum_do_pitch_shift(pars, nfreq, freq1, tmpfreq1.data(), 4.0);
        spectrum_add(nfreq, sumfreq.data(), tmpfreq1.data(), pars.octave.o2);
    };
    REALTYPE sum = 0.01f + pars.octave.om2 + pars.octave.om1 + pars.octave.o0 + pars.octave.o1 + pars.octave.o15 + pars.octave.o2;
    if (sum<0.5f) sum = 0.5f;
    for (int i = 0; i<nfreq; i++) freq2[i] = sumfreq[i] / sum;
};
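// Behaviour sketch: the six octave gains mix fixed pitch-shifted copies of the
// input (ratios 0.25, 0.5, 1.0, 2.0, 3.0, 4.0, i.e. -2 oct, -1 oct, unison,
// +1 oct, +1 oct and a fifth, +2 oct) into sumfreq, then normalise by the gain
// sum (never less than 0.5). Setting only pars.octave.o0 = 1.0 therefore passes
// the spectrum through essentially unchanged (divided by 1.01).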
inline void spectrum_do_filter(const ProcessParameters& pars, int nfreq, double samplerate, REALTYPE *freq1, REALTYPE *freq2) {
    REALTYPE low = 0, high = 0;
    if (pars.filter.low<pars.filter.high) {//sort the low/high freqs
        low = pars.filter.low;
        high = pars.filter.high;
    }
    else {
        high = pars.filter.low;
        low = pars.filter.high;
    };
    int ilow = (int)(low / samplerate * nfreq*2.0f);
    int ihigh = (int)(high / samplerate * nfreq*2.0f);
    REALTYPE dmp = 1.0;
    REALTYPE dmprap = 1.0f - pow(pars.filter.hdamp*0.5f, 4.0f);
    for (int i = 0; i<nfreq; i++) {
        REALTYPE a = 0.0f;
        if ((i >= ilow) && (i<ihigh)) a = 1.0f;
        if (pars.filter.stop) a = 1.0f - a;
        freq2[i] = freq1[i] * a*dmp;
        dmp *= dmprap + 1e-8f;
    };
};
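// Behaviour sketch: ilow/ihigh are the band edges converted to bin indices via
// freq / (samplerate/2) * nfreq. Bins inside [ilow, ihigh) get a = 1, the rest
// a = 0, and pars.filter.stop inverts the mask (band-reject instead of
// band-pass). Independently, dmp applies a per-bin exponential tilt,
// dmp *= 1 - (hdamp*0.5)^4, so larger hdamp values roll the high bins off faster.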
class SpectrumProcess
{
public:
@@ -20,7 +20,7 @@
#include <stdlib.h>
#include <math.h>
FFT::FFT(int nsamples_)
FFT::FFT(int nsamples_, bool no_inverse)
{
    nsamples=nsamples_;
    if (nsamples%2!=0) {
@@ -43,13 +43,15 @@ FFT::FFT(int nsamples_)
    {
        //fftwf_plan_with_nthreads(2);
        planfftw=fftwf_plan_r2r_1d(nsamples,data.data(),data.data(),FFTW_R2HC,FFTW_MEASURE);
        planifftw=fftwf_plan_r2r_1d(nsamples,data.data(),data.data(),FFTW_HC2R,FFTW_MEASURE);
        if (no_inverse == false)
            planifftw=fftwf_plan_r2r_1d(nsamples,data.data(),data.data(),FFTW_HC2R,FFTW_MEASURE);
    } else
    {
        //fftwf_plan_with_nthreads(2);
        planfftw=fftwf_plan_r2r_1d(nsamples,data.data(),data.data(),FFTW_R2HC,FFTW_ESTIMATE);
        //fftwf_plan_with_nthreads(2);
        planifftw=fftwf_plan_r2r_1d(nsamples,data.data(),data.data(),FFTW_HC2R,FFTW_ESTIMATE);
        if (no_inverse == false)
            planifftw=fftwf_plan_r2r_1d(nsamples,data.data(),data.data(),FFTW_HC2R,FFTW_ESTIMATE);
    }
    //double t1 = Time::getMillisecondCounterHiRes();
    //Logger::writeToLog("Creating FFTW3 plans took "+String(t1-t0)+ "ms");
@@ -64,7 +66,8 @@ FFT::FFT(int nsamples_)
FFT::~FFT()
{
    fftwf_destroy_plan(planfftw);
    fftwf_destroy_plan(planifftw);
    if (planifftw!=nullptr)
        fftwf_destroy_plan(planifftw);
};
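// Usage sketch for the new forward-only mode (assumes the FFTW build path and
// that planifftw is default-initialised to nullptr so the guarded destroy above
// is safe):
//
//   FFT analysis_fft(2 * nfreqs, true);   // forward plan only, no inverse plan
//   analysis_fft.applywindow(W_HAMMING);
//   analysis_fft.smp2freq();              // freq2smp() has no valid plan in this mode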
void FFT::smp2freq()
@@ -122,7 +122,7 @@ enum FFTWindow{W_RECTANGULAR,W_HAMMING,W_HANN,W_BLACKMAN,W_BLACKMAN_HARRIS};
class FFT
{//FFT class that considers phases as random
public:
    FFT(int nsamples_);//samples must be even
    FFT(int nsamples_, bool no_inverse=false);//samples must be even
    ~FFT();
    void smp2freq();//input is smp, output is freq (phases are discarded)
    void freq2smp();//input is freq,output is smp (phases are random)
@@ -192,7 +192,7 @@ class Stretch
    virtual void process_spectrum(REALTYPE *){};
    virtual REALTYPE get_stretch_multiplier(REALTYPE pos_percents);
    REALTYPE samplerate;
    REALTYPE samplerate=0.0f;
private:
@@ -129,6 +129,24 @@ void StretchAudioSource::setAudioBufferAsInputSource(AudioBuffer<float>* buf, in
    setPlayRange({ 0.0,1.0 }, true);
}
void StretchAudioSource::setMainVolume(double decibels)
{
    if (decibels == m_main_volume)
        return;
    std::lock_guard<decltype(m_mutex)> locker(m_mutex);
    m_main_volume = jlimit(-144.0, 12.0, decibels);
    ++m_param_change_count;
}
void StretchAudioSource::setLoopXFadeLength(double lenseconds)
{
    if (lenseconds == m_loopxfadelen)
        return;
    std::lock_guard<decltype(m_mutex)> locker(m_mutex);
    m_loopxfadelen = jlimit(0.0, 1.0, lenseconds);
    ++m_param_change_count;
}
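// Both setters take m_mutex, clamp the value and bump m_param_change_count so the
// audio thread can pick the change up. Caller-side sketch (values illustrative,
// see MultiStretchAudioSource further down for the real call sites):
//
//   source->setMainVolume(-6.0);        // clamped to -144..+12 dB
//   source->setLoopXFadeLength(0.25);   // clamped to 0..1 seconds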
void StretchAudioSource::getNextAudioBlock(const AudioSourceChannelInfo & bufferToFill)
{
    // for realtime play, this is assumed to be used with BufferingAudioSource, so mutex locking should not be too bad...
@@ -145,7 +163,7 @@ void StretchAudioSource::getNextAudioBlock(const AudioSourceChannelInfo & buffer
        e->set_freezing(m_freezing);
    }
    double maingain = Decibels::decibelsToGain((double)val_MainVolume.getValue());
    double maingain = Decibels::decibelsToGain(m_main_volume);
    if (m_vol_smoother.getTargetValue() != maingain)
        m_vol_smoother.setValue(maingain);
    FloatVectorOperations::disableDenormalisedNumberSupport();
@@ -158,7 +176,7 @@ void StretchAudioSource::getNextAudioBlock(const AudioSourceChannelInfo & buffer
        return;
    if (m_inputfile->info.nsamples == 0)
        return;
    m_inputfile->setXFadeLenSeconds(val_XFadeLen.getValue());
    m_inputfile->setXFadeLenSeconds(m_loopxfadelen);
    double silencethreshold = Decibels::decibelsToGain(-70.0);
    bool tempfirst = true;
@@ -458,7 +476,7 @@ void StretchAudioSource::setProcessParameters(ProcessParameters * pars)
    ++m_param_change_count;
}
ProcessParameters StretchAudioSource::getProcessParameters()
const ProcessParameters& StretchAudioSource::getProcessParameters()
{
    return m_ppar;
}
@@ -545,6 +563,7 @@ void StretchAudioSource::setPlayRange(Range<double> playrange, bool isloop)
    m_inputfile->seek(m_playrange.getStart());
    m_seekpos = m_playrange.getStart();
    ++m_param_change_count;
}
bool StretchAudioSource::isLoopEnabled()
@@ -633,8 +652,8 @@ void MultiStretchAudioSource::getNextAudioBlock(const AudioSourceChannelInfo & b
    m_blocksize = bufferToFill.numSamples;
    if (m_is_in_switch == false)
    {
        getActiveStretchSource()->val_MainVolume.setValue(val_MainVolume.getValue());
        getActiveStretchSource()->val_XFadeLen.setValue(val_XFadeLen.getValue());
        getActiveStretchSource()->setMainVolume(val_MainVolume.getValue());
        getActiveStretchSource()->setLoopXFadeLength(val_XFadeLen.getValue());
        getActiveStretchSource()->setFreezing(m_freezing);
        getActiveStretchSource()->getNextAudioBlock(bufferToFill);
@@ -648,10 +667,10 @@ void MultiStretchAudioSource::getNextAudioBlock(const AudioSourceChannelInfo & b
    }
    AudioSourceChannelInfo ascinfo1(m_processbuffers[0]);
    AudioSourceChannelInfo ascinfo2(m_processbuffers[1]);
    m_stretchsources[0]->val_MainVolume.setValue(val_MainVolume.getValue());
    m_stretchsources[1]->val_MainVolume.setValue(val_MainVolume.getValue());
    m_stretchsources[0]->val_XFadeLen.setValue(val_XFadeLen.getValue());
    m_stretchsources[1]->val_XFadeLen.setValue(val_XFadeLen.getValue());
    m_stretchsources[0]->setMainVolume(val_MainVolume.getValue());
    m_stretchsources[1]->setMainVolume(val_MainVolume.getValue());
    m_stretchsources[0]->setLoopXFadeLength(val_XFadeLen.getValue());
    m_stretchsources[1]->setLoopXFadeLength(val_XFadeLen.getValue());
    m_stretchsources[0]->setFreezing(m_freezing);
    m_stretchsources[1]->setFreezing(m_freezing);
    m_stretchsources[1]->setFFTWindowingType(m_stretchsources[0]->getFFTWindowingType());
@@ -59,7 +59,7 @@ public:
    void setRate(double rate);
    double getRate() { return m_playrate; }
    void setProcessParameters(ProcessParameters* pars);
    ProcessParameters getProcessParameters();
    const ProcessParameters& getProcessParameters();
    void setFFTSize(int size);
    int getFFTSize() { return m_process_fftsize; }
@@ -82,8 +82,7 @@ public:
    void setFFTWindowingType(int windowtype);
    int getFFTWindowingType() { return m_fft_window_type; }
    std::pair<Range<double>,Range<double>> getFileCachedRangesNormalized();
    Value val_MainVolume;
    Value val_XFadeLen;
    ValueTree getStateTree();
    void setStateTree(ValueTree state);
    void setClippingEnabled(bool b) { m_clip_output = b; }
@@ -91,6 +90,10 @@ public:
    void setLoopingEnabled(bool b);
    void setMaxLoops(int64_t numloops) { m_maxloops = numloops; }
    void setAudioBufferAsInputSource(AudioBuffer<float>* buf, int sr, int len);
    void setMainVolume(double decibels);
    double getMainVolume() const { return m_main_volume; }
    void setLoopXFadeLength(double lenseconds);
    double getLoopXFadeLength() const { return m_loopxfadelen; }
    int m_param_change_count = 0;
private:
    CircularBuffer<float> m_stretchoutringbuf{ 1024 * 1024 };
@@ -107,6 +110,8 @@ private:
    double m_outsr = 44100.0;
    int m_process_fftsize = 0;
    int m_fft_window_type = -1;
    double m_main_volume = 0.0;
    double m_loopxfadelen = 0.0;
    ProcessParameters m_ppar;
    BinauralBeatsParameters m_bbpar;
    double m_playrate = 1.0;
@@ -42,7 +42,7 @@ PaulstretchpluginAudioProcessorEditor::PaulstretchpluginAudioProcessorEditor (Pa
    addAndMakeVisible(&m_rec_enable);
    m_rec_enable.setButtonText("Capture");
    attachCallback(m_rec_enable, [this]() { processor.setRecordingEnabled(m_rec_enable.getToggleState()); });
    addAndMakeVisible(&m_specvis);
    setSize (700, 30+pars.size()*25+200);
    m_wavecomponent.TimeSelectionChangedCallback = [this](Range<double> range, int which)
    {
@@ -56,6 +56,7 @@ PaulstretchpluginAudioProcessorEditor::PaulstretchpluginAudioProcessorEditor (Pa
    m_wavecomponent.ShowFileCacheRange = true;
    startTimer(1, 100);
    startTimer(2, 1000);
    startTimer(3, 200);
    m_wavecomponent.startTimer(100);
}
@@ -79,10 +80,11 @@ void PaulstretchpluginAudioProcessorEditor::resized()
    for (int i = 0; i < m_parcomps.size(); ++i)
    {
        m_parcomps[i]->setBounds(1, 30 + i * 25, 598, 24);
        m_parcomps[i]->setBounds(1, 30 + i * 25, getWidth()-2, 24);
    }
    int yoffs = m_parcomps.back()->getBottom() + 1;
    m_wavecomponent.setBounds(1, yoffs, getWidth()-2, getHeight()-1-yoffs);
    //m_wavecomponent.setBounds(1, yoffs, getWidth()-2, getHeight()-1-yoffs);
    m_specvis.setBounds(1, yoffs, getWidth() - 2, getHeight() - 1 - yoffs);
}
void PaulstretchpluginAudioProcessorEditor::timerCallback(int id)
@@ -108,6 +110,12 @@ void PaulstretchpluginAudioProcessorEditor::timerCallback(int id)
            m_wavecomponent.setAudioFile(processor.getAudioFile());
        }
        m_wavecomponent.setTimeSelection(processor.getTimeSelection());
    }
    if (id == 3)
    {
        m_specvis.setState(processor.getStretchSource()->getProcessParameters(), processor.getStretchSource()->getFFTSize() / 2,
            processor.getSampleRate());
    }
}
@@ -431,3 +439,77 @@ int WaveformComponent::getTimeSelectionEdge(int x, int y)
    return 0;
}
SpectralVisualizer::SpectralVisualizer()
{
    m_img = Image(Image::RGB, 500, 200, true);
}
void SpectralVisualizer::setState(const ProcessParameters & pars, int nfreqs, double samplerate)
{
    double t0 = Time::getMillisecondCounterHiRes();
    double hz = 440.0;
    int numharmonics = 40;
    double scaler = 1.0 / numharmonics;
    if (m_img.getWidth()!=getWidth() || m_img.getHeight()!=getHeight())
        m_img = Image(Image::RGB, getWidth(), getHeight(), true);
    if (m_nfreqs == 0 || nfreqs != m_nfreqs)
    {
        m_nfreqs = nfreqs;
        m_insamples = std::vector<REALTYPE>(nfreqs * 2);
        m_freqs1 = std::vector<REALTYPE>(nfreqs);
        m_freqs2 = std::vector<REALTYPE>(nfreqs);
        m_freqs3 = std::vector<REALTYPE>(nfreqs);
        m_fft = std::make_unique<FFT>(nfreqs*2);
        std::fill(m_insamples.begin(), m_insamples.end(), 0.0f);
        for (int i = 0; i < nfreqs; ++i)
        {
            for (int j = 0; j < numharmonics; ++j)
            {
                double oscgain = 1.0 - (1.0 / numharmonics)*j;
                m_insamples[i] += scaler * oscgain * sin(2 * 3.141592653 / samplerate * i * (hz + hz * j));
            }
        }
    }
    //std::fill(m_freqs1.begin(), m_freqs1.end(), 0.0f);
    //std::fill(m_freqs2.begin(), m_freqs2.end(), 0.0f);
    //std::fill(m_freqs3.begin(), m_freqs3.end(), 0.0f);
    //std::fill(m_fft->freq.begin(), m_fft->freq.end(), 0.0f);
    for (int i = 0; i < nfreqs; ++i)
    {
        m_fft->smp[i] = m_insamples[i];
    }
    m_fft->applywindow(W_HAMMING);
    m_fft->smp2freq();
    double ratio = pow(2.0f, pars.pitch_shift.cents / 1200.0f);
    spectrum_do_pitch_shift(pars, nfreqs, m_fft->freq.data(), m_freqs2.data(), ratio);
    spectrum_do_freq_shift(pars, nfreqs, samplerate, m_freqs2.data(), m_freqs1.data());
    spectrum_do_compressor(pars, nfreqs, m_freqs1.data(), m_freqs2.data());
    spectrum_spread(nfreqs, samplerate, m_freqs3, m_freqs2.data(), m_freqs1.data(), pars.spread.bandwidth);
    if (pars.harmonics.enabled)
        spectrum_do_harmonics(pars, m_freqs3, nfreqs, samplerate, m_freqs1.data(), m_freqs2.data());
    else spectrum_copy(nfreqs, m_freqs1.data(), m_freqs2.data());
    Graphics g(m_img);
    g.fillAll(Colours::black);
    g.setColour(Colours::white);
    for (int i = 0; i < nfreqs; ++i)
    {
        double binfreq = (samplerate / 2 / nfreqs)*i;
        double xcor = jmap<double>(binfreq, 0.0, samplerate / 2.0, 0.0, getWidth());
        double ycor = getHeight() - jmap<double>(m_freqs2[i], 0.0, nfreqs/128, 0.0, getHeight());
        ycor = jlimit<double>(0.0, getHeight(), ycor);
        g.drawLine(xcor, getHeight(), xcor, ycor, 1.0);
    }
    double t1 = Time::getMillisecondCounterHiRes();
    m_elapsed = t1 - t0;
    repaint();
}
void SpectralVisualizer::paint(Graphics & g)
{
    g.drawImage(m_img, 0, 0, getWidth(), getHeight(), 0, 0, m_img.getWidth(), m_img.getHeight());
    g.setColour(Colours::yellow);
    g.drawText(String(m_elapsed, 1)+" ms", 1, 1, getWidth(), 30, Justification::topLeft);
}
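// Preview-chain note: setState() synthesises a 440 Hz test tone with 40 decaying
// harmonics, takes its magnitude spectrum, then applies pitch shift ->
// frequency shift -> compressor -> spread -> harmonics (an approximation of
// process_spectrum(), not the full chain) and draws the result. Typical caller,
// as wired up in the editor's timer callback above:
//
//   m_specvis.setState(processor.getStretchSource()->getProcessParameters(),
//                      processor.getStretchSource()->getFFTSize() / 2,
//                      processor.getSampleRate());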
@@ -15,6 +15,21 @@
#include <memory>
#include <vector>
class SpectralVisualizer : public Component
{
public:
    SpectralVisualizer();
    void setState(const ProcessParameters& pars, int nfreqs, double samplerate);
    void paint(Graphics& g) override;
private:
    Image m_img;
    std::vector<REALTYPE> m_insamples, m_freqs1, m_freqs2, m_freqs3;
    std::unique_ptr<FFT> m_fft;
    int m_nfreqs = 0;
    double m_elapsed = 0.0;
};
inline void attachCallback(Button& button, std::function<void()> callback)
{
    struct ButtonCallback : public Button::Listener,
@@ -240,6 +255,7 @@ public:
private:
    PaulstretchpluginAudioProcessor& processor;
    std::vector<std::shared_ptr<ParameterComponent>> m_parcomps;
    SpectralVisualizer m_specvis;
    ToggleButton m_rec_enable;
    TextButton m_import_button;
    Label m_info_label;
@@ -12,8 +12,10 @@
#include "PluginEditor.h"
#include <set>
#ifdef WIN32
#undef min
#undef max
#endif
std::set<PaulstretchpluginAudioProcessor*> g_activeprocessors;
@@ -82,6 +84,7 @@ PaulstretchpluginAudioProcessor::PaulstretchpluginAudioProcessor()
    m_ppar.freq_shift.enabled = true;
    m_stretch_source->setOnsetDetection(0.0);
    m_stretch_source->setLoopingEnabled(true);
    m_stretch_source->setFFTWindowingType(1);
    addParameter(new AudioParameterFloat("mainvolume0", "Main volume", -24.0f, 12.0f, -3.0f)); // 0
    addParameter(new AudioParameterFloat("stretchamount0", "Stretch amount",
        NormalisableRange<float>(0.1f, 128.0f, 0.01f, 0.5), 1.0f)); // 1
@@ -92,6 +95,10 @@ PaulstretchpluginAudioProcessor::PaulstretchpluginAudioProcessor()
    addParameter(new AudioParameterFloat("playrange_end0", "Sound end", 0.0f, 1.0f, 1.0f)); // 6
    addParameter(new AudioParameterBool("freeze0", "Freeze", false)); // 7
    addParameter(new AudioParameterFloat("spread0", "Frequency spread", 0.0f, 1.0f, 0.0f)); // 8
    addParameter(new AudioParameterFloat("compress0", "Compress", 0.0f, 1.0f, 0.0f)); // 9
    addParameter(new AudioParameterFloat("loopxfadelen0", "Loop xfade length", 0.0f, 1.0f, 0.0f)); // 10
    addParameter(new AudioParameterFloat("numharmonics0", "Num harmonics", 0.0f, 100.0f, 0.0f)); // 11
    addParameter(new AudioParameterFloat("harmonicsfreq0", "Harmonics base freq", 1.0f, 5000.0f, 100.0f)); // 12
}
PaulstretchpluginAudioProcessor::~PaulstretchpluginAudioProcessor()
@@ -145,7 +152,7 @@ bool PaulstretchpluginAudioProcessor::isMidiEffect() const
double PaulstretchpluginAudioProcessor::getTailLengthSeconds() const
{
    return 0.0;
    return (double)m_bufamounts[m_prebuffer_amount]/getSampleRate();
}
int PaulstretchpluginAudioProcessor::getNumPrograms()
@@ -306,14 +313,19 @@ void PaulstretchpluginAudioProcessor::processBlock (AudioSampleBuffer& buffer, M
    }
    jassert(m_buffering_source != nullptr);
    jassert(m_bufferingthread.isThreadRunning());
    m_stretch_source->val_MainVolume = (float)*getFloatParameter(0);
    m_stretch_source->setMainVolume(*getFloatParameter(0));
    m_stretch_source->setRate(*getFloatParameter(1));
    m_stretch_source->val_XFadeLen = 0.1;
    setFFTSize(*getFloatParameter(2));
    m_ppar.pitch_shift.cents = *getFloatParameter(3) * 100.0;
    m_ppar.freq_shift.Hz = *getFloatParameter(4);
    m_ppar.spread.enabled = *getFloatParameter(8) > 0.0f;
    m_ppar.spread.bandwidth = *getFloatParameter(8);
    m_ppar.compressor.power = *getFloatParameter(9);
    m_ppar.harmonics.enabled = *getFloatParameter(11) >= 1.0;
    m_ppar.harmonics.nharmonics = *getFloatParameter(11);
    m_ppar.harmonics.freq = *getFloatParameter(12);
    m_stretch_source->setLoopXFadeLength(*getFloatParameter(10));
    double t0 = *getFloatParameter(5);
    double t1 = *getFloatParameter(6);
    if (t0 > t1)
@@ -73,7 +73,7 @@ private:
    bool m_ready_to_play = false;
    AudioBuffer<float> m_recbuffer;
    double m_max_reclen = 5;
    double m_max_reclen = 10.0;
    bool m_is_recording = false;
    int m_rec_pos = 0;
    void finishRecording(int lenrecorded);