@@ -13,7 +13,166 @@ namespace core {
 template <int NUM_AUDIO_INPUTS, int NUM_AUDIO_OUTPUTS>
-struct AudioInterface : Module, audio::Port {
+struct AudioInterfacePort : audio::Port {
+	Module* module;
+	dsp::DoubleRingBuffer<dsp::Frame<NUM_AUDIO_INPUTS>, 32768> engineInputBuffer;
+	dsp::DoubleRingBuffer<dsp::Frame<NUM_AUDIO_OUTPUTS>, 32768> engineOutputBuffer;
+	dsp::SampleRateConverter<NUM_AUDIO_INPUTS> inputSrc;
+	dsp::SampleRateConverter<NUM_AUDIO_OUTPUTS> outputSrc;
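+	// The ring buffers and sample rate converters decouple the audio device's block size and sample rate
+	// from the engine's: device-thread callbacks fill/drain them on one side, Module::process() on the other.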
+	// Port variable caches
+	int deviceNumInputs = 0;
+	int deviceNumOutputs = 0;
+	float deviceSampleRate = 0.f;
+	int requestedEngineFrames = 0;
+	AudioInterfacePort(Module* module) {
+		this->module = module;
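+		// Note that the port's device channel limits are crossed with the module's jack counts: device
+		// outputs are fed from the module's NUM_AUDIO_INPUTS input jacks, and device inputs feed its
+		// NUM_AUDIO_OUTPUTS output jacks.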
+		maxOutputs = NUM_AUDIO_INPUTS;
+		maxInputs = NUM_AUDIO_OUTPUTS;
+		inputSrc.setQuality(6);
+		outputSrc.setQuality(6);
+	}
+	void setPrimary() {
+		APP->engine->setPrimaryModule(module);
+	}
+	bool isPrimary() {
+		return APP->engine->getPrimaryModule() == module;
+	}
+	void processInput(const float* input, int inputStride, int frames) override {
+		// DEBUG("%p: new device block ____________________________", this);
+		// Claim primary module if there is none
+		if (!APP->engine->getPrimaryModule()) {
+			setPrimary();
+		}
+		bool isPrimaryCached = isPrimary();
+		// Set sample rate of engine if engine sample rate is "auto".
+		if (isPrimaryCached) {
+			APP->engine->setSuggestedSampleRate(deviceSampleRate);
+		}
+		float engineSampleRate = APP->engine->getSampleRate();
+		float sampleRateRatio = engineSampleRate / deviceSampleRate;
+		// DEBUG("%p: %d block, engineOutputBuffer still has %d", this, frames, (int) engineOutputBuffer.size());
+		// Consider engine buffers "too full" if they contain a bit more than the audio device's number of frames, converted to engine sample rate.
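+		// Illustrative example (numbers not from the original source): a 256-frame device block at 44.1 kHz
+		// with the engine at 48 kHz gives maxEngineFrames = ceil(256 * (48000 / 44100) * 2) - 1 = 557.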
+		int maxEngineFrames = (int) std::ceil(frames * sampleRateRatio * 2.0) - 1;
+		// If the engine output buffer is too full, clear it to keep latency low. No need to clear if primary because it's always cleared below.
+		if (!isPrimaryCached && (int) engineOutputBuffer.size() > maxEngineFrames) {
+			engineOutputBuffer.clear();
+			// DEBUG("%p: clearing engine output", this);
+		}
+		if (deviceNumInputs > 0) {
+			// Always clear engine output if primary
+			if (isPrimaryCached) {
+				engineOutputBuffer.clear();
+			}
+			// Set up sample rate converter
+			outputSrc.setRates(deviceSampleRate, engineSampleRate);
+			outputSrc.setChannels(deviceNumInputs);
+			// Convert audio input -> engine output
+			dsp::Frame<NUM_AUDIO_OUTPUTS> audioInputBuffer[frames];
+			std::memset(audioInputBuffer, 0, sizeof(audioInputBuffer));
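+			// Zeroing the frames above keeps channels at or beyond deviceNumInputs silent.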
+			for (int i = 0; i < frames; i++) {
+				for (int j = 0; j < deviceNumInputs; j++) {
+					float v = input[i * inputStride + j];
+					audioInputBuffer[i].samples[j] = v;
+				}
+			}
+			int audioInputFrames = frames;
+			int outputFrames = engineOutputBuffer.capacity();
+			outputSrc.process(audioInputBuffer, &audioInputFrames, engineOutputBuffer.endData(), &outputFrames);
+			engineOutputBuffer.endIncr(outputFrames);
+			// Request exactly as many frames as we have in the engine output buffer.
+			requestedEngineFrames = engineOutputBuffer.size();
+		}
+		else {
+			// Upper bound on number of frames so that `audioOutputFrames >= frames` when processOutput() is called.
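+			// Illustrative example (numbers not from the original source): 256 device frames with
+			// sampleRateRatio = 48000 / 44100 and 100 engine frames already buffered requests
+			// ceil(256 * 1.0884) - 100 = 279 - 100 = 179 frames.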
+			requestedEngineFrames = std::max((int) std::ceil(frames * sampleRateRatio) - (int) engineInputBuffer.size(), 0);
+		}
+	}
+	void processBuffer(const float* input, int inputStride, float* output, int outputStride, int frames) override {
+		// Step engine
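+		// Only the primary module advances the engine; non-primary audio modules never call stepBlock() and
+		// simply exchange frames with the buffers that are filled and drained while the primary module steps.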
+		if (isPrimary() && requestedEngineFrames > 0) {
+			// DEBUG("%p: %d block, stepping %d", this, frames, requestedEngineFrames);
+			APP->engine->stepBlock(requestedEngineFrames);
+		}
+	}
+	void processOutput(float* output, int outputStride, int frames) override {
+		// bool isPrimaryCached = isPrimary();
+		float engineSampleRate = APP->engine->getSampleRate();
+		float sampleRateRatio = engineSampleRate / deviceSampleRate;
+		if (deviceNumOutputs > 0) {
+			// Set up sample rate converter
+			inputSrc.setRates(engineSampleRate, deviceSampleRate);
+			inputSrc.setChannels(deviceNumOutputs);
+			// Convert engine input -> audio output
+			dsp::Frame<NUM_AUDIO_OUTPUTS> audioOutputBuffer[frames];
+			int inputFrames = engineInputBuffer.size();
+			int audioOutputFrames = frames;
+			inputSrc.process(engineInputBuffer.startData(), &inputFrames, audioOutputBuffer, &audioOutputFrames);
+			engineInputBuffer.startIncr(inputFrames);
+			// Copy the audio output buffer
+			for (int i = 0; i < audioOutputFrames; i++) {
+				for (int j = 0; j < deviceNumOutputs; j++) {
+					float v = audioOutputBuffer[i].samples[j];
+					v = clamp(v, -1.f, 1.f);
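+					// Hard-clip to the nominal [-1, 1] float range before writing to the device buffer.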
+					output[i * outputStride + j] = v;
+				}
+			}
+			// Fill the rest of the audio output buffer with zeros
+			for (int i = audioOutputFrames; i < frames; i++) {
+				for (int j = 0; j < deviceNumOutputs; j++) {
+					output[i * outputStride + j] = 0.f;
+				}
+			}
+		}
+		// DEBUG("%p: %d block, engineInputBuffer left %d", this, frames, (int) engineInputBuffer.size());
+		// If the engine input buffer is too full, clear it to keep latency low.
+		int maxEngineFrames = (int) std::ceil(frames * sampleRateRatio * 2.0) - 1;
+		if ((int) engineInputBuffer.size() > maxEngineFrames) {
+			engineInputBuffer.clear();
+			// DEBUG("%p: clearing engine input", this);
+		}
+		// DEBUG("%p %s:\tframes %d requestedEngineFrames %d\toutputBuffer %d engineInputBuffer %d\t", this, isPrimaryCached ? "primary" : "secondary", frames, requestedEngineFrames, engineOutputBuffer.size(), engineInputBuffer.size());
+	}
+	void onStartStream() override {
+		deviceNumInputs = std::min(getNumInputs(), NUM_AUDIO_OUTPUTS);
+		deviceNumOutputs = std::min(getNumOutputs(), NUM_AUDIO_INPUTS);
+		deviceSampleRate = getSampleRate();
+		engineInputBuffer.clear();
+		engineOutputBuffer.clear();
+		// DEBUG("onStartStream %d %d %f", deviceNumInputs, deviceNumOutputs, deviceSampleRate);
+	}
+	void onStopStream() override {
+		deviceNumInputs = 0;
+		deviceNumOutputs = 0;
+		deviceSampleRate = 0.f;
+		engineInputBuffer.clear();
+		engineOutputBuffer.clear();
+		// DEBUG("onStopStream");
+	}
+};
+template <int NUM_AUDIO_INPUTS, int NUM_AUDIO_OUTPUTS>
+struct AudioInterface : Module {
 	static constexpr int NUM_INPUT_LIGHTS = (NUM_AUDIO_INPUTS > 2) ? (NUM_AUDIO_INPUTS / 2) : 0;
 	static constexpr int NUM_OUTPUT_LIGHTS = (NUM_AUDIO_OUTPUTS > 2) ? (NUM_AUDIO_OUTPUTS / 2) : 0;
@@ -36,28 +195,18 @@ struct AudioInterface : Module, audio::Port {
 		NUM_LIGHTS
 	};
+	AudioInterfacePort<NUM_AUDIO_INPUTS, NUM_AUDIO_OUTPUTS> port;
 	dsp::RCFilter dcFilters[NUM_AUDIO_INPUTS];
 	bool dcFilterEnabled = false;
-	dsp::DoubleRingBuffer<dsp::Frame<NUM_AUDIO_INPUTS>, 32768> engineInputBuffer;
-	dsp::DoubleRingBuffer<dsp::Frame<NUM_AUDIO_OUTPUTS>, 32768> engineOutputBuffer;
-	dsp::SampleRateConverter<NUM_AUDIO_INPUTS> inputSrc;
-	dsp::SampleRateConverter<NUM_AUDIO_OUTPUTS> outputSrc;
 	dsp::ClockDivider lightDivider;
 	// For each pair of inputs/outputs
 	float inputClipTimers[(NUM_AUDIO_INPUTS > 0) ? NUM_INPUT_LIGHTS : 0] = {};
 	float outputClipTimers[(NUM_AUDIO_INPUTS > 0) ? NUM_OUTPUT_LIGHTS : 0] = {};
 	dsp::VuMeter2 vuMeter[(NUM_AUDIO_INPUTS == 2) ? 2 : 0];
-	// Port variable caches
-	int deviceNumInputs = 0;
-	int deviceNumOutputs = 0;
-	float deviceSampleRate = 0.f;
-	int requestedEngineFrames = 0;
-	AudioInterface() {
+	AudioInterface() : port(this) {
 		config(NUM_PARAMS, NUM_INPUTS, NUM_OUTPUTS, NUM_LIGHTS);
 		if (NUM_AUDIO_INPUTS == 2)
 			configParam(GAIN_PARAM, 0.f, 2.f, 1.f, "Level", " dB", -10, 40);
@@ -71,26 +220,22 @@ struct AudioInterface : Module, audio::Port {
 			configLight(OUTPUT_LIGHTS + 2 * i, string::f("Device input %d/%d status", 2 * i + 1, 2 * i + 2));
 		lightDivider.setDivision(512);
-		maxOutputs = NUM_AUDIO_INPUTS;
-		maxInputs = NUM_AUDIO_OUTPUTS;
-		inputSrc.setQuality(6);
-		outputSrc.setQuality(6);
 		float sampleTime = APP->engine->getSampleTime();
 		for (int i = 0; i < NUM_AUDIO_INPUTS; i++) {
 			dcFilters[i].setCutoffFreq(10.f * sampleTime);
 		}
-		reset();
+		onReset();
 	}
 	~AudioInterface() {
 		// Close stream here before destructing AudioInterfacePort, so processBuffer() etc are not called on another thread while destructing.
-		setDriverId(-1);
+		port.setDriverId(-1);
 	}
 	void onReset() override {
-		setDriverId(-1);
+		port.setDriverId(-1);
 		if (NUM_AUDIO_INPUTS == 2)
 			dcFilterEnabled = true;
@@ -99,8 +244,8 @@ struct AudioInterface : Module, audio::Port {
 	}
 	void onSampleRateChange(const SampleRateChangeEvent& e) override {
-		engineInputBuffer.clear();
-		engineOutputBuffer.clear();
+		port.engineInputBuffer.clear();
+		port.engineOutputBuffer.clear();
 		for (int i = 0; i < NUM_AUDIO_INPUTS; i++) {
 			dcFilters[i].setCutoffFreq(10.f * e.sampleTime);
@@ -111,9 +256,9 @@ struct AudioInterface : Module, audio::Port {
 		const float clipTime = 0.25f;
 		// Push inputs to buffer
-		if (deviceNumOutputs > 0) {
+		if (port.deviceNumOutputs > 0) {
 			dsp::Frame<NUM_AUDIO_INPUTS> inputFrame = {};
-			for (int i = 0; i < deviceNumOutputs; i++) {
+			for (int i = 0; i < port.deviceNumOutputs; i++) {
 				// Get input
 				float v = 0.f;
 				if (inputs[AUDIO_INPUTS + i].isConnected())
@@ -144,8 +289,8 @@ struct AudioInterface : Module, audio::Port {
 				}
 			}
-			if (!engineInputBuffer.full()) {
-				engineInputBuffer.push(inputFrame);
+			if (!port.engineInputBuffer.full()) {
+				port.engineInputBuffer.push(inputFrame);
 			}
 			// Audio-2: VU meter process
@@ -165,8 +310,8 @@ struct AudioInterface : Module, audio::Port {
 		}
 		// Pull outputs from buffer
-		if (!engineOutputBuffer.empty()) {
-			dsp::Frame<NUM_AUDIO_OUTPUTS> outputFrame = engineOutputBuffer.shift();
+		if (!port.engineOutputBuffer.empty()) {
+			dsp::Frame<NUM_AUDIO_OUTPUTS> outputFrame = port.engineOutputBuffer.shift();
 			for (int i = 0; i < NUM_AUDIO_OUTPUTS; i++) {
 				float v = outputFrame.samples[i];
 				outputs[AUDIO_OUTPUTS + i].setVoltage(10.f * v);
@@ -203,7 +348,7 @@ struct AudioInterface : Module, audio::Port {
 		else {
 			// Turn on light if at least one port is enabled in the nearby pair.
 			for (int i = 0; i < NUM_AUDIO_INPUTS / 2; i++) {
-				bool active = deviceNumOutputs >= 2 * i + 1;
+				bool active = port.deviceNumOutputs >= 2 * i + 1;
 				bool clip = inputClipTimers[i] > 0.f;
 				if (clip)
 					inputClipTimers[i] -= lightTime;
@@ -211,7 +356,7 @@ struct AudioInterface : Module, audio::Port {
 				lights[INPUT_LIGHTS + i * 2 + 1].setBrightness(active && clip);
 			}
 			for (int i = 0; i < NUM_AUDIO_OUTPUTS / 2; i++) {
-				bool active = deviceNumInputs >= 2 * i + 1;
+				bool active = port.deviceNumInputs >= 2 * i + 1;
 				bool clip = outputClipTimers[i] > 0.f;
 				if (clip)
 					outputClipTimers[i] -= lightTime;
@@ -224,7 +369,7 @@ struct AudioInterface : Module, audio::Port {
 	json_t* dataToJson() override {
 		json_t* rootJ = json_object();
-		json_object_set_new(rootJ, "audio", audio::Port::toJson());
+		json_object_set_new(rootJ, "audio", port.toJson());
 		if (isPrimary())
 			json_object_set_new(rootJ, "primary", json_boolean(true));
@@ -237,7 +382,7 @@ struct AudioInterface : Module, audio::Port {
 	void dataFromJson(json_t* rootJ) override {
 		json_t* audioJ = json_object_get(rootJ, "audio");
 		if (audioJ)
-			audio::Port::fromJson(audioJ);
+			port.fromJson(audioJ);
 		// For now, don't deserialize primary module state.
 		// json_t* primaryJ = json_object_get(rootJ, "primary");
@@ -258,133 +403,6 @@ struct AudioInterface : Module, audio::Port {
 	bool isPrimary() {
 		return APP->engine->getPrimaryModule() == this;
 	}
-	// audio::Port
-	void processInput(const float* input, int inputStride, int frames) override {
-		// DEBUG("%p: new device block ____________________________", this);
-		// Claim primary module if there is none
-		if (!APP->engine->getPrimaryModule()) {
-			setPrimary();
-		}
-		bool isPrimaryCached = isPrimary();
-		// Set sample rate of engine if engine sample rate is "auto".
-		if (isPrimaryCached) {
-			APP->engine->setSuggestedSampleRate(deviceSampleRate);
-		}
-		float engineSampleRate = APP->engine->getSampleRate();
-		float sampleRateRatio = engineSampleRate / deviceSampleRate;
-		// DEBUG("%p: %d block, engineOutputBuffer still has %d", this, frames, (int) engineOutputBuffer.size());
-		// Consider engine buffers "too full" if they contain a bit more than the audio device's number of frames, converted to engine sample rate.
-		int maxEngineFrames = (int) std::ceil(frames * sampleRateRatio * 2.0) - 1;
-		// If the engine output buffer is too full, clear it to keep latency low. No need to clear if primary because it's always cleared below.
-		if (!isPrimaryCached && (int) engineOutputBuffer.size() > maxEngineFrames) {
-			engineOutputBuffer.clear();
-			// DEBUG("%p: clearing engine output", this);
-		}
-		if (deviceNumInputs > 0) {
-			// Always clear engine output if primary
-			if (isPrimaryCached) {
-				engineOutputBuffer.clear();
-			}
-			// Set up sample rate converter
-			outputSrc.setRates(deviceSampleRate, engineSampleRate);
-			outputSrc.setChannels(deviceNumInputs);
-			// Convert audio input -> engine output
-			dsp::Frame<NUM_AUDIO_OUTPUTS> audioInputBuffer[frames];
-			std::memset(audioInputBuffer, 0, sizeof(audioInputBuffer));
-			for (int i = 0; i < frames; i++) {
-				for (int j = 0; j < deviceNumInputs; j++) {
-					float v = input[i * inputStride + j];
-					audioInputBuffer[i].samples[j] = v;
-				}
-			}
-			int audioInputFrames = frames;
-			int outputFrames = engineOutputBuffer.capacity();
-			outputSrc.process(audioInputBuffer, &audioInputFrames, engineOutputBuffer.endData(), &outputFrames);
-			engineOutputBuffer.endIncr(outputFrames);
-			// Request exactly as many frames as we have in the engine output buffer.
-			requestedEngineFrames = engineOutputBuffer.size();
-		}
-		else {
-			// Upper bound on number of frames so that `audioOutputFrames >= frames` when processOutput() is called.
-			requestedEngineFrames = std::max((int) std::ceil(frames * sampleRateRatio) - (int) engineInputBuffer.size(), 0);
-		}
-	}
-	void processBuffer(const float* input, int inputStride, float* output, int outputStride, int frames) override {
-		// Step engine
-		if (isPrimary() && requestedEngineFrames > 0) {
-			// DEBUG("%p: %d block, stepping %d", this, frames, requestedEngineFrames);
-			APP->engine->stepBlock(requestedEngineFrames);
-		}
-	}
-	void processOutput(float* output, int outputStride, int frames) override {
-		// bool isPrimaryCached = isPrimary();
-		float engineSampleRate = APP->engine->getSampleRate();
-		float sampleRateRatio = engineSampleRate / deviceSampleRate;
-		if (deviceNumOutputs > 0) {
-			// Set up sample rate converter
-			inputSrc.setRates(engineSampleRate, deviceSampleRate);
-			inputSrc.setChannels(deviceNumOutputs);
-			// Convert engine input -> audio output
-			dsp::Frame<NUM_AUDIO_OUTPUTS> audioOutputBuffer[frames];
-			int inputFrames = engineInputBuffer.size();
-			int audioOutputFrames = frames;
-			inputSrc.process(engineInputBuffer.startData(), &inputFrames, audioOutputBuffer, &audioOutputFrames);
-			engineInputBuffer.startIncr(inputFrames);
-			// Copy the audio output buffer
-			for (int i = 0; i < audioOutputFrames; i++) {
-				for (int j = 0; j < deviceNumOutputs; j++) {
-					float v = audioOutputBuffer[i].samples[j];
-					v = clamp(v, -1.f, 1.f);
-					output[i * outputStride + j] = v;
-				}
-			}
-			// Fill the rest of the audio output buffer with zeros
-			for (int i = audioOutputFrames; i < frames; i++) {
-				for (int j = 0; j < deviceNumOutputs; j++) {
-					output[i * outputStride + j] = 0.f;
-				}
-			}
-		}
-		// DEBUG("%p: %d block, engineInputBuffer left %d", this, frames, (int) engineInputBuffer.size());
-		// If the engine input buffer is too full, clear it to keep latency low.
-		int maxEngineFrames = (int) std::ceil(frames * sampleRateRatio * 2.0) - 1;
-		if ((int) engineInputBuffer.size() > maxEngineFrames) {
-			engineInputBuffer.clear();
-			// DEBUG("%p: clearing engine input", this);
-		}
-		// DEBUG("%p %s:\tframes %d requestedEngineFrames %d\toutputBuffer %d engineInputBuffer %d\t", this, isPrimaryCached ? "primary" : "secondary", frames, requestedEngineFrames, engineOutputBuffer.size(), engineInputBuffer.size());
-	}
-	void onStartStream() override {
-		deviceNumInputs = std::min(getNumInputs(), NUM_AUDIO_OUTPUTS);
-		deviceNumOutputs = std::min(getNumOutputs(), NUM_AUDIO_INPUTS);
-		deviceSampleRate = getSampleRate();
-		engineInputBuffer.clear();
-		engineOutputBuffer.clear();
-		// DEBUG("onStartStream %d %d %f", deviceNumInputs, deviceNumOutputs, deviceSampleRate);
-	}
-	void onStopStream() override {
-		deviceNumInputs = 0;
-		deviceNumOutputs = 0;
-		deviceSampleRate = 0.f;
-		engineInputBuffer.clear();
-		engineOutputBuffer.clear();
-		// DEBUG("onStopStream");
-	}
 };
@@ -432,7 +450,7 @@ struct AudioInterfaceWidget : ModuleWidget {
 			AudioWidget* audioWidget = createWidget<AudioWidget>(mm2px(Vec(3.2122073, 14.837339)));
 			audioWidget->box.size = mm2px(Vec(44, 28));
-			audioWidget->setAudioPort(module);
+			audioWidget->setAudioPort(module ? &module->port : NULL);
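+			// `module` can be NULL (e.g. for the module browser preview), hence the guard.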
 			addChild(audioWidget);
 		}
 		else if (NUM_AUDIO_INPUTS == 16 && NUM_AUDIO_OUTPUTS == 16) {
@@ -496,7 +514,7 @@ struct AudioInterfaceWidget : ModuleWidget {
 			AudioWidget* audioWidget = createWidget<AudioWidget>(mm2px(Vec(2.57, 14.839)));
 			audioWidget->box.size = mm2px(Vec(91.382, 28.0));
-			audioWidget->setAudioPort(module);
+			audioWidget->setAudioPort(module ? &module->port : NULL);
 			addChild(audioWidget);
 		}
 		else if (NUM_AUDIO_INPUTS == 2 && NUM_AUDIO_OUTPUTS == 2) {
@@ -530,7 +548,7 @@ struct AudioInterfaceWidget : ModuleWidget {
 			AudioDeviceWidget* audioWidget = createWidget<AudioDeviceWidget>(mm2px(Vec(2.135, 14.259)));
 			audioWidget->box.size = mm2px(Vec(21.128, 6.725));
-			audioWidget->setAudioPort(module);
+			audioWidget->setAudioPort(module ? &module->port : NULL);
 			// Adjust deviceChoice position
 			audioWidget->deviceChoice->textOffset = Vec(6, 14);
 			addChild(audioWidget);