21 |
* MA 02111-1307 USA * |
* MA 02111-1307 USA * |
22 |
***************************************************************************/ |
***************************************************************************/ |
23 |
|
|
|
#include "EGADSR.h" |
|
|
#include "Manipulator.h" |
|
24 |
#include "../../common/Features.h" |
#include "../../common/Features.h" |
25 |
#include "Synthesizer.h" |
#include "Synthesizer.h" |
26 |
|
#include "Profiler.h" |
27 |
|
|
28 |
#include "Voice.h" |
#include "Voice.h" |
29 |
|
|
31 |
|
|
32 |
const float Voice::FILTER_CUTOFF_COEFF(CalculateFilterCutoffCoeff()); |
const float Voice::FILTER_CUTOFF_COEFF(CalculateFilterCutoffCoeff()); |
33 |
|
|
|
const int Voice::FILTER_UPDATE_MASK(CalculateFilterUpdateMask()); |
|
|
|
|
34 |
float Voice::CalculateFilterCutoffCoeff() { |
float Voice::CalculateFilterCutoffCoeff() { |
35 |
return log(CONFIG_FILTER_CUTOFF_MAX / CONFIG_FILTER_CUTOFF_MIN); |
return log(CONFIG_FILTER_CUTOFF_MAX / CONFIG_FILTER_CUTOFF_MIN); |
36 |
} |
} |
37 |
|
|
|
/// Calculate the bit mask used to decide when the filter parameters have
/// to be updated: the smallest value of the form 2^n - 1 such that
/// 2^n >= CONFIG_FILTER_UPDATE_STEPS (0 if updates happen every sample).
int Voice::CalculateFilterUpdateMask() {
    if (CONFIG_FILTER_UPDATE_STEPS <= 0) return 0;
    // round the configured step count up to the next power of two
    int steps = 1;
    while (steps < CONFIG_FILTER_UPDATE_STEPS) steps <<= 1;
    return steps - 1;
}
|
|
|
|
38 |
Voice::Voice() { |
Voice::Voice() { |
39 |
pEngine = NULL; |
pEngine = NULL; |
40 |
pDiskThread = NULL; |
pDiskThread = NULL; |
41 |
PlaybackState = playback_state_end; |
PlaybackState = playback_state_end; |
42 |
pEG1 = NULL; |
pLFO1 = new LFOUnsigned(1.0f); // amplitude EG (0..1 range) |
43 |
pEG2 = NULL; |
pLFO2 = new LFOUnsigned(1.0f); // filter EG (0..1 range) |
44 |
pEG3 = NULL; |
pLFO3 = new LFOSigned(1200.0f); // pitch EG (-1200..+1200 range) |
|
pVCAManipulator = NULL; |
|
|
pVCFCManipulator = NULL; |
|
|
pVCOManipulator = NULL; |
|
|
pLFO1 = NULL; |
|
|
pLFO2 = NULL; |
|
|
pLFO3 = NULL; |
|
45 |
KeyGroup = 0; |
KeyGroup = 0; |
46 |
SynthesisMode = 0; // set all mode bits to 0 first |
SynthesisMode = 0; // set all mode bits to 0 first |
47 |
// select synthesis implementation (currently either pure C++ or MMX+SSE(1)) |
// select synthesis implementation (currently either pure C++ or MMX+SSE(1)) |
50 |
#else |
#else |
51 |
SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, false); |
SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, false); |
52 |
#endif |
#endif |
53 |
SYNTHESIS_MODE_SET_PROFILING(SynthesisMode, true); |
SYNTHESIS_MODE_SET_PROFILING(SynthesisMode, Profiler::isEnabled()); |
54 |
|
|
55 |
FilterLeft.Reset(); |
finalSynthesisParameters.filterLeft.Reset(); |
56 |
FilterRight.Reset(); |
finalSynthesisParameters.filterRight.Reset(); |
57 |
} |
} |
58 |
|
|
59 |
Voice::~Voice() { |
Voice::~Voice() { |
|
if (pEG1) delete pEG1; |
|
|
if (pEG2) delete pEG2; |
|
|
if (pEG3) delete pEG3; |
|
60 |
if (pLFO1) delete pLFO1; |
if (pLFO1) delete pLFO1; |
61 |
if (pLFO2) delete pLFO2; |
if (pLFO2) delete pLFO2; |
62 |
if (pLFO3) delete pLFO3; |
if (pLFO3) delete pLFO3; |
|
if (pVCAManipulator) delete pVCAManipulator; |
|
|
if (pVCFCManipulator) delete pVCFCManipulator; |
|
|
if (pVCOManipulator) delete pVCOManipulator; |
|
63 |
} |
} |
64 |
|
|
65 |
void Voice::SetEngine(Engine* pEngine) { |
void Voice::SetEngine(Engine* pEngine) { |
66 |
this->pEngine = pEngine; |
this->pEngine = pEngine; |
|
|
|
|
// delete old objects |
|
|
if (pEG1) delete pEG1; |
|
|
if (pEG2) delete pEG2; |
|
|
if (pEG3) delete pEG3; |
|
|
if (pVCAManipulator) delete pVCAManipulator; |
|
|
if (pVCFCManipulator) delete pVCFCManipulator; |
|
|
if (pVCOManipulator) delete pVCOManipulator; |
|
|
if (pLFO1) delete pLFO1; |
|
|
if (pLFO2) delete pLFO2; |
|
|
if (pLFO3) delete pLFO3; |
|
|
|
|
|
// create new ones |
|
|
pEG1 = new EGADSR(pEngine, Event::destination_vca); |
|
|
pEG2 = new EGADSR(pEngine, Event::destination_vcfc); |
|
|
pEG3 = new EGDecay(pEngine, Event::destination_vco); |
|
|
pVCAManipulator = new VCAManipulator(pEngine); |
|
|
pVCFCManipulator = new VCFCManipulator(pEngine); |
|
|
pVCOManipulator = new VCOManipulator(pEngine); |
|
|
pLFO1 = new LFO<gig::VCAManipulator>(0.0f, 1.0f, LFO<VCAManipulator>::propagation_top_down, pVCAManipulator, pEngine->pEventPool); |
|
|
pLFO2 = new LFO<gig::VCFCManipulator>(0.0f, 1.0f, LFO<VCFCManipulator>::propagation_top_down, pVCFCManipulator, pEngine->pEventPool); |
|
|
pLFO3 = new LFO<gig::VCOManipulator>(-1200.0f, 1200.0f, LFO<VCOManipulator>::propagation_middle_balanced, pVCOManipulator, pEngine->pEventPool); // +-1 octave (+-1200 cents) max. |
|
|
|
|
67 |
this->pDiskThread = pEngine->pDiskThread; |
this->pDiskThread = pEngine->pDiskThread; |
68 |
dmsg(6,("Voice::SetEngine()\n")); |
dmsg(6,("Voice::SetEngine()\n")); |
69 |
} |
} |
139 |
PanLeft = 1.0f - float(RTMath::Max(pDimRgn->Pan, 0)) / 63.0f; |
PanLeft = 1.0f - float(RTMath::Max(pDimRgn->Pan, 0)) / 63.0f; |
140 |
PanRight = 1.0f - float(RTMath::Min(pDimRgn->Pan, 0)) / -64.0f; |
PanRight = 1.0f - float(RTMath::Min(pDimRgn->Pan, 0)) / -64.0f; |
141 |
|
|
142 |
Pos = pDimRgn->SampleStartOffset; // offset where we should start playback of sample (0 - 2000 sample points) |
finalSynthesisParameters.dPos = pDimRgn->SampleStartOffset; // offset where we should start playback of sample (0 - 2000 sample points) |
143 |
|
Pos = pDimRgn->SampleStartOffset; |
144 |
|
|
145 |
// Check if the sample needs disk streaming or is too short for that |
// Check if the sample needs disk streaming or is too short for that |
146 |
long cachedsamples = pSample->GetCache().Size / pSample->FrameSize; |
long cachedsamples = pSample->GetCache().Size / pSample->FrameSize; |
151 |
|
|
152 |
// check if there's a loop defined which completely fits into the cached (RAM) part of the sample |
// check if there's a loop defined which completely fits into the cached (RAM) part of the sample |
153 |
if (pSample->Loops && pSample->LoopEnd <= MaxRAMPos) { |
if (pSample->Loops && pSample->LoopEnd <= MaxRAMPos) { |
154 |
RAMLoop = true; |
RAMLoop = true; |
155 |
LoopCyclesLeft = pSample->LoopPlayCount; |
loop.uiTotalCycles = pSample->LoopPlayCount; |
156 |
|
loop.uiCyclesLeft = pSample->LoopPlayCount; |
157 |
|
loop.uiStart = pSample->LoopStart; |
158 |
|
loop.uiEnd = pSample->LoopEnd; |
159 |
|
loop.uiSize = pSample->LoopSize; |
160 |
} |
} |
161 |
else RAMLoop = false; |
else RAMLoop = false; |
162 |
|
|
170 |
else { // RAM only voice |
else { // RAM only voice |
171 |
MaxRAMPos = cachedsamples; |
MaxRAMPos = cachedsamples; |
172 |
if (pSample->Loops) { |
if (pSample->Loops) { |
173 |
RAMLoop = true; |
RAMLoop = true; |
174 |
LoopCyclesLeft = pSample->LoopPlayCount; |
loop.uiCyclesLeft = pSample->LoopPlayCount; |
175 |
} |
} |
176 |
else RAMLoop = false; |
else RAMLoop = false; |
177 |
dmsg(4,("RAM only voice launched (Looping: %s)\n", (RAMLoop) ? "yes" : "no")); |
dmsg(4,("RAM only voice launched (Looping: %s)\n", (RAMLoop) ? "yes" : "no")); |
182 |
{ |
{ |
183 |
double pitchbasecents = pDimRgn->FineTune + (int) pEngine->ScaleTuning[MIDIKey % 12]; |
double pitchbasecents = pDimRgn->FineTune + (int) pEngine->ScaleTuning[MIDIKey % 12]; |
184 |
if (pDimRgn->PitchTrack) pitchbasecents += (MIDIKey - (int) pDimRgn->UnityNote) * 100; |
if (pDimRgn->PitchTrack) pitchbasecents += (MIDIKey - (int) pDimRgn->UnityNote) * 100; |
185 |
this->PitchBase = RTMath::CentsToFreqRatio(pitchbasecents) * (double(pSample->SamplesPerSecond) / double(pEngine->pAudioOutputDevice->SampleRate())); |
this->PitchBase = RTMath::CentsToFreqRatio(pitchbasecents) * (double(pSample->SamplesPerSecond) / double(pEngine->SampleRate)); |
186 |
this->PitchBend = RTMath::CentsToFreqRatio(((double) PitchBend / 8192.0) * 200.0); // pitchbend wheel +-2 semitones = 200 cents |
this->PitchBend = RTMath::CentsToFreqRatio(((double) PitchBend / 8192.0) * 200.0); // pitchbend wheel +-2 semitones = 200 cents |
187 |
} |
} |
188 |
|
|
217 |
double eg1decay = (pDimRgn->EG1ControllerDecayInfluence) ? 1 + 0.00775 * (double) (1 << pDimRgn->EG1ControllerDecayInfluence) * eg1controllervalue : 1.0; |
double eg1decay = (pDimRgn->EG1ControllerDecayInfluence) ? 1 + 0.00775 * (double) (1 << pDimRgn->EG1ControllerDecayInfluence) * eg1controllervalue : 1.0; |
218 |
double eg1release = (pDimRgn->EG1ControllerReleaseInfluence) ? 1 + 0.00775 * (double) (1 << pDimRgn->EG1ControllerReleaseInfluence) * eg1controllervalue : 1.0; |
double eg1release = (pDimRgn->EG1ControllerReleaseInfluence) ? 1 + 0.00775 * (double) (1 << pDimRgn->EG1ControllerReleaseInfluence) * eg1controllervalue : 1.0; |
219 |
|
|
220 |
pEG1->Trigger(pDimRgn->EG1PreAttack, |
EG1.trigger(pDimRgn->EG1PreAttack, |
221 |
pDimRgn->EG1Attack * eg1attack, |
pDimRgn->EG1Attack * eg1attack, |
222 |
pDimRgn->EG1Hold, |
pDimRgn->EG1Hold, |
223 |
pSample->LoopStart, |
pDimRgn->EG1Decay1 * eg1decay * velrelease, |
224 |
pDimRgn->EG1Decay1 * eg1decay * velrelease, |
pDimRgn->EG1Decay2 * eg1decay * velrelease, |
225 |
pDimRgn->EG1Decay2 * eg1decay * velrelease, |
pDimRgn->EG1InfiniteSustain, |
226 |
pDimRgn->EG1InfiniteSustain, |
pDimRgn->EG1Sustain, |
227 |
pDimRgn->EG1Sustain, |
pDimRgn->EG1Release * eg1release * velrelease, |
228 |
pDimRgn->EG1Release * eg1release * velrelease, |
velocityAttenuation, |
229 |
// the SSE synthesis implementation requires |
pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
|
// the vca start to be 16 byte aligned |
|
|
SYNTHESIS_MODE_GET_IMPLEMENTATION(SynthesisMode) ? |
|
|
Delay & 0xfffffffc : Delay, |
|
|
velocityAttenuation); |
|
230 |
} |
} |
231 |
|
|
232 |
|
|
255 |
double eg2decay = (pDimRgn->EG2ControllerDecayInfluence) ? 1 + 0.00775 * (double) (1 << pDimRgn->EG2ControllerDecayInfluence) * eg2controllervalue : 1.0; |
double eg2decay = (pDimRgn->EG2ControllerDecayInfluence) ? 1 + 0.00775 * (double) (1 << pDimRgn->EG2ControllerDecayInfluence) * eg2controllervalue : 1.0; |
256 |
double eg2release = (pDimRgn->EG2ControllerReleaseInfluence) ? 1 + 0.00775 * (double) (1 << pDimRgn->EG2ControllerReleaseInfluence) * eg2controllervalue : 1.0; |
double eg2release = (pDimRgn->EG2ControllerReleaseInfluence) ? 1 + 0.00775 * (double) (1 << pDimRgn->EG2ControllerReleaseInfluence) * eg2controllervalue : 1.0; |
257 |
|
|
258 |
pEG2->Trigger(pDimRgn->EG2PreAttack, |
EG2.trigger(pDimRgn->EG2PreAttack, |
259 |
pDimRgn->EG2Attack * eg2attack, |
pDimRgn->EG2Attack * eg2attack, |
260 |
false, |
false, |
261 |
pSample->LoopStart, |
pDimRgn->EG2Decay1 * eg2decay * velrelease, |
262 |
pDimRgn->EG2Decay1 * eg2decay * velrelease, |
pDimRgn->EG2Decay2 * eg2decay * velrelease, |
263 |
pDimRgn->EG2Decay2 * eg2decay * velrelease, |
pDimRgn->EG2InfiniteSustain, |
264 |
pDimRgn->EG2InfiniteSustain, |
pDimRgn->EG2Sustain, |
265 |
pDimRgn->EG2Sustain, |
pDimRgn->EG2Release * eg2release * velrelease, |
266 |
pDimRgn->EG2Release * eg2release * velrelease, |
velocityAttenuation, |
267 |
Delay, |
pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
|
velocityAttenuation); |
|
268 |
} |
} |
269 |
|
|
270 |
|
|
271 |
// setup EG 3 (VCO EG) |
// setup EG 3 (VCO EG) |
272 |
{ |
{ |
273 |
double eg3depth = RTMath::CentsToFreqRatio(pDimRgn->EG3Depth); |
double eg3depth = RTMath::CentsToFreqRatio(pDimRgn->EG3Depth); |
274 |
pEG3->Trigger(eg3depth, pDimRgn->EG3Attack, Delay); |
EG3.trigger(eg3depth, pDimRgn->EG3Attack, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
275 |
} |
} |
276 |
|
|
277 |
|
|
309 |
pLFO1->ExtController = 0; // no external controller |
pLFO1->ExtController = 0; // no external controller |
310 |
bLFO1Enabled = false; |
bLFO1Enabled = false; |
311 |
} |
} |
312 |
if (bLFO1Enabled) pLFO1->Trigger(pDimRgn->LFO1Frequency, |
if (bLFO1Enabled) pLFO1->trigger(pDimRgn->LFO1Frequency, |
313 |
|
start_level_max, |
314 |
lfo1_internal_depth, |
lfo1_internal_depth, |
315 |
pDimRgn->LFO1ControlDepth, |
pDimRgn->LFO1ControlDepth, |
|
pEngineChannel->ControllerTable[pLFO1->ExtController], |
|
316 |
pDimRgn->LFO1FlipPhase, |
pDimRgn->LFO1FlipPhase, |
317 |
pEngine->SampleRate, |
pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
|
Delay); |
|
318 |
} |
} |
319 |
|
|
320 |
|
|
352 |
pLFO2->ExtController = 0; // no external controller |
pLFO2->ExtController = 0; // no external controller |
353 |
bLFO2Enabled = false; |
bLFO2Enabled = false; |
354 |
} |
} |
355 |
if (bLFO2Enabled) pLFO2->Trigger(pDimRgn->LFO2Frequency, |
if (bLFO2Enabled) pLFO2->trigger(pDimRgn->LFO2Frequency, |
356 |
|
start_level_max, |
357 |
lfo2_internal_depth, |
lfo2_internal_depth, |
358 |
pDimRgn->LFO2ControlDepth, |
pDimRgn->LFO2ControlDepth, |
|
pEngineChannel->ControllerTable[pLFO2->ExtController], |
|
359 |
pDimRgn->LFO2FlipPhase, |
pDimRgn->LFO2FlipPhase, |
360 |
pEngine->SampleRate, |
pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
|
Delay); |
|
361 |
} |
} |
362 |
|
|
363 |
|
|
395 |
pLFO3->ExtController = 0; // no external controller |
pLFO3->ExtController = 0; // no external controller |
396 |
bLFO3Enabled = false; |
bLFO3Enabled = false; |
397 |
} |
} |
398 |
if (bLFO3Enabled) pLFO3->Trigger(pDimRgn->LFO3Frequency, |
if (bLFO3Enabled) pLFO3->trigger(pDimRgn->LFO3Frequency, |
399 |
|
start_level_mid, |
400 |
lfo3_internal_depth, |
lfo3_internal_depth, |
401 |
pDimRgn->LFO3ControlDepth, |
pDimRgn->LFO3ControlDepth, |
|
pEngineChannel->ControllerTable[pLFO3->ExtController], |
|
402 |
false, |
false, |
403 |
pEngine->SampleRate, |
pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
|
Delay); |
|
404 |
} |
} |
405 |
|
|
406 |
|
|
473 |
#endif // CONFIG_OVERRIDE_RESONANCE_CTRL |
#endif // CONFIG_OVERRIDE_RESONANCE_CTRL |
474 |
|
|
475 |
#ifndef CONFIG_OVERRIDE_FILTER_TYPE |
#ifndef CONFIG_OVERRIDE_FILTER_TYPE |
476 |
FilterLeft.SetType(pDimRgn->VCFType); |
finalSynthesisParameters.filterLeft.SetType(pDimRgn->VCFType); |
477 |
FilterRight.SetType(pDimRgn->VCFType); |
finalSynthesisParameters.filterRight.SetType(pDimRgn->VCFType); |
478 |
#else // override filter type |
#else // override filter type |
479 |
FilterLeft.SetType(CONFIG_OVERRIDE_FILTER_TYPE); |
FilterLeft.SetType(CONFIG_OVERRIDE_FILTER_TYPE); |
480 |
FilterRight.SetType(CONFIG_OVERRIDE_FILTER_TYPE); |
FilterRight.SetType(CONFIG_OVERRIDE_FILTER_TYPE); |
494 |
if (VCFCutoffCtrl.controller) { |
if (VCFCutoffCtrl.controller) { |
495 |
cvalue = pEngineChannel->ControllerTable[VCFCutoffCtrl.controller]; |
cvalue = pEngineChannel->ControllerTable[VCFCutoffCtrl.controller]; |
496 |
if (pDimRgn->VCFCutoffControllerInvert) cvalue = 127 - cvalue; |
if (pDimRgn->VCFCutoffControllerInvert) cvalue = 127 - cvalue; |
497 |
|
// VCFVelocityScale in this case means Minimum cutoff |
498 |
if (cvalue < pDimRgn->VCFVelocityScale) cvalue = pDimRgn->VCFVelocityScale; |
if (cvalue < pDimRgn->VCFVelocityScale) cvalue = pDimRgn->VCFVelocityScale; |
499 |
} |
} |
500 |
else { |
else { |
502 |
} |
} |
503 |
cutoff *= float(cvalue) * 0.00787402f; // (1 / 127) |
cutoff *= float(cvalue) * 0.00787402f; // (1 / 127) |
504 |
if (cutoff > 1.0) cutoff = 1.0; |
if (cutoff > 1.0) cutoff = 1.0; |
505 |
cutoff = exp(cutoff * FILTER_CUTOFF_COEFF) * CONFIG_FILTER_CUTOFF_MIN; |
cutoff = (cutoff < 0.5 ? cutoff * 4826 - 1 : cutoff * 5715 - 449); |
506 |
|
if (cutoff < 1.0) cutoff = 1.0; |
507 |
|
|
508 |
// calculate resonance |
// calculate resonance |
509 |
float resonance = (float) VCFResonanceCtrl.value * 0.00787f; // 0.0..1.0 |
float resonance = (float) (VCFResonanceCtrl.controller ? VCFResonanceCtrl.value : pDimRgn->VCFResonance) * 0.00787f; // 0.0..1.0 |
|
if (pDimRgn->VCFKeyboardTracking) { |
|
|
resonance += (float) (itNoteOnEvent->Param.Note.Key - pDimRgn->VCFKeyboardTrackingBreakpoint) * 0.00787f; |
|
|
} |
|
|
Constrain(resonance, 0.0, 1.0); // correct resonance if outside allowed value range (0.0..1.0) |
|
510 |
|
|
511 |
VCFCutoffCtrl.fvalue = cutoff - CONFIG_FILTER_CUTOFF_MIN; |
VCFCutoffCtrl.fvalue = cutoff - 1.0; |
512 |
VCFResonanceCtrl.fvalue = resonance; |
VCFResonanceCtrl.fvalue = resonance; |
|
|
|
|
FilterUpdateCounter = -1; |
|
513 |
} |
} |
514 |
else { |
else { |
515 |
VCFCutoffCtrl.controller = 0; |
VCFCutoffCtrl.controller = 0; |
533 |
void Voice::Render(uint Samples) { |
void Voice::Render(uint Samples) { |
534 |
|
|
535 |
// select default values for synthesis mode bits |
// select default values for synthesis mode bits |
|
SYNTHESIS_MODE_SET_INTERPOLATE(SynthesisMode, (PitchBase * PitchBend) != 1.0f); |
|
|
SYNTHESIS_MODE_SET_CONSTPITCH(SynthesisMode, true); |
|
536 |
SYNTHESIS_MODE_SET_LOOP(SynthesisMode, false); |
SYNTHESIS_MODE_SET_LOOP(SynthesisMode, false); |
537 |
|
|
|
// Reset the synthesis parameter matrix |
|
|
|
|
|
#if CONFIG_PROCESS_MUTED_CHANNELS |
|
|
pEngine->ResetSynthesisParameters(Event::destination_vca, this->Volume * this->CrossfadeVolume * (pEngineChannel->GetMute() ? 0 : pEngineChannel->GlobalVolume)); |
|
|
#else |
|
|
pEngine->ResetSynthesisParameters(Event::destination_vca, this->Volume * this->CrossfadeVolume * pEngineChannel->GlobalVolume); |
|
|
#endif |
|
|
pEngine->ResetSynthesisParameters(Event::destination_vco, this->PitchBase); |
|
|
pEngine->ResetSynthesisParameters(Event::destination_vcfc, VCFCutoffCtrl.fvalue); |
|
|
pEngine->ResetSynthesisParameters(Event::destination_vcfr, VCFResonanceCtrl.fvalue); |
|
|
|
|
|
// Apply events to the synthesis parameter matrix |
|
|
ProcessEvents(Samples); |
|
|
|
|
|
// Let all modulators write their parameter changes to the synthesis parameter matrix for the current audio fragment |
|
|
pEG1->Process(Samples, pEngineChannel->pMIDIKeyInfo[MIDIKey].pEvents, itTriggerEvent, this->Pos, this->PitchBase * this->PitchBend, itKillEvent); |
|
|
pEG2->Process(Samples, pEngineChannel->pMIDIKeyInfo[MIDIKey].pEvents, itTriggerEvent, this->Pos, this->PitchBase * this->PitchBend); |
|
|
if (pEG3->Process(Samples)) { // if pitch EG is active |
|
|
SYNTHESIS_MODE_SET_INTERPOLATE(SynthesisMode, true); |
|
|
SYNTHESIS_MODE_SET_CONSTPITCH(SynthesisMode, false); |
|
|
} |
|
|
if (bLFO1Enabled) pLFO1->Process(Samples); |
|
|
if (bLFO2Enabled) pLFO2->Process(Samples); |
|
|
if (bLFO3Enabled) { |
|
|
if (pLFO3->Process(Samples)) { // if pitch LFO modulation is active |
|
|
SYNTHESIS_MODE_SET_INTERPOLATE(SynthesisMode, true); |
|
|
SYNTHESIS_MODE_SET_CONSTPITCH(SynthesisMode, false); |
|
|
} |
|
|
} |
|
|
|
|
|
if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode)) |
|
|
CalculateBiquadParameters(Samples); // calculate the final biquad filter parameters |
|
|
|
|
538 |
switch (this->PlaybackState) { |
switch (this->PlaybackState) { |
539 |
|
|
540 |
case playback_state_init: |
case playback_state_init: |
549 |
|
|
550 |
if (DiskVoice) { |
if (DiskVoice) { |
551 |
// check if we reached the allowed limit of the sample RAM cache |
// check if we reached the allowed limit of the sample RAM cache |
552 |
if (Pos > MaxRAMPos) { |
if (finalSynthesisParameters.dPos > MaxRAMPos) { |
553 |
dmsg(5,("Voice: switching to disk playback (Pos=%f)\n", Pos)); |
dmsg(5,("Voice: switching to disk playback (Pos=%f)\n", finalSynthesisParameters.dPos)); |
554 |
this->PlaybackState = playback_state_disk; |
this->PlaybackState = playback_state_disk; |
555 |
} |
} |
556 |
} |
} else if (finalSynthesisParameters.dPos >= pSample->GetCache().Size / pSample->FrameSize) { |
|
else if (Pos >= pSample->GetCache().Size / pSample->FrameSize) { |
|
557 |
this->PlaybackState = playback_state_end; |
this->PlaybackState = playback_state_end; |
558 |
} |
} |
559 |
} |
} |
568 |
KillImmediately(); |
KillImmediately(); |
569 |
return; |
return; |
570 |
} |
} |
571 |
DiskStreamRef.pStream->IncrementReadPos(pSample->Channels * (int(Pos) - MaxRAMPos)); |
DiskStreamRef.pStream->IncrementReadPos(pSample->Channels * (int(finalSynthesisParameters.dPos) - MaxRAMPos)); |
572 |
Pos -= int(Pos); |
finalSynthesisParameters.dPos -= int(finalSynthesisParameters.dPos); |
573 |
RealSampleWordsLeftToRead = -1; // -1 means no silence has been added yet |
RealSampleWordsLeftToRead = -1; // -1 means no silence has been added yet |
574 |
} |
} |
575 |
|
|
590 |
// render current audio fragment |
// render current audio fragment |
591 |
Synthesize(Samples, ptr, Delay); |
Synthesize(Samples, ptr, Delay); |
592 |
|
|
593 |
const int iPos = (int) Pos; |
const int iPos = (int) finalSynthesisParameters.dPos; |
594 |
const int readSampleWords = iPos * pSample->Channels; // amount of sample words actually been read |
const int readSampleWords = iPos * pSample->Channels; // amount of sample words actually been read |
595 |
DiskStreamRef.pStream->IncrementReadPos(readSampleWords); |
DiskStreamRef.pStream->IncrementReadPos(readSampleWords); |
596 |
Pos -= iPos; // just keep fractional part of Pos |
finalSynthesisParameters.dPos -= iPos; // just keep fractional part of playback position |
597 |
|
|
598 |
// change state of voice to 'end' if we really reached the end of the sample data |
// change state of voice to 'end' if we really reached the end of the sample data |
599 |
if (RealSampleWordsLeftToRead >= 0) { |
if (RealSampleWordsLeftToRead >= 0) { |
608 |
break; |
break; |
609 |
} |
} |
610 |
|
|
|
// Reset synthesis event lists (except VCO, as VCO events apply channel wide currently) |
|
|
pEngineChannel->pSynthesisEvents[Event::destination_vca]->clear(); |
|
|
pEngineChannel->pSynthesisEvents[Event::destination_vcfc]->clear(); |
|
|
pEngineChannel->pSynthesisEvents[Event::destination_vcfr]->clear(); |
|
|
|
|
611 |
// Reset delay |
// Reset delay |
612 |
Delay = 0; |
Delay = 0; |
613 |
|
|
614 |
itTriggerEvent = Pool<Event>::Iterator(); |
itTriggerEvent = Pool<Event>::Iterator(); |
615 |
|
|
616 |
// If sample stream or release stage finished, kill the voice |
// If sample stream or release stage finished, kill the voice |
617 |
if (PlaybackState == playback_state_end || pEG1->GetStage() == EGADSR::stage_end) KillImmediately(); |
if (PlaybackState == playback_state_end || EG1.getSegmentType() == EGADSR::segment_end) KillImmediately(); |
618 |
} |
} |
619 |
|
|
620 |
/** |
/** |
622 |
* suspended / not running. |
* suspended / not running. |
623 |
*/ |
*/ |
624 |
void Voice::Reset() { |
void Voice::Reset() { |
625 |
pLFO1->Reset(); |
finalSynthesisParameters.filterLeft.Reset(); |
626 |
pLFO2->Reset(); |
finalSynthesisParameters.filterRight.Reset(); |
|
pLFO3->Reset(); |
|
|
FilterLeft.Reset(); |
|
|
FilterRight.Reset(); |
|
627 |
DiskStreamRef.pStream = NULL; |
DiskStreamRef.pStream = NULL; |
628 |
DiskStreamRef.hStream = 0; |
DiskStreamRef.hStream = 0; |
629 |
DiskStreamRef.State = Stream::state_unused; |
DiskStreamRef.State = Stream::state_unused; |
634 |
} |
} |
635 |
|
|
636 |
/** |
/** |
637 |
* Process the control change event lists of the engine for the current |
* Process given list of MIDI note on, note off and sustain pedal events |
638 |
* audio fragment. Event values will be applied to the synthesis parameter |
* for the given time. |
|
* matrix. |
|
639 |
* |
* |
640 |
* @param Samples - number of samples to be rendered in this audio fragment cycle |
* @param itEvent - iterator pointing to the next event to be processed |
641 |
|
* @param End - youngest time stamp where processing should be stopped |
642 |
*/ |
*/ |
643 |
void Voice::ProcessEvents(uint Samples) { |
/**
 * Process given list of MIDI note on, note off and sustain pedal events
 * for the given time: release / cancel-release transitions are forwarded
 * to both envelope generators (EG1 and EG2).
 *
 * @param itEvent - iterator pointing to the next event to be processed
 * @param End     - youngest time stamp where processing should be stopped
 */
void Voice::processTransitionEvents(RTList<Event>::Iterator& itEvent, uint End) {
    for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
        switch (itEvent->Type) {
            case Event::type_release:
                EG1.update(EGADSR::event_release, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                EG2.update(EGADSR::event_release, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                break;
            case Event::type_cancel_release:
                EG1.update(EGADSR::event_cancel_release, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                EG2.update(EGADSR::event_cancel_release, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                break;
            default:
                break; // other event types are irrelevant for envelope transitions
        }
    }
}
654 |
|
|
655 |
// dispatch control change events |
/** |
656 |
RTList<Event>::Iterator itCCEvent = pEngineChannel->pCCEvents->first(); |
* Process given list of MIDI control change and pitch bend events for |
657 |
if (Delay) { // skip events that happened before this voice was triggered |
* the given time. |
658 |
while (itCCEvent && itCCEvent->FragmentPos() <= Delay) ++itCCEvent; |
* |
659 |
} |
* @param itEvent - iterator pointing to the next event to be processed |
660 |
while (itCCEvent) { |
* @param End - youngest time stamp where processing should be stopped |
661 |
if (itCCEvent->Param.CC.Controller) { // if valid MIDI controller |
*/ |
662 |
if (itCCEvent->Param.CC.Controller == VCFCutoffCtrl.controller) { |
void Voice::processCCEvents(RTList<Event>::Iterator& itEvent, uint End) { |
663 |
*pEngineChannel->pSynthesisEvents[Event::destination_vcfc]->allocAppend() = *itCCEvent; |
for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) { |
664 |
} |
if (itEvent->Type == Event::type_control_change && |
665 |
if (itCCEvent->Param.CC.Controller == VCFResonanceCtrl.controller) { |
itEvent->Param.CC.Controller) { // if (valid) MIDI control change event |
666 |
*pEngineChannel->pSynthesisEvents[Event::destination_vcfr]->allocAppend() = *itCCEvent; |
if (itEvent->Param.CC.Controller == VCFCutoffCtrl.controller) { |
667 |
|
processCutoffEvent(itEvent); |
668 |
|
} |
669 |
|
if (itEvent->Param.CC.Controller == VCFResonanceCtrl.controller) { |
670 |
|
processResonanceEvent(itEvent); |
671 |
} |
} |
672 |
if (itCCEvent->Param.CC.Controller == pLFO1->ExtController) { |
if (itEvent->Param.CC.Controller == pLFO1->ExtController) { |
673 |
pLFO1->SendEvent(itCCEvent); |
pLFO1->update(itEvent->Param.CC.Value); |
674 |
} |
} |
675 |
if (itCCEvent->Param.CC.Controller == pLFO2->ExtController) { |
if (itEvent->Param.CC.Controller == pLFO2->ExtController) { |
676 |
pLFO2->SendEvent(itCCEvent); |
pLFO2->update(itEvent->Param.CC.Value); |
677 |
} |
} |
678 |
if (itCCEvent->Param.CC.Controller == pLFO3->ExtController) { |
if (itEvent->Param.CC.Controller == pLFO3->ExtController) { |
679 |
pLFO3->SendEvent(itCCEvent); |
pLFO3->update(itEvent->Param.CC.Value); |
680 |
} |
} |
681 |
if (pDimRgn->AttenuationController.type == ::gig::attenuation_ctrl_t::type_controlchange && |
if (pDimRgn->AttenuationController.type == ::gig::attenuation_ctrl_t::type_controlchange && |
682 |
itCCEvent->Param.CC.Controller == pDimRgn->AttenuationController.controller_number) { // if crossfade event |
itEvent->Param.CC.Controller == pDimRgn->AttenuationController.controller_number) { |
683 |
*pEngineChannel->pSynthesisEvents[Event::destination_vca]->allocAppend() = *itCCEvent; |
processCrossFadeEvent(itEvent); |
684 |
} |
} |
685 |
|
} else if (itEvent->Type == Event::type_pitchbend) { // if pitch bend event |
686 |
|
processPitchEvent(itEvent); |
687 |
} |
} |
688 |
|
} |
689 |
|
} |
690 |
|
|
691 |
|
/**
 * Apply a MIDI pitch bend event to this voice: converts the 14 bit bend
 * value (-8192..+8191, +-two semitones = +-200 cents) into a frequency
 * ratio and multiplies it onto the final pitch of the current subfragment.
 *
 * @param itEvent - pitch bend event to be applied
 */
void Voice::processPitchEvent(RTList<Event>::Iterator& itEvent) {
    const float bendRatio = RTMath::CentsToFreqRatio((double(itEvent->Param.Pitch.Pitch) / 8192.0) * 200.0);
    // remember the ratio; used to (re)initialize fFinalPitch on subsequent subfragments
    PitchBend = bendRatio;
    finalSynthesisParameters.fFinalPitch *= bendRatio;
}
696 |
|
|
697 |
++itCCEvent; |
/**
 * Process a control change event of the crossfade (attenuation)
 * controller: updates this voice's crossfade volume and recalculates the
 * effective final volume for the current audio subfragment.
 *
 * @param itEvent - crossfade controller (CC) event to be applied
 */
void Voice::processCrossFadeEvent(RTList<Event>::Iterator& itEvent) {
    // map the raw controller value through the crossfade attenuation curve
    CrossfadeVolume = CrossfadeAttenuation(itEvent->Param.CC.Value);
    #if CONFIG_PROCESS_MUTED_CHANNELS
    // muted channels are still rendered, but with zero volume
    const float effectiveVolume = CrossfadeVolume * Volume * (pEngineChannel->GetMute() ? 0 : pEngineChannel->GlobalVolume);
    #else
    const float effectiveVolume = CrossfadeVolume * Volume * pEngineChannel->GlobalVolume;
    #endif
    fFinalVolume = effectiveVolume;
}
706 |
|
|
707 |
|
/**
 * Process a control change event of the filter cutoff controller:
 * recalculates the final cutoff frequency for the current subfragment
 * and caches the controller value for the early-out on repeats.
 *
 * @param itEvent - cutoff controller (CC) event to be applied
 */
void Voice::processCutoffEvent(RTList<Event>::Iterator& itEvent) {
    int ccvalue = itEvent->Param.CC.Value;
    if (VCFCutoffCtrl.value == ccvalue) return; // value unchanged, nothing to do
    // FIX: was 'VCFCutoffCtrl.value == ccvalue;' — a no-op comparison, so the
    // cached controller value was never updated and the guard above misfired.
    VCFCutoffCtrl.value = ccvalue;
    if (pDimRgn->VCFCutoffControllerInvert) ccvalue = 127 - ccvalue;
    // VCFVelocityScale acts as the minimum cutoff in this case
    if (ccvalue < pDimRgn->VCFVelocityScale) ccvalue = pDimRgn->VCFVelocityScale;
    float cutoff = CutoffBase * float(ccvalue) * 0.00787402f; // (1 / 127)
    if (cutoff > 1.0) cutoff = 1.0;
    // map normalized cutoff (0..1) to a filter frequency via a two-segment linear curve
    cutoff = (cutoff < 0.5 ? cutoff * 4826 - 1 : cutoff * 5715 - 449);
    if (cutoff < 1.0) cutoff = 1.0;

    VCFCutoffCtrl.fvalue = cutoff - 1.0; // needed for initialization of fFinalCutoff next time
    fFinalCutoff = cutoff;
}
721 |
|
|
722 |
|
/**
 * Process a control change event of the filter resonance controller:
 * applies the controller movement as a delta to the running resonance
 * parameter and remembers the new absolute controller value.
 *
 * @param itEvent - resonance controller (CC) event to be applied
 */
void Voice::processResonanceEvent(RTList<Event>::Iterator& itEvent) {
    const int newValue = itEvent->Param.CC.Value;
    // convert absolute controller value to differential
    const int ctrldelta = newValue - VCFResonanceCtrl.value;
    VCFResonanceCtrl.value = newValue;
    fFinalResonance += (float) ctrldelta * 0.00787f; // 0.0..1.0
    // needed for initialization of the parameter on the next subfragment
    VCFResonanceCtrl.fvalue = newValue * 0.00787f;
}
731 |
|
|
732 |
|
/** |
733 |
|
* Synthesizes the current audio fragment for this voice. |
734 |
|
* |
735 |
|
* @param Samples - number of sample points to be rendered in this audio |
736 |
|
* fragment cycle |
737 |
|
* @param pSrc - pointer to input sample data |
738 |
|
* @param Skip - number of sample points to skip in output buffer |
739 |
|
*/ |
740 |
|
void Voice::Synthesize(uint Samples, sample_t* pSrc, uint Skip) { |
741 |
|
finalSynthesisParameters.pOutLeft = &pEngineChannel->pOutputLeft[Skip]; |
742 |
|
finalSynthesisParameters.pOutRight = &pEngineChannel->pOutputRight[Skip]; |
743 |
|
finalSynthesisParameters.pSrc = pSrc; |
744 |
|
|
745 |
|
RTList<Event>::Iterator itCCEvent = pEngineChannel->pEvents->first(); |
746 |
|
RTList<Event>::Iterator itNoteEvent = pEngineChannel->pMIDIKeyInfo[MIDIKey].pEvents->first(); |
747 |
|
|
748 |
|
if (Skip) { // skip events that happened before this voice was triggered |
749 |
|
while (itCCEvent && itCCEvent->FragmentPos() <= Skip) ++itCCEvent; |
750 |
|
while (itNoteEvent && itNoteEvent->FragmentPos() <= Skip) ++itNoteEvent; |
751 |
} |
} |
752 |
|
|
753 |
|
uint killPos; |
754 |
|
if (itKillEvent) killPos = RTMath::Min(itKillEvent->FragmentPos(), pEngine->MaxFadeOutPos); |
755 |
|
|
756 |
// process pitch events |
uint i = Skip; |
757 |
{ |
while (i < Samples) { |
758 |
RTList<Event>* pVCOEventList = pEngineChannel->pSynthesisEvents[Event::destination_vco]; |
int iSubFragmentEnd = RTMath::Min(i + CONFIG_DEFAULT_SUBFRAGMENT_SIZE, Samples); |
759 |
RTList<Event>::Iterator itVCOEvent = pVCOEventList->first(); |
|
760 |
if (Delay) { // skip events that happened before this voice was triggered |
// initialize all final synthesis parameters |
761 |
while (itVCOEvent && itVCOEvent->FragmentPos() <= Delay) ++itVCOEvent; |
finalSynthesisParameters.fFinalPitch = PitchBase * PitchBend; |
762 |
} |
#if CONFIG_PROCESS_MUTED_CHANNELS |
763 |
// apply old pitchbend value until first pitch event occurs |
fFinalVolume = this->Volume * this->CrossfadeVolume * (pEngineChannel->GetMute() ? 0 : pEngineChannel->GlobalVolume); |
764 |
if (this->PitchBend != 1.0) { |
#else |
765 |
uint end = (itVCOEvent) ? itVCOEvent->FragmentPos() : Samples; |
fFinalVolume = this->Volume * this->CrossfadeVolume * pEngineChannel->GlobalVolume; |
766 |
for (uint i = Delay; i < end; i++) { |
#endif |
767 |
pEngine->pSynthesisParameters[Event::destination_vco][i] *= this->PitchBend; |
fFinalCutoff = VCFCutoffCtrl.fvalue; |
768 |
} |
fFinalResonance = VCFResonanceCtrl.fvalue; |
769 |
|
|
770 |
|
// process MIDI control change and pitchbend events for this subfragment |
771 |
|
processCCEvents(itCCEvent, iSubFragmentEnd); |
772 |
|
|
773 |
|
// process transition events (note on, note off & sustain pedal) |
774 |
|
processTransitionEvents(itNoteEvent, iSubFragmentEnd); |
775 |
|
|
776 |
|
// if the voice was killed in this subfragment switch EG1 to fade out stage |
777 |
|
if (itKillEvent && killPos <= iSubFragmentEnd) { |
778 |
|
EG1.enterFadeOutStage(); |
779 |
|
itKillEvent = Pool<Event>::Iterator(); |
780 |
} |
} |
|
float pitch; |
|
|
while (itVCOEvent) { |
|
|
RTList<Event>::Iterator itNextVCOEvent = itVCOEvent; |
|
|
++itNextVCOEvent; |
|
|
|
|
|
// calculate the influence length of this event (in sample points) |
|
|
uint end = (itNextVCOEvent) ? itNextVCOEvent->FragmentPos() : Samples; |
|
|
|
|
|
pitch = RTMath::CentsToFreqRatio(((double) itVCOEvent->Param.Pitch.Pitch / 8192.0) * 200.0); // +-two semitones = +-200 cents |
|
|
|
|
|
// apply pitch value to the pitch parameter sequence |
|
|
for (uint i = itVCOEvent->FragmentPos(); i < end; i++) { |
|
|
pEngine->pSynthesisParameters[Event::destination_vco][i] *= pitch; |
|
|
} |
|
781 |
|
|
782 |
itVCOEvent = itNextVCOEvent; |
// process envelope generators |
783 |
|
switch (EG1.getSegmentType()) { |
784 |
|
case EGADSR::segment_lin: |
785 |
|
fFinalVolume *= EG1.processLin(); |
786 |
|
break; |
787 |
|
case EGADSR::segment_exp: |
788 |
|
fFinalVolume *= EG1.processExp(); |
789 |
|
break; |
790 |
|
case EGADSR::segment_end: |
791 |
|
fFinalVolume *= EG1.getLevel(); |
792 |
|
break; // noop |
793 |
} |
} |
794 |
if (!pVCOEventList->isEmpty()) { |
switch (EG2.getSegmentType()) { |
795 |
this->PitchBend = pitch; |
case EGADSR::segment_lin: |
796 |
SYNTHESIS_MODE_SET_INTERPOLATE(SynthesisMode, true); |
fFinalCutoff *= EG2.processLin(); |
797 |
SYNTHESIS_MODE_SET_CONSTPITCH(SynthesisMode, false); |
break; |
798 |
|
case EGADSR::segment_exp: |
799 |
|
fFinalCutoff *= EG2.processExp(); |
800 |
|
break; |
801 |
|
case EGADSR::segment_end: |
802 |
|
fFinalCutoff *= EG2.getLevel(); |
803 |
|
break; // noop |
804 |
} |
} |
805 |
} |
if (EG3.active()) finalSynthesisParameters.fFinalPitch *= RTMath::CentsToFreqRatio(EG3.render()); |
806 |
|
|
807 |
// process volume / attenuation events (TODO: we only handle and _expect_ crossfade events here ATM !) |
// process low frequency oscillators |
808 |
{ |
if (bLFO1Enabled) fFinalVolume *= pLFO1->render(); |
809 |
RTList<Event>* pVCAEventList = pEngineChannel->pSynthesisEvents[Event::destination_vca]; |
if (bLFO2Enabled) fFinalCutoff *= pLFO2->render(); |
810 |
RTList<Event>::Iterator itVCAEvent = pVCAEventList->first(); |
if (bLFO3Enabled) finalSynthesisParameters.fFinalPitch *= RTMath::CentsToFreqRatio(pLFO3->render()); |
|
if (Delay) { // skip events that happened before this voice was triggered |
|
|
while (itVCAEvent && itVCAEvent->FragmentPos() <= Delay) ++itVCAEvent; |
|
|
} |
|
|
float crossfadevolume; |
|
|
while (itVCAEvent) { |
|
|
RTList<Event>::Iterator itNextVCAEvent = itVCAEvent; |
|
|
++itNextVCAEvent; |
|
|
|
|
|
// calculate the influence length of this event (in sample points) |
|
|
uint end = (itNextVCAEvent) ? itNextVCAEvent->FragmentPos() : Samples; |
|
|
|
|
|
crossfadevolume = CrossfadeAttenuation(itVCAEvent->Param.CC.Value); |
|
|
|
|
|
#if CONFIG_PROCESS_MUTED_CHANNELS |
|
|
float effective_volume = crossfadevolume * this->Volume * (pEngineChannel->GetMute() ? 0 : pEngineChannel->GlobalVolume); |
|
|
#else |
|
|
float effective_volume = crossfadevolume * this->Volume * pEngineChannel->GlobalVolume; |
|
|
#endif |
|
|
|
|
|
// apply volume value to the volume parameter sequence |
|
|
for (uint i = itVCAEvent->FragmentPos(); i < end; i++) { |
|
|
pEngine->pSynthesisParameters[Event::destination_vca][i] = effective_volume; |
|
|
} |
|
811 |
|
|
812 |
itVCAEvent = itNextVCAEvent; |
// if filter enabled then update filter coefficients |
813 |
|
if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode)) { |
814 |
|
finalSynthesisParameters.filterLeft.SetParameters(fFinalCutoff + 1.0, fFinalResonance, pEngine->SampleRate); |
815 |
|
finalSynthesisParameters.filterRight.SetParameters(fFinalCutoff + 1.0, fFinalResonance, pEngine->SampleRate); |
816 |
} |
} |
|
if (!pVCAEventList->isEmpty()) this->CrossfadeVolume = crossfadevolume; |
|
|
} |
|
817 |
|
|
818 |
// process filter cutoff events |
// do we need resampling? |
819 |
{ |
const float __PLUS_ONE_CENT = 1.000577789506554859250142541782224725466f; |
820 |
RTList<Event>* pCutoffEventList = pEngineChannel->pSynthesisEvents[Event::destination_vcfc]; |
const float __MINUS_ONE_CENT = 0.9994225441413807496009516495583113737666f; |
821 |
RTList<Event>::Iterator itCutoffEvent = pCutoffEventList->first(); |
const bool bResamplingRequired = !(finalSynthesisParameters.fFinalPitch <= __PLUS_ONE_CENT && |
822 |
if (Delay) { // skip events that happened before this voice was triggered |
finalSynthesisParameters.fFinalPitch >= __MINUS_ONE_CENT); |
823 |
while (itCutoffEvent && itCutoffEvent->FragmentPos() <= Delay) ++itCutoffEvent; |
SYNTHESIS_MODE_SET_INTERPOLATE(SynthesisMode, bResamplingRequired); |
|
} |
|
|
float cutoff; |
|
|
while (itCutoffEvent) { |
|
|
RTList<Event>::Iterator itNextCutoffEvent = itCutoffEvent; |
|
|
++itNextCutoffEvent; |
|
824 |
|
|
825 |
// calculate the influence length of this event (in sample points) |
// prepare final synthesis parameters structure |
826 |
uint end = (itNextCutoffEvent) ? itNextCutoffEvent->FragmentPos() : Samples; |
finalSynthesisParameters.fFinalVolumeLeft = fFinalVolume * PanLeft; |
827 |
|
finalSynthesisParameters.fFinalVolumeRight = fFinalVolume * PanRight; |
828 |
|
finalSynthesisParameters.uiToGo = iSubFragmentEnd - i; |
829 |
|
|
830 |
int cvalue = pEngineChannel->ControllerTable[VCFCutoffCtrl.controller]; |
// render audio for one subfragment |
831 |
if (pDimRgn->VCFCutoffControllerInvert) cvalue = 127 - cvalue; |
RunSynthesisFunction(SynthesisMode, &finalSynthesisParameters, &loop); |
|
if (cvalue < pDimRgn->VCFVelocityScale) cvalue = pDimRgn->VCFVelocityScale; |
|
|
cutoff = CutoffBase * float(cvalue) * 0.00787402f; // (1 / 127) |
|
|
if (cutoff > 1.0) cutoff = 1.0; |
|
|
cutoff = exp(cutoff * FILTER_CUTOFF_COEFF) * CONFIG_FILTER_CUTOFF_MIN - CONFIG_FILTER_CUTOFF_MIN; |
|
|
|
|
|
// apply cutoff frequency to the cutoff parameter sequence |
|
|
for (uint i = itCutoffEvent->FragmentPos(); i < end; i++) { |
|
|
pEngine->pSynthesisParameters[Event::destination_vcfc][i] = cutoff; |
|
|
} |
|
832 |
|
|
833 |
itCutoffEvent = itNextCutoffEvent; |
const double newPos = Pos + (iSubFragmentEnd - i) * finalSynthesisParameters.fFinalPitch; |
|
} |
|
|
if (!pCutoffEventList->isEmpty()) VCFCutoffCtrl.fvalue = cutoff; // needed for initialization of parameter matrix next time |
|
|
} |
|
834 |
|
|
835 |
// process filter resonance events |
// increment envelopes' positions |
836 |
{ |
if (EG1.active()) { |
837 |
RTList<Event>* pResonanceEventList = pEngineChannel->pSynthesisEvents[Event::destination_vcfr]; |
|
838 |
RTList<Event>::Iterator itResonanceEvent = pResonanceEventList->first(); |
// if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage |
839 |
if (Delay) { // skip events that happened before this voice was triggered |
if (pSample->Loops && Pos <= pSample->LoopStart && pSample->LoopStart < newPos) { |
840 |
while (itResonanceEvent && itResonanceEvent->FragmentPos() <= Delay) ++itResonanceEvent; |
EG1.update(EGADSR::event_hold_end, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
|
} |
|
|
while (itResonanceEvent) { |
|
|
RTList<Event>::Iterator itNextResonanceEvent = itResonanceEvent; |
|
|
++itNextResonanceEvent; |
|
|
|
|
|
// calculate the influence length of this event (in sample points) |
|
|
uint end = (itNextResonanceEvent) ? itNextResonanceEvent->FragmentPos() : Samples; |
|
|
|
|
|
// convert absolute controller value to differential |
|
|
int ctrldelta = itResonanceEvent->Param.CC.Value - VCFResonanceCtrl.value; |
|
|
VCFResonanceCtrl.value = itResonanceEvent->Param.CC.Value; |
|
|
|
|
|
float resonancedelta = (float) ctrldelta * 0.00787f; // 0.0..1.0 |
|
|
|
|
|
// apply cutoff frequency to the cutoff parameter sequence |
|
|
for (uint i = itResonanceEvent->FragmentPos(); i < end; i++) { |
|
|
pEngine->pSynthesisParameters[Event::destination_vcfr][i] += resonancedelta; |
|
841 |
} |
} |
842 |
|
|
843 |
itResonanceEvent = itNextResonanceEvent; |
EG1.increment(1); |
844 |
|
if (!EG1.toStageEndLeft()) EG1.update(EGADSR::event_stage_end, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
845 |
} |
} |
846 |
if (!pResonanceEventList->isEmpty()) VCFResonanceCtrl.fvalue = pResonanceEventList->last()->Param.CC.Value * 0.00787f; // needed for initialization of parameter matrix next time |
if (EG2.active()) { |
847 |
} |
EG2.increment(1); |
848 |
} |
if (!EG2.toStageEndLeft()) EG2.update(EGADSR::event_stage_end, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
|
|
|
|
/** |
|
|
* Calculate all necessary, final biquad filter parameters. |
|
|
* |
|
|
* @param Samples - number of samples to be rendered in this audio fragment cycle |
|
|
*/ |
|
|
void Voice::CalculateBiquadParameters(uint Samples) { |
|
|
biquad_param_t bqbase; |
|
|
biquad_param_t bqmain; |
|
|
float prev_cutoff = pEngine->pSynthesisParameters[Event::destination_vcfc][0]; |
|
|
float prev_res = pEngine->pSynthesisParameters[Event::destination_vcfr][0]; |
|
|
FilterLeft.SetParameters( &bqbase, &bqmain, prev_cutoff + CONFIG_FILTER_CUTOFF_MIN, prev_res, pEngine->SampleRate); |
|
|
FilterRight.SetParameters(&bqbase, &bqmain, prev_cutoff + CONFIG_FILTER_CUTOFF_MIN, prev_res, pEngine->SampleRate); |
|
|
pEngine->pBasicFilterParameters[0] = bqbase; |
|
|
pEngine->pMainFilterParameters[0] = bqmain; |
|
|
|
|
|
float* bq; |
|
|
for (int i = 1; i < Samples; i++) { |
|
|
// recalculate biquad parameters if cutoff or resonance differ from previous sample point |
|
|
if (!(i & FILTER_UPDATE_MASK)) { |
|
|
if (pEngine->pSynthesisParameters[Event::destination_vcfr][i] != prev_res || |
|
|
pEngine->pSynthesisParameters[Event::destination_vcfc][i] != prev_cutoff) |
|
|
{ |
|
|
prev_cutoff = pEngine->pSynthesisParameters[Event::destination_vcfc][i]; |
|
|
prev_res = pEngine->pSynthesisParameters[Event::destination_vcfr][i]; |
|
|
FilterLeft.SetParameters( &bqbase, &bqmain, prev_cutoff + CONFIG_FILTER_CUTOFF_MIN, prev_res, pEngine->SampleRate); |
|
|
FilterRight.SetParameters(&bqbase, &bqmain, prev_cutoff + CONFIG_FILTER_CUTOFF_MIN, prev_res, pEngine->SampleRate); |
|
|
} |
|
849 |
} |
} |
850 |
|
EG3.increment(1); |
851 |
|
if (!EG3.toEndLeft()) EG3.update(); // neutralize envelope coefficient if end reached |
852 |
|
|
853 |
//same as 'pEngine->pBasicFilterParameters[i] = bqbase;' |
Pos = newPos; |
854 |
bq = (float*) &pEngine->pBasicFilterParameters[i]; |
i = iSubFragmentEnd; |
|
bq[0] = bqbase.b0; |
|
|
bq[1] = bqbase.b1; |
|
|
bq[2] = bqbase.b2; |
|
|
bq[3] = bqbase.a1; |
|
|
bq[4] = bqbase.a2; |
|
|
|
|
|
// same as 'pEngine->pMainFilterParameters[i] = bqmain;' |
|
|
bq = (float*) &pEngine->pMainFilterParameters[i]; |
|
|
bq[0] = bqmain.b0; |
|
|
bq[1] = bqmain.b1; |
|
|
bq[2] = bqmain.b2; |
|
|
bq[3] = bqmain.a1; |
|
|
bq[4] = bqmain.a2; |
|
855 |
} |
} |
856 |
} |
} |
857 |
|
|
858 |
/** |
/** |
|
* Synthesizes the current audio fragment for this voice. |
|
|
* |
|
|
* @param Samples - number of sample points to be rendered in this audio |
|
|
* fragment cycle |
|
|
* @param pSrc - pointer to input sample data |
|
|
* @param Skip - number of sample points to skip in output buffer |
|
|
*/ |
|
|
void Voice::Synthesize(uint Samples, sample_t* pSrc, uint Skip) { |
|
|
RunSynthesisFunction(SynthesisMode, *this, Samples, pSrc, Skip); |
|
|
} |
|
|
|
|
|
/** |
|
859 |
* Immediately kill the voice. This method should not be used to kill |
* Immediately kill the voice. This method should not be used to kill |
860 |
* a normal, active voice, because it doesn't take care of things like |
* a normal, active voice, because it doesn't take care of things like |
861 |
* fading down the volume level to avoid clicks and regular processing |
* fading down the volume level to avoid clicks and regular processing |