3 |
* LinuxSampler - modular, streaming capable sampler * |
* LinuxSampler - modular, streaming capable sampler * |
4 |
* * |
* * |
5 |
* Copyright (C) 2003, 2004 by Benno Senoner and Christian Schoenebeck * |
* Copyright (C) 2003, 2004 by Benno Senoner and Christian Schoenebeck * |
6 |
* Copyright (C) 2005, 2006 Christian Schoenebeck * |
* Copyright (C) 2005 - 2007 Christian Schoenebeck * |
7 |
* * |
* * |
8 |
* This program is free software; you can redistribute it and/or modify * |
* This program is free software; you can redistribute it and/or modify * |
9 |
* it under the terms of the GNU General Public License as published by * |
* it under the terms of the GNU General Public License as published by * |
29 |
|
|
30 |
namespace LinuxSampler { namespace gig { |
namespace LinuxSampler { namespace gig { |
31 |
|
|
|
// Precomputed coefficient for the exponential filter cutoff mapping:
// ln(CONFIG_FILTER_CUTOFF_MAX / CONFIG_FILTER_CUTOFF_MIN), evaluated once
// during static initialization.
const float Voice::FILTER_CUTOFF_COEFF(CalculateFilterCutoffCoeff());

// Computes the natural-log span of the configured cutoff frequency range.
// NOTE(review): assumes CONFIG_FILTER_CUTOFF_MIN > 0 — otherwise the
// division / log would be undefined; presumably guaranteed by the build
// configuration, confirm in config.h.
float Voice::CalculateFilterCutoffCoeff() {
    return log(CONFIG_FILTER_CUTOFF_MAX / CONFIG_FILTER_CUTOFF_MIN);
}
|
|
|
|
32 |
Voice::Voice() { |
Voice::Voice() { |
33 |
pEngine = NULL; |
pEngine = NULL; |
34 |
pDiskThread = NULL; |
pDiskThread = NULL; |
38 |
pLFO3 = new LFOSigned(1200.0f); // pitch EG (-1200..+1200 range) |
pLFO3 = new LFOSigned(1200.0f); // pitch EG (-1200..+1200 range) |
39 |
KeyGroup = 0; |
KeyGroup = 0; |
40 |
SynthesisMode = 0; // set all mode bits to 0 first |
SynthesisMode = 0; // set all mode bits to 0 first |
41 |
// select synthesis implementation (currently either pure C++ or MMX+SSE(1)) |
// select synthesis implementation (asm core is not supported ATM) |
42 |
#if CONFIG_ASM && ARCH_X86 |
#if 0 // CONFIG_ASM && ARCH_X86 |
43 |
SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, Features::supportsMMX() && Features::supportsSSE()); |
SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, Features::supportsMMX() && Features::supportsSSE()); |
44 |
#else |
#else |
45 |
SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, false); |
SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, false); |
79 |
int Voice::Trigger(EngineChannel* pEngineChannel, Pool<Event>::Iterator& itNoteOnEvent, int PitchBend, ::gig::DimensionRegion* pDimRgn, type_t VoiceType, int iKeyGroup) { |
int Voice::Trigger(EngineChannel* pEngineChannel, Pool<Event>::Iterator& itNoteOnEvent, int PitchBend, ::gig::DimensionRegion* pDimRgn, type_t VoiceType, int iKeyGroup) { |
80 |
this->pEngineChannel = pEngineChannel; |
this->pEngineChannel = pEngineChannel; |
81 |
this->pDimRgn = pDimRgn; |
this->pDimRgn = pDimRgn; |
82 |
|
Orphan = false; |
83 |
|
|
84 |
#if CONFIG_DEVMODE |
#if CONFIG_DEVMODE |
85 |
if (itNoteOnEvent->FragmentPos() > pEngine->MaxSamplesPerCycle) { // just a sanity check for debugging |
if (itNoteOnEvent->FragmentPos() > pEngine->MaxSamplesPerCycle) { // just a sanity check for debugging |
99 |
// calculate volume |
// calculate volume |
100 |
const double velocityAttenuation = pDimRgn->GetVelocityAttenuation(itNoteOnEvent->Param.Note.Velocity); |
const double velocityAttenuation = pDimRgn->GetVelocityAttenuation(itNoteOnEvent->Param.Note.Velocity); |
101 |
|
|
102 |
Volume = velocityAttenuation / 32768.0f; // we downscale by 32768 to convert from int16 value range to DSP value range (which is -1.0..1.0) |
// For 16 bit samples, we downscale by 32768 to convert from |
103 |
|
// int16 value range to DSP value range (which is |
104 |
|
// -1.0..1.0). For 24 bit, we downscale from int32. |
105 |
|
float volume = velocityAttenuation / (pSample->BitDepth == 16 ? 32768.0f : 32768.0f * 65536.0f); |
106 |
|
|
107 |
Volume *= pDimRgn->SampleAttenuation; |
volume *= pDimRgn->SampleAttenuation * pEngineChannel->GlobalVolume * GLOBAL_VOLUME; |
108 |
|
|
109 |
// the volume of release triggered samples depends on note length |
// the volume of release triggered samples depends on note length |
110 |
if (Type == type_release_trigger) { |
if (Type == type_release_trigger) { |
112 |
pEngineChannel->pMIDIKeyInfo[MIDIKey].NoteOnTime) / pEngine->SampleRate; |
pEngineChannel->pMIDIKeyInfo[MIDIKey].NoteOnTime) / pEngine->SampleRate; |
113 |
float attenuation = 1 - 0.01053 * (256 >> pDimRgn->ReleaseTriggerDecay) * noteLength; |
float attenuation = 1 - 0.01053 * (256 >> pDimRgn->ReleaseTriggerDecay) * noteLength; |
114 |
if (attenuation <= 0) return -1; |
if (attenuation <= 0) return -1; |
115 |
Volume *= attenuation; |
volume *= attenuation; |
116 |
} |
} |
117 |
|
|
118 |
// select channel mode (mono or stereo) |
// select channel mode (mono or stereo) |
119 |
SYNTHESIS_MODE_SET_CHANNELS(SynthesisMode, pSample->Channels == 2); |
SYNTHESIS_MODE_SET_CHANNELS(SynthesisMode, pSample->Channels == 2); |
120 |
|
// select bit depth (16 or 24) |
121 |
|
SYNTHESIS_MODE_SET_BITDEPTH24(SynthesisMode, pSample->BitDepth == 24); |
122 |
|
|
123 |
// get starting crossfade volume level |
// get starting crossfade volume level |
124 |
|
float crossfadeVolume; |
125 |
switch (pDimRgn->AttenuationController.type) { |
switch (pDimRgn->AttenuationController.type) { |
126 |
case ::gig::attenuation_ctrl_t::type_channelaftertouch: |
case ::gig::attenuation_ctrl_t::type_channelaftertouch: |
127 |
CrossfadeVolume = 1.0f; //TODO: aftertouch not supported yet |
crossfadeVolume = Engine::CrossfadeCurve[CrossfadeAttenuation(pEngineChannel->ControllerTable[128])]; |
128 |
break; |
break; |
129 |
case ::gig::attenuation_ctrl_t::type_velocity: |
case ::gig::attenuation_ctrl_t::type_velocity: |
130 |
CrossfadeVolume = CrossfadeAttenuation(itNoteOnEvent->Param.Note.Velocity); |
crossfadeVolume = Engine::CrossfadeCurve[CrossfadeAttenuation(itNoteOnEvent->Param.Note.Velocity)]; |
131 |
break; |
break; |
132 |
case ::gig::attenuation_ctrl_t::type_controlchange: //FIXME: currently not sample accurate |
case ::gig::attenuation_ctrl_t::type_controlchange: //FIXME: currently not sample accurate |
133 |
CrossfadeVolume = CrossfadeAttenuation(pEngineChannel->ControllerTable[pDimRgn->AttenuationController.controller_number]); |
crossfadeVolume = Engine::CrossfadeCurve[CrossfadeAttenuation(pEngineChannel->ControllerTable[pDimRgn->AttenuationController.controller_number])]; |
134 |
break; |
break; |
135 |
case ::gig::attenuation_ctrl_t::type_none: // no crossfade defined |
case ::gig::attenuation_ctrl_t::type_none: // no crossfade defined |
136 |
default: |
default: |
137 |
CrossfadeVolume = 1.0f; |
crossfadeVolume = 1.0f; |
138 |
} |
} |
139 |
|
|
140 |
PanLeft = 1.0f - float(RTMath::Max(pDimRgn->Pan, 0)) / 63.0f; |
VolumeLeft = volume * Engine::PanCurve[64 - pDimRgn->Pan]; |
141 |
PanRight = 1.0f - float(RTMath::Min(pDimRgn->Pan, 0)) / -64.0f; |
VolumeRight = volume * Engine::PanCurve[64 + pDimRgn->Pan]; |
142 |
|
|
143 |
|
float subfragmentRate = pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE; |
144 |
|
CrossfadeSmoother.trigger(crossfadeVolume, subfragmentRate); |
145 |
|
VolumeSmoother.trigger(pEngineChannel->MidiVolume, subfragmentRate); |
146 |
|
PanLeftSmoother.trigger(pEngineChannel->GlobalPanLeft, subfragmentRate); |
147 |
|
PanRightSmoother.trigger(pEngineChannel->GlobalPanRight, subfragmentRate); |
148 |
|
|
149 |
finalSynthesisParameters.dPos = pDimRgn->SampleStartOffset; // offset where we should start playback of sample (0 - 2000 sample points) |
finalSynthesisParameters.dPos = pDimRgn->SampleStartOffset; // offset where we should start playback of sample (0 - 2000 sample points) |
150 |
Pos = pDimRgn->SampleStartOffset; |
Pos = pDimRgn->SampleStartOffset; |
153 |
long cachedsamples = pSample->GetCache().Size / pSample->FrameSize; |
long cachedsamples = pSample->GetCache().Size / pSample->FrameSize; |
154 |
DiskVoice = cachedsamples < pSample->SamplesTotal; |
DiskVoice = cachedsamples < pSample->SamplesTotal; |
155 |
|
|
156 |
|
const DLS::sample_loop_t& loopinfo = pDimRgn->pSampleLoops[0]; |
157 |
|
|
158 |
if (DiskVoice) { // voice to be streamed from disk |
if (DiskVoice) { // voice to be streamed from disk |
159 |
MaxRAMPos = cachedsamples - (pEngine->MaxSamplesPerCycle << CONFIG_MAX_PITCH) / pSample->Channels; //TODO: this calculation is too pessimistic and may better be moved to Render() method, so it calculates MaxRAMPos dependent to the current demand of sample points to be rendered (e.g. in case of JACK) |
MaxRAMPos = cachedsamples - (pEngine->MaxSamplesPerCycle << CONFIG_MAX_PITCH) / pSample->Channels; //TODO: this calculation is too pessimistic and may better be moved to Render() method, so it calculates MaxRAMPos dependent to the current demand of sample points to be rendered (e.g. in case of JACK) |
160 |
|
|
161 |
// check if there's a loop defined which completely fits into the cached (RAM) part of the sample |
// check if there's a loop defined which completely fits into the cached (RAM) part of the sample |
162 |
RAMLoop = (pSample->Loops && pSample->LoopEnd <= MaxRAMPos); |
RAMLoop = (pDimRgn->SampleLoops && (loopinfo.LoopStart + loopinfo.LoopLength) <= MaxRAMPos); |
163 |
|
|
164 |
if (pDiskThread->OrderNewStream(&DiskStreamRef, pSample, MaxRAMPos, !RAMLoop) < 0) { |
if (pDiskThread->OrderNewStream(&DiskStreamRef, pDimRgn, MaxRAMPos, !RAMLoop) < 0) { |
165 |
dmsg(1,("Disk stream order failed!\n")); |
dmsg(1,("Disk stream order failed!\n")); |
166 |
KillImmediately(); |
KillImmediately(); |
167 |
return -1; |
return -1; |
170 |
} |
} |
171 |
else { // RAM only voice |
else { // RAM only voice |
172 |
MaxRAMPos = cachedsamples; |
MaxRAMPos = cachedsamples; |
173 |
RAMLoop = (pSample->Loops != 0); |
RAMLoop = (pDimRgn->SampleLoops != 0); |
174 |
dmsg(4,("RAM only voice launched (Looping: %s)\n", (RAMLoop) ? "yes" : "no")); |
dmsg(4,("RAM only voice launched (Looping: %s)\n", (RAMLoop) ? "yes" : "no")); |
175 |
} |
} |
176 |
if (RAMLoop) { |
if (RAMLoop) { |
177 |
loop.uiTotalCycles = pSample->LoopPlayCount; |
loop.uiTotalCycles = pSample->LoopPlayCount; |
178 |
loop.uiCyclesLeft = pSample->LoopPlayCount; |
loop.uiCyclesLeft = pSample->LoopPlayCount; |
179 |
loop.uiStart = pSample->LoopStart; |
loop.uiStart = loopinfo.LoopStart; |
180 |
loop.uiEnd = pSample->LoopEnd; |
loop.uiEnd = loopinfo.LoopStart + loopinfo.LoopLength; |
181 |
loop.uiSize = pSample->LoopSize; |
loop.uiSize = loopinfo.LoopLength; |
182 |
} |
} |
183 |
|
|
184 |
// calculate initial pitch value |
// calculate initial pitch value |
185 |
{ |
{ |
186 |
double pitchbasecents = pDimRgn->FineTune + (int) pEngine->ScaleTuning[MIDIKey % 12]; |
double pitchbasecents = pDimRgn->FineTune + (int) pEngine->ScaleTuning[MIDIKey % 12]; |
187 |
if (pDimRgn->PitchTrack) pitchbasecents += (MIDIKey - (int) pDimRgn->UnityNote) * 100; |
|
188 |
|
// GSt behaviour: maximum transpose up is 40 semitones. If |
189 |
|
// MIDI key is more than 40 semitones above unity note, |
190 |
|
// the transpose is not done. |
191 |
|
if (pDimRgn->PitchTrack && (MIDIKey - (int) pDimRgn->UnityNote) < 40) pitchbasecents += (MIDIKey - (int) pDimRgn->UnityNote) * 100; |
192 |
|
|
193 |
this->PitchBase = RTMath::CentsToFreqRatio(pitchbasecents) * (double(pSample->SamplesPerSecond) / double(pEngine->SampleRate)); |
this->PitchBase = RTMath::CentsToFreqRatio(pitchbasecents) * (double(pSample->SamplesPerSecond) / double(pEngine->SampleRate)); |
194 |
this->PitchBend = RTMath::CentsToFreqRatio(((double) PitchBend / 8192.0) * 200.0); // pitchbend wheel +-2 semitones = 200 cents |
this->PitchBend = RTMath::CentsToFreqRatio(((double) PitchBend / 8192.0) * 200.0); // pitchbend wheel +-2 semitones = 200 cents |
195 |
} |
} |
206 |
eg1controllervalue = 0; |
eg1controllervalue = 0; |
207 |
break; |
break; |
208 |
case ::gig::eg1_ctrl_t::type_channelaftertouch: |
case ::gig::eg1_ctrl_t::type_channelaftertouch: |
209 |
eg1controllervalue = 0; // TODO: aftertouch not yet supported |
eg1controllervalue = pEngineChannel->ControllerTable[128]; |
210 |
break; |
break; |
211 |
case ::gig::eg1_ctrl_t::type_velocity: |
case ::gig::eg1_ctrl_t::type_velocity: |
212 |
eg1controllervalue = itNoteOnEvent->Param.Note.Velocity; |
eg1controllervalue = itNoteOnEvent->Param.Note.Velocity; |
237 |
pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
238 |
} |
} |
239 |
|
|
240 |
|
#ifdef CONFIG_INTERPOLATE_VOLUME |
241 |
// setup initial volume in synthesis parameters |
// setup initial volume in synthesis parameters |
242 |
fFinalVolume = getVolume() * EG1.getLevel(); |
#ifdef CONFIG_PROCESS_MUTED_CHANNELS |
243 |
finalSynthesisParameters.fFinalVolumeLeft = fFinalVolume * PanLeft; |
if (pEngineChannel->GetMute()) { |
244 |
finalSynthesisParameters.fFinalVolumeRight = fFinalVolume * PanRight; |
finalSynthesisParameters.fFinalVolumeLeft = 0; |
245 |
|
finalSynthesisParameters.fFinalVolumeRight = 0; |
246 |
|
} |
247 |
|
else |
248 |
|
#else |
249 |
|
{ |
250 |
|
float finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * EG1.getLevel(); |
251 |
|
|
252 |
|
finalSynthesisParameters.fFinalVolumeLeft = finalVolume * VolumeLeft * pEngineChannel->GlobalPanLeft; |
253 |
|
finalSynthesisParameters.fFinalVolumeRight = finalVolume * VolumeRight * pEngineChannel->GlobalPanRight; |
254 |
|
} |
255 |
|
#endif |
256 |
|
#endif |
257 |
|
|
258 |
// setup EG 2 (VCF Cutoff EG) |
// setup EG 2 (VCF Cutoff EG) |
259 |
{ |
{ |
264 |
eg2controllervalue = 0; |
eg2controllervalue = 0; |
265 |
break; |
break; |
266 |
case ::gig::eg2_ctrl_t::type_channelaftertouch: |
case ::gig::eg2_ctrl_t::type_channelaftertouch: |
267 |
eg2controllervalue = 0; // TODO: aftertouch not yet supported |
eg2controllervalue = pEngineChannel->ControllerTable[128]; |
268 |
break; |
break; |
269 |
case ::gig::eg2_ctrl_t::type_velocity: |
case ::gig::eg2_ctrl_t::type_velocity: |
270 |
eg2controllervalue = itNoteOnEvent->Param.Note.Velocity; |
eg2controllervalue = itNoteOnEvent->Param.Note.Velocity; |
342 |
pLFO1->ExtController = 0; // no external controller |
pLFO1->ExtController = 0; // no external controller |
343 |
bLFO1Enabled = false; |
bLFO1Enabled = false; |
344 |
} |
} |
345 |
if (bLFO1Enabled) pLFO1->trigger(pDimRgn->LFO1Frequency, |
if (bLFO1Enabled) { |
346 |
start_level_max, |
pLFO1->trigger(pDimRgn->LFO1Frequency, |
347 |
lfo1_internal_depth, |
start_level_min, |
348 |
pDimRgn->LFO1ControlDepth, |
lfo1_internal_depth, |
349 |
pDimRgn->LFO1FlipPhase, |
pDimRgn->LFO1ControlDepth, |
350 |
pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
pDimRgn->LFO1FlipPhase, |
351 |
|
pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
352 |
|
pLFO1->update(pLFO1->ExtController ? pEngineChannel->ControllerTable[pLFO1->ExtController] : 0); |
353 |
|
} |
354 |
} |
} |
355 |
|
|
356 |
|
|
388 |
pLFO2->ExtController = 0; // no external controller |
pLFO2->ExtController = 0; // no external controller |
389 |
bLFO2Enabled = false; |
bLFO2Enabled = false; |
390 |
} |
} |
391 |
if (bLFO2Enabled) pLFO2->trigger(pDimRgn->LFO2Frequency, |
if (bLFO2Enabled) { |
392 |
start_level_max, |
pLFO2->trigger(pDimRgn->LFO2Frequency, |
393 |
lfo2_internal_depth, |
start_level_max, |
394 |
pDimRgn->LFO2ControlDepth, |
lfo2_internal_depth, |
395 |
pDimRgn->LFO2FlipPhase, |
pDimRgn->LFO2ControlDepth, |
396 |
pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
pDimRgn->LFO2FlipPhase, |
397 |
|
pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
398 |
|
pLFO2->update(pLFO2->ExtController ? pEngineChannel->ControllerTable[pLFO2->ExtController] : 0); |
399 |
|
} |
400 |
} |
} |
401 |
|
|
402 |
|
|
416 |
break; |
break; |
417 |
case ::gig::lfo3_ctrl_aftertouch: |
case ::gig::lfo3_ctrl_aftertouch: |
418 |
lfo3_internal_depth = 0; |
lfo3_internal_depth = 0; |
419 |
pLFO3->ExtController = 0; // TODO: aftertouch not implemented yet |
pLFO3->ExtController = 128; |
420 |
bLFO3Enabled = false; // see TODO comment in line above |
bLFO3Enabled = true; |
421 |
break; |
break; |
422 |
case ::gig::lfo3_ctrl_internal_modwheel: |
case ::gig::lfo3_ctrl_internal_modwheel: |
423 |
lfo3_internal_depth = pDimRgn->LFO3InternalDepth; |
lfo3_internal_depth = pDimRgn->LFO3InternalDepth; |
426 |
break; |
break; |
427 |
case ::gig::lfo3_ctrl_internal_aftertouch: |
case ::gig::lfo3_ctrl_internal_aftertouch: |
428 |
lfo3_internal_depth = pDimRgn->LFO3InternalDepth; |
lfo3_internal_depth = pDimRgn->LFO3InternalDepth; |
429 |
pLFO1->ExtController = 0; // TODO: aftertouch not implemented yet |
pLFO1->ExtController = 128; |
430 |
bLFO3Enabled = (lfo3_internal_depth > 0 /*|| pDimRgn->LFO3ControlDepth > 0*/); // see TODO comment in line above |
bLFO3Enabled = (lfo3_internal_depth > 0 || pDimRgn->LFO3ControlDepth > 0); |
431 |
break; |
break; |
432 |
default: |
default: |
433 |
lfo3_internal_depth = 0; |
lfo3_internal_depth = 0; |
434 |
pLFO3->ExtController = 0; // no external controller |
pLFO3->ExtController = 0; // no external controller |
435 |
bLFO3Enabled = false; |
bLFO3Enabled = false; |
436 |
} |
} |
437 |
if (bLFO3Enabled) pLFO3->trigger(pDimRgn->LFO3Frequency, |
if (bLFO3Enabled) { |
438 |
start_level_mid, |
pLFO3->trigger(pDimRgn->LFO3Frequency, |
439 |
lfo3_internal_depth, |
start_level_mid, |
440 |
pDimRgn->LFO3ControlDepth, |
lfo3_internal_depth, |
441 |
false, |
pDimRgn->LFO3ControlDepth, |
442 |
pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
false, |
443 |
|
pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
444 |
|
pLFO3->update(pLFO3->ExtController ? pEngineChannel->ControllerTable[pLFO3->ExtController] : 0); |
445 |
|
} |
446 |
} |
} |
447 |
|
|
448 |
|
|
484 |
case ::gig::vcf_cutoff_ctrl_genpurpose8: |
case ::gig::vcf_cutoff_ctrl_genpurpose8: |
485 |
VCFCutoffCtrl.controller = 83; |
VCFCutoffCtrl.controller = 83; |
486 |
break; |
break; |
487 |
case ::gig::vcf_cutoff_ctrl_aftertouch: //TODO: not implemented yet |
case ::gig::vcf_cutoff_ctrl_aftertouch: |
488 |
|
VCFCutoffCtrl.controller = 128; |
489 |
|
break; |
490 |
case ::gig::vcf_cutoff_ctrl_none: |
case ::gig::vcf_cutoff_ctrl_none: |
491 |
default: |
default: |
492 |
VCFCutoffCtrl.controller = 0; |
VCFCutoffCtrl.controller = 0; |
520 |
finalSynthesisParameters.filterLeft.SetType(pDimRgn->VCFType); |
finalSynthesisParameters.filterLeft.SetType(pDimRgn->VCFType); |
521 |
finalSynthesisParameters.filterRight.SetType(pDimRgn->VCFType); |
finalSynthesisParameters.filterRight.SetType(pDimRgn->VCFType); |
522 |
#else // override filter type |
#else // override filter type |
523 |
FilterLeft.SetType(CONFIG_OVERRIDE_FILTER_TYPE); |
finalSynthesisParameters.filterLeft.SetType(CONFIG_OVERRIDE_FILTER_TYPE); |
524 |
FilterRight.SetType(CONFIG_OVERRIDE_FILTER_TYPE); |
finalSynthesisParameters.filterRight.SetType(CONFIG_OVERRIDE_FILTER_TYPE); |
525 |
#endif // CONFIG_OVERRIDE_FILTER_TYPE |
#endif // CONFIG_OVERRIDE_FILTER_TYPE |
526 |
|
|
527 |
VCFCutoffCtrl.value = pEngineChannel->ControllerTable[VCFCutoffCtrl.controller]; |
VCFCutoffCtrl.value = pEngineChannel->ControllerTable[VCFCutoffCtrl.controller]; |
544 |
else { |
else { |
545 |
cvalue = pDimRgn->VCFCutoff; |
cvalue = pDimRgn->VCFCutoff; |
546 |
} |
} |
547 |
cutoff *= float(cvalue) * 0.00787402f; // (1 / 127) |
cutoff *= float(cvalue); |
548 |
if (cutoff > 1.0) cutoff = 1.0; |
if (cutoff > 127.0f) cutoff = 127.0f; |
|
cutoff = (cutoff < 0.5 ? cutoff * 4826 - 1 : cutoff * 5715 - 449); |
|
|
if (cutoff < 1.0) cutoff = 1.0; |
|
549 |
|
|
550 |
// calculate resonance |
// calculate resonance |
551 |
float resonance = (float) (VCFResonanceCtrl.controller ? VCFResonanceCtrl.value : pDimRgn->VCFResonance) * 0.00787f; // 0.0..1.0 |
float resonance = (float) (VCFResonanceCtrl.controller ? VCFResonanceCtrl.value : pDimRgn->VCFResonance); |
552 |
|
|
553 |
VCFCutoffCtrl.fvalue = cutoff - 1.0; |
VCFCutoffCtrl.fvalue = cutoff; |
554 |
VCFResonanceCtrl.fvalue = resonance; |
VCFResonanceCtrl.fvalue = resonance; |
555 |
} |
} |
556 |
else { |
else { |
627 |
} |
} |
628 |
} |
} |
629 |
|
|
630 |
sample_t* ptr = DiskStreamRef.pStream->GetReadPtr(); // get the current read_ptr within the ringbuffer where we read the samples from |
sample_t* ptr = (sample_t*)DiskStreamRef.pStream->GetReadPtr(); // get the current read_ptr within the ringbuffer where we read the samples from |
631 |
|
|
632 |
// render current audio fragment |
// render current audio fragment |
633 |
Synthesize(Samples, ptr, Delay); |
Synthesize(Samples, ptr, Delay); |
680 |
* for the given time. |
* for the given time. |
681 |
* |
* |
682 |
* @param itEvent - iterator pointing to the next event to be processed |
* @param itEvent - iterator pointing to the next event to be processed |
683 |
* @param End - youngest time stamp where processing should be stopped |
* @param End - youngest time stamp where processing should be stopped |
684 |
*/ |
*/ |
685 |
void Voice::processTransitionEvents(RTList<Event>::Iterator& itEvent, uint End) { |
void Voice::processTransitionEvents(RTList<Event>::Iterator& itEvent, uint End) { |
686 |
for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) { |
for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) { |
699 |
* the given time. |
* the given time. |
700 |
* |
* |
701 |
* @param itEvent - iterator pointing to the next event to be processed |
* @param itEvent - iterator pointing to the next event to be processed |
702 |
* @param End - youngest time stamp where processing should be stopped |
* @param End - youngest time stamp where processing should be stopped |
703 |
*/ |
*/ |
704 |
void Voice::processCCEvents(RTList<Event>::Iterator& itEvent, uint End) { |
void Voice::processCCEvents(RTList<Event>::Iterator& itEvent, uint End) { |
705 |
for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) { |
for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) { |
722 |
} |
} |
723 |
if (pDimRgn->AttenuationController.type == ::gig::attenuation_ctrl_t::type_controlchange && |
if (pDimRgn->AttenuationController.type == ::gig::attenuation_ctrl_t::type_controlchange && |
724 |
itEvent->Param.CC.Controller == pDimRgn->AttenuationController.controller_number) { |
itEvent->Param.CC.Controller == pDimRgn->AttenuationController.controller_number) { |
725 |
processCrossFadeEvent(itEvent); |
CrossfadeSmoother.update(Engine::CrossfadeCurve[CrossfadeAttenuation(itEvent->Param.CC.Value)]); |
726 |
|
} |
727 |
|
if (itEvent->Param.CC.Controller == 7) { // volume |
728 |
|
VolumeSmoother.update(Engine::VolumeCurve[itEvent->Param.CC.Value]); |
729 |
|
} else if (itEvent->Param.CC.Controller == 10) { // panpot |
730 |
|
PanLeftSmoother.update(Engine::PanCurve[128 - itEvent->Param.CC.Value]); |
731 |
|
PanRightSmoother.update(Engine::PanCurve[itEvent->Param.CC.Value]); |
732 |
} |
} |
733 |
} else if (itEvent->Type == Event::type_pitchbend) { // if pitch bend event |
} else if (itEvent->Type == Event::type_pitchbend) { // if pitch bend event |
734 |
processPitchEvent(itEvent); |
processPitchEvent(itEvent); |
742 |
PitchBend = pitch; |
PitchBend = pitch; |
743 |
} |
} |
744 |
|
|
|
/**
 * Updates the voice's crossfade attenuation from a MIDI control change
 * event on the dimension region's attenuation controller, then refreshes
 * the cached final volume level.
 *
 * @param itEvent - iterator pointing to the control change event
 */
void Voice::processCrossFadeEvent(RTList<Event>::Iterator& itEvent) {
    CrossfadeVolume = CrossfadeAttenuation(itEvent->Param.CC.Value);
    // getVolume() reads CrossfadeVolume, so it must be updated first
    fFinalVolume = getVolume();
}
|
|
|
|
|
float Voice::getVolume() { |
|
|
#if CONFIG_PROCESS_MUTED_CHANNELS |
|
|
return pEngineChannel->GetMute() ? 0 : (Volume * CrossfadeVolume * pEngineChannel->GlobalVolume); |
|
|
#else |
|
|
return Volume * CrossfadeVolume * pEngineChannel->GlobalVolume; |
|
|
#endif |
|
|
} |
|
|
|
|
745 |
/**
 * Processes a MIDI control change event assigned as VCF cutoff controller
 * and updates the voice's final filter cutoff parameter accordingly.
 *
 * @param itEvent - iterator pointing to the control change event
 */
void Voice::processCutoffEvent(RTList<Event>::Iterator& itEvent) {
    int ccvalue = itEvent->Param.CC.Value;
    if (VCFCutoffCtrl.value == ccvalue) return; // controller value unchanged, nothing to do
    // FIX: original read 'VCFCutoffCtrl.value == ccvalue;' — a no-op
    // comparison instead of an assignment, so the cached controller value
    // was never updated and the early-return above could never trigger.
    VCFCutoffCtrl.value = ccvalue;
    if (pDimRgn->VCFCutoffControllerInvert) ccvalue = 127 - ccvalue;
    if (ccvalue < pDimRgn->VCFVelocityScale) ccvalue = pDimRgn->VCFVelocityScale;
    // scale the velocity-dependent cutoff base by the controller value
    // (kept in the 0..127 MIDI range, clamped at the top)
    float cutoff = CutoffBase * float(ccvalue);
    if (cutoff > 127.0f) cutoff = 127.0f;

    VCFCutoffCtrl.fvalue = cutoff; // needed for initialization of fFinalCutoff next time
    fFinalCutoff = cutoff;
}
757 |
|
|
759 |
// convert absolute controller value to differential |
// convert absolute controller value to differential |
760 |
const int ctrldelta = itEvent->Param.CC.Value - VCFResonanceCtrl.value; |
const int ctrldelta = itEvent->Param.CC.Value - VCFResonanceCtrl.value; |
761 |
VCFResonanceCtrl.value = itEvent->Param.CC.Value; |
VCFResonanceCtrl.value = itEvent->Param.CC.Value; |
762 |
const float resonancedelta = (float) ctrldelta * 0.00787f; // 0.0..1.0 |
const float resonancedelta = (float) ctrldelta; |
763 |
fFinalResonance += resonancedelta; |
fFinalResonance += resonancedelta; |
764 |
// needed for initialization of parameter |
// needed for initialization of parameter |
765 |
VCFResonanceCtrl.fvalue = itEvent->Param.CC.Value * 0.00787f; |
VCFResonanceCtrl.fvalue = itEvent->Param.CC.Value; |
766 |
} |
} |
767 |
|
|
768 |
/** |
/** |
774 |
* @param Skip - number of sample points to skip in output buffer |
* @param Skip - number of sample points to skip in output buffer |
775 |
*/ |
*/ |
776 |
void Voice::Synthesize(uint Samples, sample_t* pSrc, uint Skip) { |
void Voice::Synthesize(uint Samples, sample_t* pSrc, uint Skip) { |
777 |
finalSynthesisParameters.pOutLeft = &pEngineChannel->pOutputLeft[Skip]; |
finalSynthesisParameters.pOutLeft = &pEngineChannel->pChannelLeft->Buffer()[Skip]; |
778 |
finalSynthesisParameters.pOutRight = &pEngineChannel->pOutputRight[Skip]; |
finalSynthesisParameters.pOutRight = &pEngineChannel->pChannelRight->Buffer()[Skip]; |
779 |
finalSynthesisParameters.pSrc = pSrc; |
finalSynthesisParameters.pSrc = pSrc; |
780 |
|
|
781 |
RTList<Event>::Iterator itCCEvent = pEngineChannel->pEvents->first(); |
RTList<Event>::Iterator itCCEvent = pEngineChannel->pEvents->first(); |
795 |
|
|
796 |
// initialize all final synthesis parameters |
// initialize all final synthesis parameters |
797 |
finalSynthesisParameters.fFinalPitch = PitchBase * PitchBend; |
finalSynthesisParameters.fFinalPitch = PitchBase * PitchBend; |
|
fFinalVolume = getVolume(); |
|
798 |
fFinalCutoff = VCFCutoffCtrl.fvalue; |
fFinalCutoff = VCFCutoffCtrl.fvalue; |
799 |
fFinalResonance = VCFResonanceCtrl.fvalue; |
fFinalResonance = VCFResonanceCtrl.fvalue; |
800 |
|
|
801 |
// process MIDI control change and pitchbend events for this subfragment |
// process MIDI control change and pitchbend events for this subfragment |
802 |
processCCEvents(itCCEvent, iSubFragmentEnd); |
processCCEvents(itCCEvent, iSubFragmentEnd); |
803 |
|
|
804 |
|
float fFinalVolume = VolumeSmoother.render() * CrossfadeSmoother.render(); |
805 |
|
#ifdef CONFIG_PROCESS_MUTED_CHANNELS |
806 |
|
if (pEngineChannel->GetMute()) fFinalVolume = 0; |
807 |
|
#endif |
808 |
|
|
809 |
// process transition events (note on, note off & sustain pedal) |
// process transition events (note on, note off & sustain pedal) |
810 |
processTransitionEvents(itNoteEvent, iSubFragmentEnd); |
processTransitionEvents(itNoteEvent, iSubFragmentEnd); |
811 |
|
|
812 |
// if the voice was killed in this subfragment switch EG1 to fade out stage |
// if the voice was killed in this subfragment, or if the |
813 |
if (itKillEvent && killPos <= iSubFragmentEnd) { |
// filter EG is finished, switch EG1 to fade out stage |
814 |
|
if ((itKillEvent && killPos <= iSubFragmentEnd) || |
815 |
|
(SYNTHESIS_MODE_GET_FILTER(SynthesisMode) && |
816 |
|
EG2.getSegmentType() == EGADSR::segment_end)) { |
817 |
EG1.enterFadeOutStage(); |
EG1.enterFadeOutStage(); |
818 |
itKillEvent = Pool<Event>::Iterator(); |
itKillEvent = Pool<Event>::Iterator(); |
819 |
} |
} |
844 |
if (EG3.active()) finalSynthesisParameters.fFinalPitch *= EG3.render(); |
if (EG3.active()) finalSynthesisParameters.fFinalPitch *= EG3.render(); |
845 |
|
|
846 |
// process low frequency oscillators |
// process low frequency oscillators |
847 |
if (bLFO1Enabled) fFinalVolume *= pLFO1->render(); |
if (bLFO1Enabled) fFinalVolume *= (1.0f - pLFO1->render()); |
848 |
if (bLFO2Enabled) fFinalCutoff *= pLFO2->render(); |
if (bLFO2Enabled) fFinalCutoff *= pLFO2->render(); |
849 |
if (bLFO3Enabled) finalSynthesisParameters.fFinalPitch *= RTMath::CentsToFreqRatio(pLFO3->render()); |
if (bLFO3Enabled) finalSynthesisParameters.fFinalPitch *= RTMath::CentsToFreqRatio(pLFO3->render()); |
850 |
|
|
851 |
// if filter enabled then update filter coefficients |
// if filter enabled then update filter coefficients |
852 |
if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode)) { |
if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode)) { |
853 |
finalSynthesisParameters.filterLeft.SetParameters(fFinalCutoff + 1.0, fFinalResonance, pEngine->SampleRate); |
finalSynthesisParameters.filterLeft.SetParameters(fFinalCutoff, fFinalResonance, pEngine->SampleRate); |
854 |
finalSynthesisParameters.filterRight.SetParameters(fFinalCutoff + 1.0, fFinalResonance, pEngine->SampleRate); |
finalSynthesisParameters.filterRight.SetParameters(fFinalCutoff, fFinalResonance, pEngine->SampleRate); |
855 |
} |
} |
856 |
|
|
857 |
// do we need resampling? |
// do we need resampling? |
865 |
finalSynthesisParameters.uiToGo = iSubFragmentEnd - i; |
finalSynthesisParameters.uiToGo = iSubFragmentEnd - i; |
866 |
#ifdef CONFIG_INTERPOLATE_VOLUME |
#ifdef CONFIG_INTERPOLATE_VOLUME |
867 |
finalSynthesisParameters.fFinalVolumeDeltaLeft = |
finalSynthesisParameters.fFinalVolumeDeltaLeft = |
868 |
(fFinalVolume * PanLeft - finalSynthesisParameters.fFinalVolumeLeft) / finalSynthesisParameters.uiToGo; |
(fFinalVolume * VolumeLeft * PanLeftSmoother.render() - |
869 |
|
finalSynthesisParameters.fFinalVolumeLeft) / finalSynthesisParameters.uiToGo; |
870 |
finalSynthesisParameters.fFinalVolumeDeltaRight = |
finalSynthesisParameters.fFinalVolumeDeltaRight = |
871 |
(fFinalVolume * PanRight - finalSynthesisParameters.fFinalVolumeRight) / finalSynthesisParameters.uiToGo; |
(fFinalVolume * VolumeRight * PanRightSmoother.render() - |
872 |
|
finalSynthesisParameters.fFinalVolumeRight) / finalSynthesisParameters.uiToGo; |
873 |
#else |
#else |
874 |
finalSynthesisParameters.fFinalVolumeLeft = fFinalVolume * PanLeft; |
finalSynthesisParameters.fFinalVolumeLeft = |
875 |
finalSynthesisParameters.fFinalVolumeRight = fFinalVolume * PanRight; |
fFinalVolume * VolumeLeft * PanLeftSmoother.render(); |
876 |
|
finalSynthesisParameters.fFinalVolumeRight = |
877 |
|
fFinalVolume * VolumeRight * PanRightSmoother.render(); |
878 |
#endif |
#endif |
879 |
// render audio for one subfragment |
// render audio for one subfragment |
880 |
RunSynthesisFunction(SynthesisMode, &finalSynthesisParameters, &loop); |
RunSynthesisFunction(SynthesisMode, &finalSynthesisParameters, &loop); |
888 |
if (EG1.active()) { |
if (EG1.active()) { |
889 |
|
|
890 |
// if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage |
// if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage |
891 |
if (pSample->Loops && Pos <= pSample->LoopStart && pSample->LoopStart < newPos) { |
if (pDimRgn->SampleLoops && Pos <= pDimRgn->pSampleLoops[0].LoopStart && pDimRgn->pSampleLoops[0].LoopStart < newPos) { |
892 |
EG1.update(EGADSR::event_hold_end, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
EG1.update(EGADSR::event_hold_end, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
893 |
} |
} |
894 |
|
|
926 |
* fading down the volume level to avoid clicks and regular processing |
* fading down the volume level to avoid clicks and regular processing |
927 |
* until the kill event actually occured! |
* until the kill event actually occured! |
928 |
* |
* |
929 |
* @see Kill() |
* If it's necessary to know when the voice's disk stream was actually |
930 |
|
* deleted, then one can set the optional @a bRequestNotification |
931 |
|
* parameter and this method will then return the handle of the disk |
932 |
|
* stream (unique identifier) and one can use this handle to poll the |
933 |
|
* disk thread if this stream has been deleted. In any case this method |
934 |
|
* will return immediately and will not block until the stream actually |
935 |
|
* was deleted. |
936 |
|
* |
937 |
|
* @param bRequestNotification - (optional) whether the disk thread shall |
938 |
|
* provide a notification once it deleted |
939 |
|
* the respective disk stream |
940 |
|
* (default=false) |
941 |
|
* @returns handle to the voice's disk stream or @c Stream::INVALID_HANDLE |
942 |
|
* if the voice did not use a disk stream at all |
943 |
|
* @see Kill() |
944 |
*/ |
*/ |
945 |
void Voice::KillImmediately() { |
Stream::Handle Voice::KillImmediately(bool bRequestNotification) { |
946 |
|
Stream::Handle hStream = Stream::INVALID_HANDLE; |
947 |
if (DiskVoice && DiskStreamRef.State != Stream::state_unused) { |
if (DiskVoice && DiskStreamRef.State != Stream::state_unused) { |
948 |
pDiskThread->OrderDeletionOfStream(&DiskStreamRef); |
pDiskThread->OrderDeletionOfStream(&DiskStreamRef, bRequestNotification); |
949 |
|
hStream = DiskStreamRef.hStream; |
950 |
} |
} |
951 |
Reset(); |
Reset(); |
952 |
|
return hStream; |
953 |
} |
} |
954 |
|
|
955 |
/** |
/** |