/***************************************************************************
 *                                                                         *
 *   LinuxSampler - modular, streaming capable sampler                     *
 *                                                                         *
 *   Copyright (C) 2003,2004 by Benno Senoner and Christian Schoenebeck    *
 *   Copyright (C) 2005-2020 Christian Schoenebeck                         *
 *   Copyright (C) 2009-2012 Grigor Iliev                                  *
 *   Copyright (C) 2013-2017 Andreas Persson                               *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the Free Software           *
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston,                 *
 *   MA  02111-1307  USA                                                   *
 ***************************************************************************/
25 |
|
26 |
#include "AbstractVoice.h" |
27 |
|
28 |
namespace LinuxSampler { |
29 |
|
30 |
AbstractVoice::AbstractVoice(SignalUnitRack* pRack): pSignalUnitRack(pRack) {
    pEngineChannel = NULL;

    // allocate the three standard modulation LFO clusters
    pLFO1 = new LFOClusterUnsigned(1.0f);  // amplitude LFO (0..1 range)
    pLFO2 = new LFOClusterUnsigned(1.0f);  // filter LFO (0..1 range)
    pLFO3 = new LFOClusterSigned(1200.0f); // pitch LFO (-1200..+1200 range)

    // voice is inactive until Trigger() is called
    PlaybackState = playback_state_end;

    // start with a clean synthesis mode bit field, ...
    SynthesisMode = 0;
    // ... then select the synthesis implementation (the asm core is not
    // supported at the moment, so the plain implementation is always chosen)
    #if 0 // CONFIG_ASM && ARCH_X86
    SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, Features::supportsMMX() && Features::supportsSSE());
    #else
    SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, false);
    #endif
    SYNTHESIS_MODE_SET_PROFILING(SynthesisMode, gig::Profiler::isEnabled());

    // make sure both filters start from a defined state
    finalSynthesisParameters.filterLeft.Reset();
    finalSynthesisParameters.filterRight.Reset();

    // per-voice EQ is created lazily by CreateEq()
    pEq        = NULL;
    bEqSupport = false;
}
51 |
|
52 |
AbstractVoice::~AbstractVoice() {
    // NOTE: applying 'delete' to a NULL pointer is a no-op in C++, so the
    // previous 'if (p) delete p;' null checks were redundant
    delete pLFO1;
    delete pLFO2;
    delete pLFO3;
    delete pEq;
}
59 |
|
60 |
void AbstractVoice::CreateEq() { |
61 |
if(!bEqSupport) return; |
62 |
if(pEq != NULL) delete pEq; |
63 |
pEq = new EqSupport; |
64 |
pEq->InitEffect(GetEngine()->pAudioOutputDevice); |
65 |
} |
66 |
|
67 |
/**
 * Resets voice variables. Should only be called if rendering process is
 * suspended / not running.
 */
71 |
void AbstractVoice::Reset() {
    // bring both filters back to a defined initial state
    finalSynthesisParameters.filterLeft.Reset();
    finalSynthesisParameters.filterRight.Reset();

    // detach the voice from any disk stream
    DiskStreamRef.pStream = NULL;
    DiskStreamRef.hStream = 0;
    DiskStreamRef.State   = Stream::state_unused;
    DiskStreamRef.OrderID = 0;

    // mark the voice as inactive and drop any pending event references
    PlaybackState  = playback_state_end;
    itTriggerEvent = Pool<Event>::Iterator();
    itKillEvent    = Pool<Event>::Iterator();
}
82 |
|
83 |
/**
 * Initializes and triggers the voice, a disk stream will be launched if
 * needed.
 *
 * @param pEngineChannel - engine channel on which this voice was ordered
 * @param itNoteOnEvent  - event that caused triggering of this voice
 * @param PitchBend      - MIDI detune factor (-8192 ... +8191)
 * @param VoiceType      - type of this voice
 * @param iKeyGroup      - a value > 0 defines a key group in which this voice is member of
 * @returns 0 on success, a value < 0 if the voice wasn't triggered
 *          (either due to an error or e.g. because no region is
 *          defined for the given key)
 */
97 |
int AbstractVoice::Trigger (
    AbstractEngineChannel* pEngineChannel,
    Pool<Event>::Iterator& itNoteOnEvent,
    int PitchBend,
    type_t VoiceType,
    int iKeyGroup
) {
    this->pEngineChannel = pEngineChannel;
    Orphan = false;

    #if CONFIG_DEVMODE
    if (itNoteOnEvent->FragmentPos() > GetEngine()->MaxSamplesPerCycle) { // just a sanity check for debugging
        dmsg(1,("Voice::Trigger(): ERROR, TriggerDelay > Totalsamples\n"));
    }
    #endif // CONFIG_DEVMODE

    Type            = VoiceType;
    pNote           = pEngineChannel->pEngine->NoteByID( itNoteOnEvent->Param.Note.ID );
    PlaybackState   = playback_state_init; // mark voice as triggered, but no audio rendered yet
    Delay           = itNoteOnEvent->FragmentPos();
    itTriggerEvent  = itNoteOnEvent;
    itKillEvent     = Pool<Event>::Iterator();
    MidiKeyBase* pKeyInfo = GetMidiKeyInfo(MIDIKey());

    // when editing key groups with an instrument editor while sound was
    // already loaded, ActiveKeyGroups may not have the KeyGroup in question
    // so use find() here instead of array subscript operator[] to avoid an
    // implied creation of a NULL entry, to prevent a crash while editing
    // instruments
    {
        AbstractEngineChannel::ActiveKeyGroupMap::const_iterator it =
            pEngineChannel->ActiveKeyGroups.find(iKeyGroup);
        pGroupEvents =
            (iKeyGroup && it != pEngineChannel->ActiveKeyGroups.end()) ?
                it->second : NULL;
    }

    SmplInfo  = GetSampleInfo();
    RgnInfo   = GetRegionInfo();
    InstrInfo = GetInstrumentInfo();

    MIDIPan = CalculatePan(pEngineChannel->iLastPanRequest);

    AboutToTrigger();

    // calculate volume
    const double velocityAttenuation = GetVelocityAttenuation(MIDIVelocity());
    float volume = CalculateVolume(velocityAttenuation) * pKeyInfo->Volume;
    if (volume <= 0) return -1; // don't bother launching a voice that is inaudible

    // select channel mode (mono or stereo)
    SYNTHESIS_MODE_SET_CHANNELS(SynthesisMode, SmplInfo.ChannelCount == 2);
    // select bit depth (16 or 24)
    SYNTHESIS_MODE_SET_BITDEPTH24(SynthesisMode, SmplInfo.BitDepth == 24);

    // get starting crossfade volume level
    float crossfadeVolume = CalculateCrossfadeVolume(MIDIVelocity());

    VolumeLeft  = volume * pKeyInfo->PanLeft;
    VolumeRight = volume * pKeyInfo->PanRight;

    // this rate is used for rather mellow volume fades
    const float subfragmentRate = GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE;
    // this rate is used for very fast volume fades
    const float quickRampRate = RTMath::Min(subfragmentRate, GetEngine()->SampleRate * 0.001f /* approx. 13ms */);
    CrossfadeSmoother.trigger(crossfadeVolume, subfragmentRate);

    VolumeSmoother.trigger(pEngineChannel->MidiVolume, subfragmentRate);
    NoteVolume.setCurveOnly(pNote ? pNote->Override.VolumeCurve : DEFAULT_FADE_CURVE);
    NoteVolume.setCurrentValue(pNote ? pNote->Override.Volume.Value : 1.f);
    NoteVolume.setDefaultDuration(pNote ? pNote->Override.VolumeTime : DEFAULT_NOTE_VOLUME_TIME_S);
    NoteVolume.setFinal(pNote ? pNote->Override.Volume.Final : false);

    // Check if the sample needs disk streaming or is too short for that
    long cachedsamples = GetSampleCacheSize() / SmplInfo.FrameSize;
    DiskVoice          = cachedsamples < SmplInfo.TotalFrameCount;

    SetSampleStartOffset();

    if (DiskVoice) { // voice to be streamed from disk
        if (cachedsamples > (GetEngine()->MaxSamplesPerCycle << CONFIG_MAX_PITCH)) {
            //TODO: this calculation is too pessimistic
            MaxRAMPos = cachedsamples - (GetEngine()->MaxSamplesPerCycle << CONFIG_MAX_PITCH);
        } else {
            // The cache is too small to fit a max sample buffer.
            // Setting MaxRAMPos to 0 will probably cause a click
            // in the audio, but it's better than not handling
            // this case at all, which would have caused the
            // unsigned MaxRAMPos to be set to a negative number.
            MaxRAMPos = 0;
        }

        // check if there's a loop defined which completely fits into the cached (RAM) part of the sample
        RAMLoop = (SmplInfo.HasLoops && (SmplInfo.LoopStart + SmplInfo.LoopLength) <= MaxRAMPos);

        if (OrderNewStream()) return -1;
        dmsg(4,("Disk voice launched (cached samples: %ld, total Samples: %d, MaxRAMPos: %lu, RAMLooping: %s)\n", cachedsamples, SmplInfo.TotalFrameCount, MaxRAMPos, (RAMLoop) ? "yes" : "no"));
    }
    else { // RAM only voice
        MaxRAMPos = cachedsamples;
        RAMLoop = (SmplInfo.HasLoops);
        dmsg(4,("RAM only voice launched (Looping: %s)\n", (RAMLoop) ? "yes" : "no"));
    }
    if (RAMLoop) {
        loop.uiTotalCycles = SmplInfo.LoopPlayCount;
        loop.uiCyclesLeft  = SmplInfo.LoopPlayCount;
        loop.uiStart       = SmplInfo.LoopStart;
        loop.uiEnd         = SmplInfo.LoopStart + SmplInfo.LoopLength;
        loop.uiSize        = SmplInfo.LoopLength;
    }

    Pitch = CalculatePitchInfo(PitchBend);
    NotePitch.setCurveOnly(pNote ? pNote->Override.PitchCurve : DEFAULT_FADE_CURVE);
    NotePitch.setCurrentValue(pNote ? pNote->Override.Pitch.Value : 1.0f);
    NotePitch.setFinal(pNote ? pNote->Override.Pitch.Final : false);
    NotePitch.setDefaultDuration(pNote ? pNote->Override.PitchTime : DEFAULT_NOTE_PITCH_TIME_S);
    NoteCutoff.Value    = (pNote) ? pNote->Override.Cutoff.Value : 1.0f;
    NoteCutoff.Final    = (pNote) ? pNote->Override.Cutoff.isFinal() : false;
    NoteResonance.Value = (pNote) ? pNote->Override.Resonance.Value : 1.0f;
    NoteResonance.Final = (pNote) ? pNote->Override.Resonance.Final : false;

    // the length of the decay and release curves are dependent on the velocity
    const double velrelease = 1 / GetVelocityRelease(MIDIVelocity());

    if (pSignalUnitRack == NULL) { // setup EG 1 (VCA EG)
        // get current value of EG1 controller
        double eg1controllervalue = GetEG1ControllerValue(MIDIVelocity());

        // calculate influence of EG1 controller on EG1's parameters
        EGInfo egInfo = CalculateEG1ControllerInfluence(eg1controllervalue);

        if (pNote) { // apply per-note real-time overrides requested by instrument script
            pNote->Override.Attack.applyTo(egInfo.Attack);
            pNote->Override.Decay.applyTo(egInfo.Decay);
            pNote->Override.Release.applyTo(egInfo.Release);
        }

        TriggerEG1(egInfo, velrelease, velocityAttenuation, GetEngine()->SampleRate, MIDIVelocity());
    } else {
        pSignalUnitRack->Trigger();
    }

    const uint8_t pan = (pSignalUnitRack) ? pSignalUnitRack->GetEndpointUnit()->CalculatePan(MIDIPan) : MIDIPan;
    for (int c = 0; c < 2; ++c) { // 0 = left channel, 1 = right channel
        float value = (pNote) ? AbstractEngine::PanCurveValueNorm(pNote->Override.Pan.Value, c) : 1.f;
        NotePan[c].setCurveOnly(pNote ? pNote->Override.PanCurve : DEFAULT_FADE_CURVE);
        NotePan[c].setCurrentValue(value);
        NotePan[c].setFinal(pNote ? pNote->Override.Pan.Final : false);
        NotePan[c].setDefaultDuration(pNote ? pNote->Override.PanTime : DEFAULT_NOTE_PAN_TIME_S);
    }

    PanLeftSmoother.trigger(
        AbstractEngine::PanCurve[128 - pan],
        quickRampRate //NOTE: maybe we should have 2 separate pan smoothers, one for MIDI CC10 (with slow rate) and one for instrument script change_pan() calls (with fast rate)
    );
    PanRightSmoother.trigger(
        AbstractEngine::PanCurve[pan],
        quickRampRate //NOTE: maybe we should have 2 separate pan smoothers, one for MIDI CC10 (with slow rate) and one for instrument script change_pan() calls (with fast rate)
    );

    #if CONFIG_INTERPOLATE_VOLUME
    // setup initial volume in synthesis parameters
    #if CONFIG_PROCESS_MUTED_CHANNELS
    if (pEngineChannel->GetMute()) { // skip if sampler channel is muted
        finalSynthesisParameters.fFinalVolumeLeft  = 0;
        finalSynthesisParameters.fFinalVolumeRight = 0;
    }
    else
    #endif // CONFIG_PROCESS_MUTED_CHANNELS
    // NOTE: the following block must be compiled unconditionally: it is both
    // the regular code path and the 'else' body of the mute check above.
    // (It was previously placed in an '#else' branch, which excluded it when
    // CONFIG_PROCESS_MUTED_CHANNELS was enabled, leaving the 'else' above
    // dangling and wrongly binding it to the next statement.)
    {
        float finalVolume = pEngineChannel->MidiVolume * crossfadeVolume;
        float fModVolume;
        if (pSignalUnitRack == NULL) {
            fModVolume = pEG1->getLevel();
        } else {
            fModVolume = pSignalUnitRack->GetEndpointUnit()->GetVolume();
        }
        NoteVolume.applyCurrentValueTo(fModVolume);
        finalVolume *= fModVolume;

        float panL = PanLeftSmoother.render();
        float panR = PanRightSmoother.render();
        NotePan[0].applyCurrentValueTo(panL);
        NotePan[1].applyCurrentValueTo(panR);

        finalSynthesisParameters.fFinalVolumeLeft  = finalVolume * VolumeLeft  * panL;
        finalSynthesisParameters.fFinalVolumeRight = finalVolume * VolumeRight * panR;
    }
    #endif // CONFIG_INTERPOLATE_VOLUME

    if (pSignalUnitRack == NULL) {
        // setup EG 2 (VCF Cutoff EG)
        {
            // get current value of EG2 controller
            double eg2controllervalue = GetEG2ControllerValue(MIDIVelocity());

            // calculate influence of EG2 controller on EG2's parameters
            EGInfo egInfo = CalculateEG2ControllerInfluence(eg2controllervalue);

            if (pNote) { // apply per-note real-time overrides requested by instrument script
                pNote->Override.CutoffAttack.applyTo(egInfo.Attack);
                pNote->Override.CutoffDecay.applyTo(egInfo.Decay);
                pNote->Override.CutoffRelease.applyTo(egInfo.Release);
            }

            TriggerEG2(egInfo, velrelease, velocityAttenuation, GetEngine()->SampleRate, MIDIVelocity());
        }


        // setup EG 3 (VCO EG)
        {
            // if portamento mode is on, we dedicate EG3 purely for portamento, otherwise if portamento is off we do as told by the patch
            bool bPortamento = pEngineChannel->PortamentoMode && pEngineChannel->PortamentoPos >= 0.0f;
            float eg3depth = (bPortamento)
                ? RTMath::CentsToFreqRatio((pEngineChannel->PortamentoPos - (float) MIDIKey()) * 100)
                : RTMath::CentsToFreqRatio(RgnInfo.EG3Depth);
            float eg3time = (bPortamento)
                ? pEngineChannel->PortamentoTime
                : RgnInfo.EG3Attack;
            EG3.trigger(eg3depth, eg3time, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
            dmsg(5,("PortamentoPos=%f, depth=%f, time=%f\n", pEngineChannel->PortamentoPos, eg3depth, eg3time));
        }


        // setup LFO 1 (VCA LFO)
        InitLFO1();
        // setup LFO 2 (VCF Cutoff LFO)
        InitLFO2();
        // setup LFO 3 (VCO LFO)
        InitLFO3();
    }


    #if CONFIG_FORCE_FILTER
    const bool bUseFilter = true;
    #else // use filter only if instrument file told so
    const bool bUseFilter = RgnInfo.VCFEnabled;
    #endif // CONFIG_FORCE_FILTER
    SYNTHESIS_MODE_SET_FILTER(SynthesisMode, bUseFilter);
    if (bUseFilter) {
        #ifdef CONFIG_OVERRIDE_CUTOFF_CTRL
        VCFCutoffCtrl.controller = CONFIG_OVERRIDE_CUTOFF_CTRL;
        #else // use the one defined in the instrument file
        VCFCutoffCtrl.controller = GetVCFCutoffCtrl();
        #endif // CONFIG_OVERRIDE_CUTOFF_CTRL

        #ifdef CONFIG_OVERRIDE_RESONANCE_CTRL
        VCFResonanceCtrl.controller = CONFIG_OVERRIDE_RESONANCE_CTRL;
        #else // use the one defined in the instrument file
        VCFResonanceCtrl.controller = GetVCFResonanceCtrl();
        #endif // CONFIG_OVERRIDE_RESONANCE_CTRL

        #ifndef CONFIG_OVERRIDE_FILTER_TYPE
        finalSynthesisParameters.filterLeft.SetType(RgnInfo.VCFType);
        finalSynthesisParameters.filterRight.SetType(RgnInfo.VCFType);
        #else // override filter type
        finalSynthesisParameters.filterLeft.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
        finalSynthesisParameters.filterRight.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
        #endif // CONFIG_OVERRIDE_FILTER_TYPE

        VCFCutoffCtrl.value    = pEngineChannel->ControllerTable[VCFCutoffCtrl.controller];
        VCFResonanceCtrl.value = pEngineChannel->ControllerTable[VCFResonanceCtrl.controller];

        // calculate cutoff frequency
        CutoffBase = CalculateCutoffBase(MIDIVelocity());

        VCFCutoffCtrl.fvalue = CalculateFinalCutoff(CutoffBase);

        // calculate resonance
        float resonance = (float) (VCFResonanceCtrl.controller ? VCFResonanceCtrl.value : RgnInfo.VCFResonance);
        VCFResonanceCtrl.fvalue = resonance;
    } else {
        VCFCutoffCtrl.controller    = 0;
        VCFResonanceCtrl.controller = 0;
    }

    // prime the per-voice EQ (if the signal unit rack provides one and the
    // EQ effect is actually supported)
    const bool bEq =
        pSignalUnitRack != NULL && pSignalUnitRack->HasEq() && pEq->HasSupport();

    if (bEq) {
        pEq->GetInChannelLeft()->Clear();
        pEq->GetInChannelRight()->Clear();
        pEq->RenderAudio(GetEngine()->pAudioOutputDevice->MaxSamplesPerCycle());
    }

    return 0; // success
}
385 |
|
386 |
void AbstractVoice::SetSampleStartOffset() { |
387 |
double pos = RgnInfo.SampleStartOffset; // offset where we should start playback of sample |
388 |
|
389 |
// if another sample playback start position was requested by instrument |
390 |
// script (built-in script function play_note()) |
391 |
if (pNote && pNote->Override.SampleOffset >= 0) { |
392 |
double overridePos = |
393 |
double(SmplInfo.SampleRate) * double(pNote->Override.SampleOffset) / 1000000.0; |
394 |
if (overridePos < SmplInfo.TotalFrameCount) |
395 |
pos = overridePos; |
396 |
} |
397 |
|
398 |
finalSynthesisParameters.dPos = pos; |
399 |
Pos = pos; |
400 |
} |
401 |
|
402 |
/**
 * Synthesizes the current audio fragment for this voice.
 *
 * @param Samples - number of sample points to be rendered in this audio
 *                  fragment cycle
 * @param pSrc    - pointer to input sample data
 * @param Skip    - number of sample points to skip in output buffer
 */
410 |
void AbstractVoice::Synthesize(uint Samples, sample_t* pSrc, uint Skip) {
    bool delay = false; // Whether the voice playback should be delayed for this call

    // a pending delay on the signal unit rack's endpoint means the voice
    // playback itself has to be delayed (partially or entirely)
    if (pSignalUnitRack != NULL) {
        uint delaySteps = pSignalUnitRack->GetEndpointUnit()->DelayTrigger();
        if (delaySteps > 0) { // delay on the endpoint unit means delay of the voice playback
            if (delaySteps >= Samples) {
                pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(Samples);
                delay = true;
            } else {
                pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(delaySteps);
                Samples -= delaySteps;
                Skip += delaySteps;
            }
        }
    }

    AbstractEngineChannel* pChannel = pEngineChannel;
    MidiKeyBase* pMidiKeyInfo = GetMidiKeyInfo(MIDIKey());

    const bool bVoiceRequiresDedicatedRouting =
        pEngineChannel->GetFxSendCount() > 0 &&
        (pMidiKeyInfo->ReverbSend || pMidiKeyInfo->ChorusSend);

    const bool bEq =
        pSignalUnitRack != NULL && pSignalUnitRack->HasEq() && pEq->HasSupport();

    // select the output buffers this voice renders into: per-voice EQ input,
    // dedicated voice channels (for per-key FX sends), or the regular
    // engine channel buffers
    if (bEq) {
        pEq->GetInChannelLeft()->Clear();
        pEq->GetInChannelRight()->Clear();
        finalSynthesisParameters.pOutLeft = &pEq->GetInChannelLeft()->Buffer()[Skip];
        finalSynthesisParameters.pOutRight = &pEq->GetInChannelRight()->Buffer()[Skip];
        pSignalUnitRack->UpdateEqSettings(pEq);
    } else if (bVoiceRequiresDedicatedRouting) {
        finalSynthesisParameters.pOutLeft = &GetEngine()->pDedicatedVoiceChannelLeft->Buffer()[Skip];
        finalSynthesisParameters.pOutRight = &GetEngine()->pDedicatedVoiceChannelRight->Buffer()[Skip];
    } else {
        finalSynthesisParameters.pOutLeft = &pChannel->pChannelLeft->Buffer()[Skip];
        finalSynthesisParameters.pOutRight = &pChannel->pChannelRight->Buffer()[Skip];
    }
    finalSynthesisParameters.pSrc = pSrc;

    RTList<Event>::Iterator itCCEvent = pChannel->pEvents->first();
    RTList<Event>::Iterator itNoteEvent;
    GetFirstEventOnKey(HostKey(), itNoteEvent);

    RTList<Event>::Iterator itGroupEvent;
    if (pGroupEvents && !Orphan) itGroupEvent = pGroupEvents->first();

    if (itTriggerEvent) { // skip events that happened before this voice was triggered
        while (itCCEvent && itCCEvent->FragmentPos() <= Skip) ++itCCEvent;
        while (itGroupEvent && itGroupEvent->FragmentPos() <= Skip) ++itGroupEvent;

        // we can't simply compare the timestamp here, because note events
        // might happen on the same time stamp, so we have to deal on the
        // actual sequence the note events arrived instead (see bug #112)
        for (; itNoteEvent; ++itNoteEvent) {
            if (itTriggerEvent == itNoteEvent) {
                ++itNoteEvent;
                break;
            }
        }
    }

    // determine the fragment position at which a pending kill request
    // should start the fade out
    uint killPos = 0;
    if (itKillEvent) {
        int maxFadeOutPos = Samples - GetEngine()->GetMinFadeOutSamples();
        if (maxFadeOutPos < 0) {
            // There's not enough space in buffer to do a fade out
            // from max volume (this can only happen for audio
            // drivers that use Samples < MaxSamplesPerCycle).
            // End the EG1 here, at pos 0, with a shorter max fade
            // out time.
            if (pSignalUnitRack == NULL) {
                pEG1->enterFadeOutStage(Samples / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
            } else {
                pSignalUnitRack->EnterFadeOutStage(Samples / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
            }
            itKillEvent = Pool<Event>::Iterator();
        } else {
            killPos = RTMath::Min(itKillEvent->FragmentPos(), maxFadeOutPos);
        }
    }

    // render the fragment in subfragments of CONFIG_DEFAULT_SUBFRAGMENT_SIZE
    // sample points; modulation sources are evaluated once per subfragment
    uint i = Skip;
    while (i < Samples) {
        int iSubFragmentEnd = RTMath::Min(i + CONFIG_DEFAULT_SUBFRAGMENT_SIZE, Samples);

        // initialize all final synthesis parameters
        fFinalCutoff    = VCFCutoffCtrl.fvalue;
        fFinalResonance = VCFResonanceCtrl.fvalue;

        // process MIDI control change, aftertouch and pitchbend events for this subfragment
        processCCEvents(itCCEvent, iSubFragmentEnd);
        uint8_t pan = MIDIPan;
        if (pSignalUnitRack != NULL) pan = pSignalUnitRack->GetEndpointUnit()->CalculatePan(MIDIPan);

        PanLeftSmoother.update(AbstractEngine::PanCurve[128 - pan]);
        PanRightSmoother.update(AbstractEngine::PanCurve[pan]);

        finalSynthesisParameters.fFinalPitch = Pitch.PitchBase * Pitch.PitchBend;

        float fFinalVolume = VolumeSmoother.render() * CrossfadeSmoother.render();
#if CONFIG_PROCESS_MUTED_CHANNELS
        if (pChannel->GetMute()) fFinalVolume = 0;
#endif

        // process transition events (note on, note off & sustain pedal)
        processTransitionEvents(itNoteEvent, iSubFragmentEnd);
        processGroupEvents(itGroupEvent, iSubFragmentEnd);

        float fModVolume = 1;
        float fModPitch = 1;

        if (pSignalUnitRack == NULL) {
            // if the voice was killed in this subfragment, or if the
            // filter EG is finished, switch EG1 to fade out stage
            if ((itKillEvent && killPos <= iSubFragmentEnd) ||
                (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) &&
                 pEG2->getSegmentType() == EG::segment_end)) {
                pEG1->enterFadeOutStage();
                itKillEvent = Pool<Event>::Iterator();
            }

            // process envelope generators
            switch (pEG1->getSegmentType()) {
                case EG::segment_lin:
                    fModVolume *= pEG1->processLin();
                    break;
                case EG::segment_exp:
                    fModVolume *= pEG1->processExp();
                    break;
                case EG::segment_end:
                    fModVolume *= pEG1->getLevel();
                    break; // noop
                case EG::segment_pow:
                    fModVolume *= pEG1->processPow();
                    break;
            }
            switch (pEG2->getSegmentType()) {
                case EG::segment_lin:
                    fFinalCutoff *= pEG2->processLin();
                    break;
                case EG::segment_exp:
                    fFinalCutoff *= pEG2->processExp();
                    break;
                case EG::segment_end:
                    fFinalCutoff *= pEG2->getLevel();
                    break; // noop
                case EG::segment_pow:
                    fFinalCutoff *= pEG2->processPow();
                    break;
            }
            if (EG3.active()) fModPitch *= EG3.render();

            // process low frequency oscillators
            if (bLFO1Enabled) fModVolume *= (1.0f - pLFO1->render());
            if (bLFO2Enabled) fFinalCutoff *= (1.0f - pLFO2->render());
            if (bLFO3Enabled) fModPitch *= RTMath::CentsToFreqRatio(pLFO3->render());
        } else {
            // if the voice was killed in this subfragment, enter fade out stage
            if (itKillEvent && killPos <= iSubFragmentEnd) {
                pSignalUnitRack->EnterFadeOutStage();
                itKillEvent = Pool<Event>::Iterator();
            }

            // if the filter EG is finished, switch EG1 to fade out stage
            /*if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) &&
                pEG2->getSegmentType() == EG::segment_end) {
                pEG1->enterFadeOutStage();
                itKillEvent = Pool<Event>::Iterator();
            }*/
            // TODO: ^^^

            fFinalVolume    *= pSignalUnitRack->GetEndpointUnit()->GetVolume();
            fFinalCutoff    = pSignalUnitRack->GetEndpointUnit()->CalculateFilterCutoff(fFinalCutoff);
            fFinalResonance = pSignalUnitRack->GetEndpointUnit()->CalculateResonance(fFinalResonance);

            fModPitch = pSignalUnitRack->GetEndpointUnit()->CalculatePitch(fModPitch);
        }

        // apply per-note real-time modulation (instrument script overrides)
        NoteVolume.renderApplyTo(fModVolume);
        NotePitch.renderApplyTo(fModPitch);
        NoteCutoff.applyTo(fFinalCutoff);
        NoteResonance.applyTo(fFinalResonance);

        fFinalVolume *= fModVolume;

        finalSynthesisParameters.fFinalPitch *= fModPitch;

        // limit the pitch so we don't read outside the buffer
        finalSynthesisParameters.fFinalPitch = RTMath::Min(finalSynthesisParameters.fFinalPitch, float(1 << CONFIG_MAX_PITCH));

        // if filter enabled then update filter coefficients
        if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode)) {
            finalSynthesisParameters.filterLeft.SetParameters(fFinalCutoff, fFinalResonance, GetEngine()->SampleRate);
            finalSynthesisParameters.filterRight.SetParameters(fFinalCutoff, fFinalResonance, GetEngine()->SampleRate);
        }

        // do we need resampling?
        const float __PLUS_ONE_CENT  = 1.000577789506554859250142541782224725466f;
        const float __MINUS_ONE_CENT = 0.9994225441413807496009516495583113737666f;
        const bool bResamplingRequired = !(finalSynthesisParameters.fFinalPitch <= __PLUS_ONE_CENT &&
                                           finalSynthesisParameters.fFinalPitch >= __MINUS_ONE_CENT);
        SYNTHESIS_MODE_SET_INTERPOLATE(SynthesisMode, bResamplingRequired);

        // prepare final synthesis parameters structure
        finalSynthesisParameters.uiToGo = iSubFragmentEnd - i;

        float panL = PanLeftSmoother.render();
        float panR = PanRightSmoother.render();
        NotePan[0].renderApplyTo(panL);
        NotePan[1].renderApplyTo(panR);

#if CONFIG_INTERPOLATE_VOLUME
        // ramp linearly towards the new target volume over this subfragment
        finalSynthesisParameters.fFinalVolumeDeltaLeft =
            (fFinalVolume * VolumeLeft * panL -
             finalSynthesisParameters.fFinalVolumeLeft) / finalSynthesisParameters.uiToGo;
        finalSynthesisParameters.fFinalVolumeDeltaRight =
            (fFinalVolume * VolumeRight * panR -
             finalSynthesisParameters.fFinalVolumeRight) / finalSynthesisParameters.uiToGo;
#else
        finalSynthesisParameters.fFinalVolumeLeft =
            fFinalVolume * VolumeLeft * panL;
        finalSynthesisParameters.fFinalVolumeRight =
            fFinalVolume * VolumeRight * panR;
#endif
        // render audio for one subfragment
        if (!delay) RunSynthesisFunction(SynthesisMode, &finalSynthesisParameters, &loop);

        if (pSignalUnitRack == NULL) {
            // stop the rendering if volume EG is finished
            if (pEG1->getSegmentType() == EG::segment_end) break;
        } else {
            // stop the rendering if the endpoint unit is not active
            if (!pSignalUnitRack->GetEndpointUnit()->Active()) break;
        }

        const double newPos = Pos + (iSubFragmentEnd - i) * finalSynthesisParameters.fFinalPitch;

        if (pSignalUnitRack == NULL) {
            // increment envelopes' positions
            if (pEG1->active()) {

                // if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage
                if (SmplInfo.HasLoops && Pos <= SmplInfo.LoopStart && SmplInfo.LoopStart < newPos) {
                    pEG1->update(EG::event_hold_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                }

                pEG1->increment(1);
                if (!pEG1->toStageEndLeft()) pEG1->update(EG::event_stage_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
            }
            if (pEG2->active()) {
                pEG2->increment(1);
                if (!pEG2->toStageEndLeft()) pEG2->update(EG::event_stage_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
            }
            EG3.increment(1);
            if (!EG3.toEndLeft()) EG3.update(); // neutralize envelope coefficient if end reached
        } else {
            // if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage
            /*if (SmplInfo.HasLoops && Pos <= SmplInfo.LoopStart && SmplInfo.LoopStart < newPos) {
                pEG1->update(EG::event_hold_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
            }*/
            // TODO: ^^^

            if (!delay) pSignalUnitRack->Increment();
        }

        Pos = newPos;
        i = iSubFragmentEnd;
    }

    if (delay) return;

    // finally route the rendered audio to its destination
    if (bVoiceRequiresDedicatedRouting) {
        if (bEq) {
            pEq->RenderAudio(Samples);
            pEq->GetOutChannelLeft()->CopyTo(GetEngine()->pDedicatedVoiceChannelLeft, Samples);
            pEq->GetOutChannelRight()->CopyTo(GetEngine()->pDedicatedVoiceChannelRight, Samples);
        }
        optional<float> effectSendLevels[2] = {
            pMidiKeyInfo->ReverbSend,
            pMidiKeyInfo->ChorusSend
        };
        GetEngine()->RouteDedicatedVoiceChannels(pEngineChannel, effectSendLevels, Samples);
    } else if (bEq) {
        pEq->RenderAudio(Samples);
        pEq->GetOutChannelLeft()->MixTo(pChannel->pChannelLeft, Samples);
        pEq->GetOutChannelRight()->MixTo(pChannel->pChannelRight, Samples);
    }
}
701 |
|
702 |
/**
 * Process given list of MIDI control change, aftertouch and pitch bend
 * events for the given time.
 *
 * @param itEvent - iterator pointing to the next event to be processed
 * @param End     - youngest time stamp where processing should be stopped
 */
709 |
void AbstractVoice::processCCEvents(RTList<Event>::Iterator& itEvent, uint End) { |
710 |
for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) { |
711 |
if ((itEvent->Type == Event::type_control_change || itEvent->Type == Event::type_channel_pressure) |
712 |
&& itEvent->Param.CC.Controller) // if (valid) MIDI control change event |
713 |
{ |
714 |
if (itEvent->Param.CC.Controller == VCFCutoffCtrl.controller) { |
715 |
ProcessCutoffEvent(itEvent); |
716 |
} |
717 |
if (itEvent->Param.CC.Controller == VCFResonanceCtrl.controller) { |
718 |
processResonanceEvent(itEvent); |
719 |
} |
720 |
if (itEvent->Param.CC.Controller == CTRL_TABLE_IDX_AFTERTOUCH || |
721 |
itEvent->Type == Event::type_channel_pressure) |
722 |
{ |
723 |
ProcessChannelPressureEvent(itEvent); |
724 |
} |
725 |
if (pSignalUnitRack == NULL) { |
726 |
if (itEvent->Param.CC.Controller == pLFO1->ExtController) { |
727 |
pLFO1->updateByMIDICtrlValue(itEvent->Param.CC.Value); |
728 |
} |
729 |
if (itEvent->Param.CC.Controller == pLFO2->ExtController) { |
730 |
pLFO2->updateByMIDICtrlValue(itEvent->Param.CC.Value); |
731 |
} |
732 |
if (itEvent->Param.CC.Controller == pLFO3->ExtController) { |
733 |
pLFO3->updateByMIDICtrlValue(itEvent->Param.CC.Value); |
734 |
} |
735 |
} |
736 |
if (itEvent->Param.CC.Controller == 7) { // volume |
737 |
VolumeSmoother.update(AbstractEngine::VolumeCurve[itEvent->Param.CC.Value]); |
738 |
} else if (itEvent->Param.CC.Controller == 10) { // panpot |
739 |
MIDIPan = CalculatePan(itEvent->Param.CC.Value); |
740 |
} |
741 |
} else if (itEvent->Type == Event::type_pitchbend) { // if pitch bend event |
742 |
processPitchEvent(itEvent); |
743 |
} else if (itEvent->Type == Event::type_note_pressure) { |
744 |
ProcessPolyphonicKeyPressureEvent(itEvent); |
745 |
} |
746 |
|
747 |
ProcessCCEvent(itEvent); |
748 |
if (pSignalUnitRack != NULL) { |
749 |
pSignalUnitRack->ProcessCCEvent(itEvent); |
750 |
} |
751 |
} |
752 |
} |
753 |
|
754 |
/**
 * Convert the given MIDI pitch bend event into a frequency ratio and store
 * it as this voice's current pitch bend factor.
 *
 * @param itEvent - pitch bend event to be processed
 */
void AbstractVoice::processPitchEvent(RTList<Event>::Iterator& itEvent) {
    Pitch.PitchBend = RTMath::CentsToFreqRatio(itEvent->Param.Pitch.Pitch * Pitch.PitchBendRange);
}

/**
 * Apply a MIDI control change event of the filter resonance controller to
 * this voice's final resonance parameter.
 *
 * @param itEvent - MIDI control change event of the resonance controller
 */
void AbstractVoice::processResonanceEvent(RTList<Event>::Iterator& itEvent) {
    // convert absolute controller value to differential
    const int ctrldelta = itEvent->Param.CC.Value - VCFResonanceCtrl.value;
    VCFResonanceCtrl.value = itEvent->Param.CC.Value;
    const float resonancedelta = (float) ctrldelta;
    fFinalResonance += resonancedelta;
    // needed for initialization of parameter
    VCFResonanceCtrl.fvalue = itEvent->Param.CC.Value;
}

/** |
769 |
* Process given list of MIDI note on, note off, sustain pedal events and |
770 |
* note synthesis parameter events for the given time. |
771 |
* |
772 |
* @param itEvent - iterator pointing to the next event to be processed |
773 |
* @param End - youngest time stamp where processing should be stopped |
774 |
*/ |
775 |
void AbstractVoice::processTransitionEvents(RTList<Event>::Iterator& itEvent, uint End) { |
776 |
for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) { |
777 |
// some voice types ignore note off |
778 |
if (!(Type & (Voice::type_one_shot | Voice::type_release_trigger | Voice::type_controller_triggered))) { |
779 |
if (itEvent->Type == Event::type_release_key) { |
780 |
EnterReleaseStage(); |
781 |
} else if (itEvent->Type == Event::type_cancel_release_key) { |
782 |
if (pSignalUnitRack == NULL) { |
783 |
pEG1->update(EG::event_cancel_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
784 |
pEG2->update(EG::event_cancel_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
785 |
} else { |
786 |
pSignalUnitRack->CancelRelease(); |
787 |
} |
788 |
} |
789 |
} |
790 |
// process stop-note events (caused by built-in instrument script function note_off()) |
791 |
if (itEvent->Type == Event::type_release_note && pNote && |
792 |
pEngineChannel->pEngine->NoteByID( itEvent->Param.Note.ID ) == pNote) |
793 |
{ |
794 |
EnterReleaseStage(); |
795 |
} |
796 |
// process kill-note events (caused by built-in instrument script function fade_out()) |
797 |
if (itEvent->Type == Event::type_kill_note && pNote && |
798 |
pEngineChannel->pEngine->NoteByID( itEvent->Param.Note.ID ) == pNote) |
799 |
{ |
800 |
Kill(itEvent); |
801 |
} |
802 |
// process synthesis parameter events (caused by built-in realt-time instrument script functions) |
803 |
if (itEvent->Type == Event::type_note_synth_param && pNote && |
804 |
pEngineChannel->pEngine->NoteByID( itEvent->Param.NoteSynthParam.NoteID ) == pNote) |
805 |
{ |
806 |
switch (itEvent->Param.NoteSynthParam.Type) { |
807 |
case Event::synth_param_volume: |
808 |
NoteVolume.fadeTo(itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
809 |
NoteVolume.setFinal(itEvent->Param.NoteSynthParam.isFinal()); |
810 |
break; |
811 |
case Event::synth_param_volume_time: |
812 |
NoteVolume.setDefaultDuration(itEvent->Param.NoteSynthParam.AbsValue); |
813 |
break; |
814 |
case Event::synth_param_volume_curve: |
815 |
NoteVolume.setCurve((fade_curve_t)itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
816 |
break; |
817 |
case Event::synth_param_pitch: |
818 |
NotePitch.fadeTo(itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
819 |
NotePitch.setFinal(itEvent->Param.NoteSynthParam.isFinal()); |
820 |
break; |
821 |
case Event::synth_param_pitch_time: |
822 |
NotePitch.setDefaultDuration(itEvent->Param.NoteSynthParam.AbsValue); |
823 |
break; |
824 |
case Event::synth_param_pitch_curve: |
825 |
NotePitch.setCurve((fade_curve_t)itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
826 |
break; |
827 |
case Event::synth_param_pan: |
828 |
NotePan[0].fadeTo( |
829 |
AbstractEngine::PanCurveValueNorm(itEvent->Param.NoteSynthParam.AbsValue, 0 /*left*/), |
830 |
GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE |
831 |
); |
832 |
NotePan[1].fadeTo( |
833 |
AbstractEngine::PanCurveValueNorm(itEvent->Param.NoteSynthParam.AbsValue, 1 /*right*/), |
834 |
GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE |
835 |
); |
836 |
NotePan[0].setFinal(itEvent->Param.NoteSynthParam.isFinal()); |
837 |
NotePan[1].setFinal(itEvent->Param.NoteSynthParam.isFinal()); |
838 |
break; |
839 |
case Event::synth_param_pan_time: |
840 |
NotePan[0].setDefaultDuration(itEvent->Param.NoteSynthParam.AbsValue); |
841 |
NotePan[1].setDefaultDuration(itEvent->Param.NoteSynthParam.AbsValue); |
842 |
break; |
843 |
case Event::synth_param_pan_curve: |
844 |
NotePan[0].setCurve((fade_curve_t)itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
845 |
NotePan[1].setCurve((fade_curve_t)itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
846 |
break; |
847 |
case Event::synth_param_cutoff: |
848 |
NoteCutoff.Value = itEvent->Param.NoteSynthParam.AbsValue; |
849 |
NoteCutoff.Final = itEvent->Param.NoteSynthParam.isFinal(); |
850 |
break; |
851 |
case Event::synth_param_resonance: |
852 |
NoteResonance.Value = itEvent->Param.NoteSynthParam.AbsValue; |
853 |
NoteResonance.Final = itEvent->Param.NoteSynthParam.isFinal(); |
854 |
break; |
855 |
case Event::synth_param_amp_lfo_depth: |
856 |
pLFO1->setScriptDepthFactor( |
857 |
itEvent->Param.NoteSynthParam.AbsValue, |
858 |
itEvent->Param.NoteSynthParam.isFinal() |
859 |
); |
860 |
break; |
861 |
case Event::synth_param_amp_lfo_freq: |
862 |
if (itEvent->Param.NoteSynthParam.isFinal()) |
863 |
pLFO1->setScriptFrequencyFinal(itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
864 |
else |
865 |
pLFO1->setScriptFrequencyFactor(itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
866 |
break; |
867 |
case Event::synth_param_cutoff_lfo_depth: |
868 |
pLFO2->setScriptDepthFactor( |
869 |
itEvent->Param.NoteSynthParam.AbsValue, |
870 |
itEvent->Param.NoteSynthParam.isFinal() |
871 |
); |
872 |
break; |
873 |
case Event::synth_param_cutoff_lfo_freq: |
874 |
if (itEvent->Param.NoteSynthParam.isFinal()) |
875 |
pLFO2->setScriptFrequencyFinal(itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
876 |
else |
877 |
pLFO2->setScriptFrequencyFactor(itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
878 |
break; |
879 |
case Event::synth_param_pitch_lfo_depth: |
880 |
pLFO3->setScriptDepthFactor( |
881 |
itEvent->Param.NoteSynthParam.AbsValue, |
882 |
itEvent->Param.NoteSynthParam.isFinal() |
883 |
); |
884 |
break; |
885 |
case Event::synth_param_pitch_lfo_freq: |
886 |
pLFO3->setScriptFrequencyFactor(itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
887 |
break; |
888 |
|
889 |
case Event::synth_param_attack: |
890 |
case Event::synth_param_decay: |
891 |
case Event::synth_param_sustain: |
892 |
case Event::synth_param_release: |
893 |
case Event::synth_param_cutoff_attack: |
894 |
case Event::synth_param_cutoff_decay: |
895 |
case Event::synth_param_cutoff_sustain: |
896 |
case Event::synth_param_cutoff_release: |
897 |
break; // noop |
898 |
} |
899 |
} |
900 |
} |
901 |
} |
902 |
|
903 |
/** |
904 |
* Process given list of events aimed at all voices in a key group. |
905 |
* |
906 |
* @param itEvent - iterator pointing to the next event to be processed |
907 |
* @param End - youngest time stamp where processing should be stopped |
908 |
*/ |
909 |
void AbstractVoice::processGroupEvents(RTList<Event>::Iterator& itEvent, uint End) { |
910 |
for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) { |
911 |
ProcessGroupEvent(itEvent); |
912 |
} |
913 |
} |
914 |
|
915 |
/** @brief Update current portamento position. |
916 |
* |
917 |
* Will be called when portamento mode is enabled to get the final |
918 |
* portamento position of this active voice from where the next voice(s) |
919 |
* might continue to slide on. |
920 |
* |
921 |
* @param itNoteOffEvent - event which causes this voice to die soon |
922 |
*/ |
923 |
void AbstractVoice::UpdatePortamentoPos(Pool<Event>::Iterator& itNoteOffEvent) { |
924 |
if (pSignalUnitRack == NULL) { |
925 |
const float fFinalEG3Level = EG3.level(itNoteOffEvent->FragmentPos()); |
926 |
pEngineChannel->PortamentoPos = (float) MIDIKey() + RTMath::FreqRatioToCents(fFinalEG3Level) * 0.01f; |
927 |
} else { |
928 |
// TODO: |
929 |
} |
930 |
} |
931 |
|
932 |
/** |
933 |
* Kill the voice in regular sense. Let the voice render audio until |
934 |
* the kill event actually occured and then fade down the volume level |
935 |
* very quickly and let the voice die finally. Unlike a normal release |
936 |
* of a voice, a kill process cannot be cancalled and is therefore |
937 |
* usually used for voice stealing and key group conflicts. |
938 |
* |
939 |
* @param itKillEvent - event which caused the voice to be killed |
940 |
*/ |
941 |
void AbstractVoice::Kill(Pool<Event>::Iterator& itKillEvent) { |
942 |
#if CONFIG_DEVMODE |
943 |
if (!itKillEvent) dmsg(1,("AbstractVoice::Kill(): ERROR, !itKillEvent !!!\n")); |
944 |
if (itKillEvent && !itKillEvent.isValid()) dmsg(1,("AbstractVoice::Kill(): ERROR, itKillEvent invalid !!!\n")); |
945 |
#endif // CONFIG_DEVMODE |
946 |
|
947 |
if (itTriggerEvent && itKillEvent->FragmentPos() <= itTriggerEvent->FragmentPos()) return; |
948 |
this->itKillEvent = itKillEvent; |
949 |
} |
950 |
|
951 |
/**
 * Calculate the voice's initial pitch parameters: base pitch (tuning,
 * scale tuning, key transpose and sample rate ratio), pitch bend range
 * and current pitch bend factor.
 *
 * @param PitchBend - current MIDI pitch bend wheel position (-8192..8191)
 * @returns initial pitch info for this voice
 */
Voice::PitchInfo AbstractVoice::CalculatePitchInfo(int PitchBend) {
    PitchInfo pitch;
    double pitchbasecents = InstrInfo.FineTune + RgnInfo.FineTune + GetEngine()->ScaleTuning[MIDIKey() % 12];

    // GSt behaviour: maximum transpose up is 40 semitones. If
    // MIDI key is more than 40 semitones above unity note,
    // the transpose is not done.
    //
    // Update: Removed this GSt misbehavior. I don't think that any stock
    // gig sound requires it to resemble its original sound.
    // -- Christian, 2017-07-09
    if (!SmplInfo.Unpitched /* && (MIDIKey() - (int) RgnInfo.UnityNote) < 40*/)
        pitchbasecents += (MIDIKey() - (int) RgnInfo.UnityNote) * 100;

    pitch.PitchBase = RTMath::CentsToFreqRatioUnlimited(pitchbasecents) * (double(SmplInfo.SampleRate) / double(GetEngine()->SampleRate));
    pitch.PitchBendRange = 1.0 / 8192.0 * 100.0 * InstrInfo.PitchbendRange; // cents per wheel step
    pitch.PitchBend = RTMath::CentsToFreqRatio(PitchBend * pitch.PitchBendRange);

    return pitch;
}

void AbstractVoice::onScaleTuningChanged() { |
973 |
PitchInfo pitch = this->Pitch; |
974 |
double pitchbasecents = InstrInfo.FineTune + RgnInfo.FineTune + GetEngine()->ScaleTuning[MIDIKey() % 12]; |
975 |
|
976 |
// GSt behaviour: maximum transpose up is 40 semitones. If |
977 |
// MIDI key is more than 40 semitones above unity note, |
978 |
// the transpose is not done. |
979 |
// |
980 |
// Update: Removed this GSt misbehavior. I don't think that any stock |
981 |
// gig sound requires it to resemble its original sound. |
982 |
// -- Christian, 2017-07-09 |
983 |
if (!SmplInfo.Unpitched /* && (MIDIKey() - (int) RgnInfo.UnityNote) < 40*/) |
984 |
pitchbasecents += (MIDIKey() - (int) RgnInfo.UnityNote) * 100; |
985 |
|
986 |
pitch.PitchBase = RTMath::CentsToFreqRatioUnlimited(pitchbasecents) * (double(SmplInfo.SampleRate) / double(GetEngine()->SampleRate)); |
987 |
this->Pitch = pitch; |
988 |
} |
989 |
|
990 |
double AbstractVoice::CalculateVolume(double velocityAttenuation) { |
991 |
// For 16 bit samples, we downscale by 32768 to convert from |
992 |
// int16 value range to DSP value range (which is |
993 |
// -1.0..1.0). For 24 bit, we downscale from int32. |
994 |
float volume = velocityAttenuation / (SmplInfo.BitDepth == 16 ? 32768.0f : 32768.0f * 65536.0f); |
995 |
|
996 |
volume *= GetSampleAttenuation() * pEngineChannel->GlobalVolume * GLOBAL_VOLUME; |
997 |
|
998 |
// the volume of release triggered samples depends on note length |
999 |
if (Type & Voice::type_release_trigger) { |
1000 |
float noteLength = float(GetEngine()->FrameTime + Delay - |
1001 |
GetNoteOnTime(MIDIKey()) ) / GetEngine()->SampleRate; |
1002 |
|
1003 |
volume *= GetReleaseTriggerAttenuation(noteLength); |
1004 |
} |
1005 |
|
1006 |
return volume; |
1007 |
} |
1008 |
|
1009 |
float AbstractVoice::GetReleaseTriggerAttenuation(float noteLength) { |
1010 |
return 1 - RgnInfo.ReleaseTriggerDecay * noteLength; |
1011 |
} |
1012 |
|
1013 |
void AbstractVoice::EnterReleaseStage() { |
1014 |
if (pSignalUnitRack == NULL) { |
1015 |
pEG1->update(EG::event_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
1016 |
pEG2->update(EG::event_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
1017 |
} else { |
1018 |
pSignalUnitRack->EnterReleaseStage(); |
1019 |
} |
1020 |
} |
1021 |
|
1022 |
bool AbstractVoice::EG1Finished() { |
1023 |
if (pSignalUnitRack == NULL) { |
1024 |
return pEG1->getSegmentType() == EG::segment_end; |
1025 |
} else { |
1026 |
return !pSignalUnitRack->GetEndpointUnit()->Active(); |
1027 |
} |
1028 |
} |
1029 |
|
1030 |
} // namespace LinuxSampler |