/[svn]/linuxsampler/trunk/src/engines/common/AbstractVoice.cpp
ViewVC logotype

Contents of /linuxsampler/trunk/src/engines/common/AbstractVoice.cpp

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3118 - (show annotations) (download)
Fri Apr 21 13:33:03 2017 UTC (4 months ago) by schoenebeck
File size: 43797 byte(s)
* NKSP: Fixed crash when using built-in script array variable "%ALL_EVENTS".
* NKSP: Added built-in function "change_amp_lfo_depth()".
* NKSP: Added built-in function "change_amp_lfo_freq()".
* NKSP: Added built-in function "change_pitch_lfo_depth()".
* NKSP: Added built-in function "change_pitch_lfo_freq()".
* Bumped version (2.0.0.svn44).

1 /***************************************************************************
2 * *
3 * LinuxSampler - modular, streaming capable sampler *
4 * *
5 * Copyright (C) 2003,2004 by Benno Senoner and Christian Schoenebeck *
6 * Copyright (C) 2005-2008 Christian Schoenebeck *
7 * Copyright (C) 2009-2012 Christian Schoenebeck and Grigor Iliev *
8 * Copyright (C) 2013-2016 Christian Schoenebeck and Andreas Persson *
9 * *
10 * This program is free software; you can redistribute it and/or modify *
11 * it under the terms of the GNU General Public License as published by *
12 * the Free Software Foundation; either version 2 of the License, or *
13 * (at your option) any later version. *
14 * *
15 * This program is distributed in the hope that it will be useful, *
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
18 * GNU General Public License for more details. *
19 * *
20 * You should have received a copy of the GNU General Public License *
21 * along with this program; if not, write to the Free Software *
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, *
23 * MA 02111-1307 USA *
24 ***************************************************************************/
25
26 #include "AbstractVoice.h"
27
28 namespace LinuxSampler {
29
30 AbstractVoice::AbstractVoice(SignalUnitRack* pRack): pSignalUnitRack(pRack) {
31 pEngineChannel = NULL;
32 pLFO1 = new LFOUnsigned(1.0f); // amplitude LFO (0..1 range)
33 pLFO2 = new LFOUnsigned(1.0f); // filter LFO (0..1 range)
34 pLFO3 = new LFOSigned(1200.0f); // pitch LFO (-1200..+1200 range)
35 PlaybackState = playback_state_end;
36 SynthesisMode = 0; // set all mode bits to 0 first
37 // select synthesis implementation (asm core is not supported ATM)
38 #if 0 // CONFIG_ASM && ARCH_X86
39 SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, Features::supportsMMX() && Features::supportsSSE());
40 #else
41 SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, false);
42 #endif
43 SYNTHESIS_MODE_SET_PROFILING(SynthesisMode, gig::Profiler::isEnabled());
44
45 finalSynthesisParameters.filterLeft.Reset();
46 finalSynthesisParameters.filterRight.Reset();
47
48 pEq = NULL;
49 bEqSupport = false;
50 }
51
52 AbstractVoice::~AbstractVoice() {
53 if (pLFO1) delete pLFO1;
54 if (pLFO2) delete pLFO2;
55 if (pLFO3) delete pLFO3;
56
57 if(pEq != NULL) delete pEq;
58 }
59
60 void AbstractVoice::CreateEq() {
61 if(!bEqSupport) return;
62 if(pEq != NULL) delete pEq;
63 pEq = new EqSupport;
64 pEq->InitEffect(GetEngine()->pAudioOutputDevice);
65 }
66
67 /**
68 * Resets voice variables. Should only be called if rendering process is
69 * suspended / not running.
70 */
void AbstractVoice::Reset() {
    // flush the filter state of both output channels
    finalSynthesisParameters.filterLeft.Reset();
    finalSynthesisParameters.filterRight.Reset();
    // detach from any disk stream and mark the stream reference as unused
    DiskStreamRef.pStream = NULL;
    DiskStreamRef.hStream = 0;
    DiskStreamRef.State = Stream::state_unused;
    DiskStreamRef.OrderID = 0;
    PlaybackState = playback_state_end; // voice no longer playing
    // clear (invalidate) the trigger and kill event iterators
    itTriggerEvent = Pool<Event>::Iterator();
    itKillEvent = Pool<Event>::Iterator();
}
82
83 /**
84 * Initializes and triggers the voice, a disk stream will be launched if
85 * needed.
86 *
87 * @param pEngineChannel - engine channel on which this voice was ordered
88 * @param itNoteOnEvent - event that caused triggering of this voice
89 * @param PitchBend - MIDI detune factor (-8192 ... +8191)
90 * @param pRegion- points to the region which provides sample wave(s) and articulation data
91 * @param VoiceType - type of this voice
92 * @param iKeyGroup - a value > 0 defines a key group in which this voice is member of
93 * @returns 0 on success, a value < 0 if the voice wasn't triggered
94 * (either due to an error or e.g. because no region is
95 * defined for the given key)
96 */
97 int AbstractVoice::Trigger (
98 AbstractEngineChannel* pEngineChannel,
99 Pool<Event>::Iterator& itNoteOnEvent,
100 int PitchBend,
101 type_t VoiceType,
102 int iKeyGroup
103 ) {
104 this->pEngineChannel = pEngineChannel;
105 Orphan = false;
106
107 #if CONFIG_DEVMODE
108 if (itNoteOnEvent->FragmentPos() > GetEngine()->MaxSamplesPerCycle) { // just a sanity check for debugging
109 dmsg(1,("Voice::Trigger(): ERROR, TriggerDelay > Totalsamples\n"));
110 }
111 #endif // CONFIG_DEVMODE
112
113 Type = VoiceType;
114 pNote = pEngineChannel->pEngine->NoteByID( itNoteOnEvent->Param.Note.ID );
115 PlaybackState = playback_state_init; // mark voice as triggered, but no audio rendered yet
116 Delay = itNoteOnEvent->FragmentPos();
117 itTriggerEvent = itNoteOnEvent;
118 itKillEvent = Pool<Event>::Iterator();
119 MidiKeyBase* pKeyInfo = GetMidiKeyInfo(MIDIKey());
120
121 pGroupEvents = iKeyGroup ? pEngineChannel->ActiveKeyGroups[iKeyGroup] : 0;
122
123 SmplInfo = GetSampleInfo();
124 RgnInfo = GetRegionInfo();
125 InstrInfo = GetInstrumentInfo();
126
127 MIDIPan = CalculatePan(pEngineChannel->iLastPanRequest);
128
129 AboutToTrigger();
130
131 // calculate volume
132 const double velocityAttenuation = GetVelocityAttenuation(itNoteOnEvent->Param.Note.Velocity);
133 float volume = CalculateVolume(velocityAttenuation) * pKeyInfo->Volume;
134 if (volume <= 0) return -1;
135
136 // select channel mode (mono or stereo)
137 SYNTHESIS_MODE_SET_CHANNELS(SynthesisMode, SmplInfo.ChannelCount == 2);
138 // select bit depth (16 or 24)
139 SYNTHESIS_MODE_SET_BITDEPTH24(SynthesisMode, SmplInfo.BitDepth == 24);
140
141 // get starting crossfade volume level
142 float crossfadeVolume = CalculateCrossfadeVolume(itNoteOnEvent->Param.Note.Velocity);
143
144 VolumeLeft = volume * pKeyInfo->PanLeft;
145 VolumeRight = volume * pKeyInfo->PanRight;
146
147 // this rate is used for rather mellow volume fades
148 const float subfragmentRate = GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE;
149 // this rate is used for very fast volume fades
150 const float quickRampRate = RTMath::Min(subfragmentRate, GetEngine()->SampleRate * 0.001f /* 1ms */);
151 CrossfadeSmoother.trigger(crossfadeVolume, subfragmentRate);
152
153 VolumeSmoother.trigger(pEngineChannel->MidiVolume, subfragmentRate);
154 NoteVolumeSmoother.trigger(pNote ? pNote->Override.Volume : 1.f, quickRampRate);
155
156 // Check if the sample needs disk streaming or is too short for that
157 long cachedsamples = GetSampleCacheSize() / SmplInfo.FrameSize;
158 DiskVoice = cachedsamples < SmplInfo.TotalFrameCount;
159
160 SetSampleStartOffset();
161
162 if (DiskVoice) { // voice to be streamed from disk
163 if (cachedsamples > (GetEngine()->MaxSamplesPerCycle << CONFIG_MAX_PITCH)) {
164 MaxRAMPos = cachedsamples - (GetEngine()->MaxSamplesPerCycle << CONFIG_MAX_PITCH) / SmplInfo.ChannelCount; //TODO: this calculation is too pessimistic and may better be moved to Render() method, so it calculates MaxRAMPos dependent to the current demand of sample points to be rendered (e.g. in case of JACK)
165 } else {
166 // The cache is too small to fit a max sample buffer.
167 // Setting MaxRAMPos to 0 will probably cause a click
168 // in the audio, but it's better than not handling
169 // this case at all, which would have caused the
170 // unsigned MaxRAMPos to be set to a negative number.
171 MaxRAMPos = 0;
172 }
173
174 // check if there's a loop defined which completely fits into the cached (RAM) part of the sample
175 RAMLoop = (SmplInfo.HasLoops && (SmplInfo.LoopStart + SmplInfo.LoopLength) <= MaxRAMPos);
176
177 if (OrderNewStream()) return -1;
178 dmsg(4,("Disk voice launched (cached samples: %ld, total Samples: %d, MaxRAMPos: %lu, RAMLooping: %s)\n", cachedsamples, SmplInfo.TotalFrameCount, MaxRAMPos, (RAMLoop) ? "yes" : "no"));
179 }
180 else { // RAM only voice
181 MaxRAMPos = cachedsamples;
182 RAMLoop = (SmplInfo.HasLoops);
183 dmsg(4,("RAM only voice launched (Looping: %s)\n", (RAMLoop) ? "yes" : "no"));
184 }
185 if (RAMLoop) {
186 loop.uiTotalCycles = SmplInfo.LoopPlayCount;
187 loop.uiCyclesLeft = SmplInfo.LoopPlayCount;
188 loop.uiStart = SmplInfo.LoopStart;
189 loop.uiEnd = SmplInfo.LoopStart + SmplInfo.LoopLength;
190 loop.uiSize = SmplInfo.LoopLength;
191 }
192
193 Pitch = CalculatePitchInfo(PitchBend);
194 NotePitch = (pNote) ? pNote->Override.Pitch : 1.0f;
195 NoteCutoff = (pNote) ? pNote->Override.Cutoff : 1.0f;
196 NoteResonance = (pNote) ? pNote->Override.Resonance : 1.0f;
197
198 // the length of the decay and release curves are dependent on the velocity
199 const double velrelease = 1 / GetVelocityRelease(itNoteOnEvent->Param.Note.Velocity);
200
201 if (pSignalUnitRack == NULL) { // setup EG 1 (VCA EG)
202 // get current value of EG1 controller
203 double eg1controllervalue = GetEG1ControllerValue(itNoteOnEvent->Param.Note.Velocity);
204
205 // calculate influence of EG1 controller on EG1's parameters
206 EGInfo egInfo = CalculateEG1ControllerInfluence(eg1controllervalue);
207
208 if (pNote) {
209 egInfo.Attack *= pNote->Override.Attack;
210 egInfo.Decay *= pNote->Override.Decay;
211 egInfo.Release *= pNote->Override.Release;
212 }
213
214 TriggerEG1(egInfo, velrelease, velocityAttenuation, GetEngine()->SampleRate, itNoteOnEvent->Param.Note.Velocity);
215 } else {
216 pSignalUnitRack->Trigger();
217 }
218
219 const uint8_t pan = (pSignalUnitRack) ? pSignalUnitRack->GetEndpointUnit()->CalculatePan(MIDIPan) : MIDIPan;
220 NotePanLeft = (pNote) ? AbstractEngine::PanCurveValueNorm(pNote->Override.Pan, 0 /*left*/ ) : 1.f;
221 NotePanRight = (pNote) ? AbstractEngine::PanCurveValueNorm(pNote->Override.Pan, 1 /*right*/) : 1.f;
222 PanLeftSmoother.trigger(
223 AbstractEngine::PanCurve[128 - pan] * NotePanLeft,
224 quickRampRate //NOTE: maybe we should have 2 separate pan smoothers, one for MIDI CC10 (with slow rate) and one for instrument script change_pan() calls (with fast rate)
225 );
226 PanRightSmoother.trigger(
227 AbstractEngine::PanCurve[pan] * NotePanRight,
228 quickRampRate //NOTE: maybe we should have 2 separate pan smoothers, one for MIDI CC10 (with slow rate) and one for instrument script change_pan() calls (with fast rate)
229 );
230
231 #ifdef CONFIG_INTERPOLATE_VOLUME
232 // setup initial volume in synthesis parameters
233 #ifdef CONFIG_PROCESS_MUTED_CHANNELS
234 if (pEngineChannel->GetMute()) {
235 finalSynthesisParameters.fFinalVolumeLeft = 0;
236 finalSynthesisParameters.fFinalVolumeRight = 0;
237 }
238 else
239 #else
240 {
241 float finalVolume;
242 if (pSignalUnitRack == NULL) {
243 finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * pEG1->getLevel();
244 } else {
245 finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * pSignalUnitRack->GetEndpointUnit()->GetVolume();
246 }
247
248 finalSynthesisParameters.fFinalVolumeLeft = finalVolume * VolumeLeft * PanLeftSmoother.render();
249 finalSynthesisParameters.fFinalVolumeRight = finalVolume * VolumeRight * PanRightSmoother.render();
250 }
251 #endif
252 #endif
253
254 if (pSignalUnitRack == NULL) {
255 // setup EG 2 (VCF Cutoff EG)
256 {
257 // get current value of EG2 controller
258 double eg2controllervalue = GetEG2ControllerValue(itNoteOnEvent->Param.Note.Velocity);
259
260 // calculate influence of EG2 controller on EG2's parameters
261 EGInfo egInfo = CalculateEG2ControllerInfluence(eg2controllervalue);
262
263 TriggerEG2(egInfo, velrelease, velocityAttenuation, GetEngine()->SampleRate, itNoteOnEvent->Param.Note.Velocity);
264 }
265
266
267 // setup EG 3 (VCO EG)
268 {
269 // if portamento mode is on, we dedicate EG3 purely for portamento, otherwise if portamento is off we do as told by the patch
270 bool bPortamento = pEngineChannel->PortamentoMode && pEngineChannel->PortamentoPos >= 0.0f;
271 float eg3depth = (bPortamento)
272 ? RTMath::CentsToFreqRatio((pEngineChannel->PortamentoPos - (float) MIDIKey()) * 100)
273 : RTMath::CentsToFreqRatio(RgnInfo.EG3Depth);
274 float eg3time = (bPortamento)
275 ? pEngineChannel->PortamentoTime
276 : RgnInfo.EG3Attack;
277 EG3.trigger(eg3depth, eg3time, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
278 dmsg(5,("PortamentoPos=%f, depth=%f, time=%f\n", pEngineChannel->PortamentoPos, eg3depth, eg3time));
279 }
280
281
282 // setup LFO 1 (VCA LFO)
283 InitLFO1();
284 // setup LFO 2 (VCF Cutoff LFO)
285 InitLFO2();
286 // setup LFO 3 (VCO LFO)
287 InitLFO3();
288 }
289
290
291 #if CONFIG_FORCE_FILTER
292 const bool bUseFilter = true;
293 #else // use filter only if instrument file told so
294 const bool bUseFilter = RgnInfo.VCFEnabled;
295 #endif // CONFIG_FORCE_FILTER
296 SYNTHESIS_MODE_SET_FILTER(SynthesisMode, bUseFilter);
297 if (bUseFilter) {
298 #ifdef CONFIG_OVERRIDE_CUTOFF_CTRL
299 VCFCutoffCtrl.controller = CONFIG_OVERRIDE_CUTOFF_CTRL;
300 #else // use the one defined in the instrument file
301 VCFCutoffCtrl.controller = GetVCFCutoffCtrl();
302 #endif // CONFIG_OVERRIDE_CUTOFF_CTRL
303
304 #ifdef CONFIG_OVERRIDE_RESONANCE_CTRL
305 VCFResonanceCtrl.controller = CONFIG_OVERRIDE_RESONANCE_CTRL;
306 #else // use the one defined in the instrument file
307 VCFResonanceCtrl.controller = GetVCFResonanceCtrl();
308 #endif // CONFIG_OVERRIDE_RESONANCE_CTRL
309
310 #ifndef CONFIG_OVERRIDE_FILTER_TYPE
311 finalSynthesisParameters.filterLeft.SetType(RgnInfo.VCFType);
312 finalSynthesisParameters.filterRight.SetType(RgnInfo.VCFType);
313 #else // override filter type
314 finalSynthesisParameters.filterLeft.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
315 finalSynthesisParameters.filterRight.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
316 #endif // CONFIG_OVERRIDE_FILTER_TYPE
317
318 VCFCutoffCtrl.value = pEngineChannel->ControllerTable[VCFCutoffCtrl.controller];
319 VCFResonanceCtrl.value = pEngineChannel->ControllerTable[VCFResonanceCtrl.controller];
320
321 // calculate cutoff frequency
322 CutoffBase = CalculateCutoffBase(itNoteOnEvent->Param.Note.Velocity);
323
324 VCFCutoffCtrl.fvalue = CalculateFinalCutoff(CutoffBase);
325
326 // calculate resonance
327 float resonance = (float) (VCFResonanceCtrl.controller ? VCFResonanceCtrl.value : RgnInfo.VCFResonance);
328 VCFResonanceCtrl.fvalue = resonance;
329 } else {
330 VCFCutoffCtrl.controller = 0;
331 VCFResonanceCtrl.controller = 0;
332 }
333
334 const bool bEq =
335 pSignalUnitRack != NULL && pSignalUnitRack->HasEq() && pEq->HasSupport();
336
337 if (bEq) {
338 pEq->GetInChannelLeft()->Clear();
339 pEq->GetInChannelRight()->Clear();
340 pEq->RenderAudio(GetEngine()->pAudioOutputDevice->MaxSamplesPerCycle());
341 }
342
343 return 0; // success
344 }
345
346 void AbstractVoice::SetSampleStartOffset() {
347 finalSynthesisParameters.dPos = RgnInfo.SampleStartOffset; // offset where we should start playback of sample (0 - 2000 sample points)
348 Pos = RgnInfo.SampleStartOffset;
349 }
350
351 /**
352 * Synthesizes the current audio fragment for this voice.
353 *
354 * @param Samples - number of sample points to be rendered in this audio
355 * fragment cycle
356 * @param pSrc - pointer to input sample data
357 * @param Skip - number of sample points to skip in output buffer
358 */
void AbstractVoice::Synthesize(uint Samples, sample_t* pSrc, uint Skip) {
    bool delay = false; // Whether the voice playback should be delayed for this call

    // a pending delay on the rack's endpoint unit postpones the playback of
    // the whole voice, either entirely (delay = true) or partially by
    // shifting Skip/Samples
    if (pSignalUnitRack != NULL) {
        uint delaySteps = pSignalUnitRack->GetEndpointUnit()->DelayTrigger();
        if (delaySteps > 0) { // delay on the endpoint unit means delay of the voice playback
            if (delaySteps >= Samples) {
                pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(Samples);
                delay = true;
            } else {
                pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(delaySteps);
                Samples -= delaySteps;
                Skip += delaySteps;
            }
        }
    }

    AbstractEngineChannel* pChannel = pEngineChannel;
    MidiKeyBase* pMidiKeyInfo = GetMidiKeyInfo(MIDIKey());

    // dedicated routing is needed when FX sends exist and this key carries
    // its own reverb / chorus send levels
    const bool bVoiceRequiresDedicatedRouting =
        pEngineChannel->GetFxSendCount() > 0 &&
        (pMidiKeyInfo->ReverbSend || pMidiKeyInfo->ChorusSend);

    // EQ is only active if the signal unit rack requests it and the EQ
    // effect reported support for the current audio device
    const bool bEq =
        pSignalUnitRack != NULL && pSignalUnitRack->HasEq() && pEq->HasSupport();

    // select the output buffers this voice renders into: the EQ input
    // channels, the engine's dedicated voice channels, or the engine
    // channel's regular output buffers
    if (bEq) {
        pEq->GetInChannelLeft()->Clear();
        pEq->GetInChannelRight()->Clear();
        finalSynthesisParameters.pOutLeft = &pEq->GetInChannelLeft()->Buffer()[Skip];
        finalSynthesisParameters.pOutRight = &pEq->GetInChannelRight()->Buffer()[Skip];
        pSignalUnitRack->UpdateEqSettings(pEq);
    } else if (bVoiceRequiresDedicatedRouting) {
        finalSynthesisParameters.pOutLeft = &GetEngine()->pDedicatedVoiceChannelLeft->Buffer()[Skip];
        finalSynthesisParameters.pOutRight = &GetEngine()->pDedicatedVoiceChannelRight->Buffer()[Skip];
    } else {
        finalSynthesisParameters.pOutLeft = &pChannel->pChannelLeft->Buffer()[Skip];
        finalSynthesisParameters.pOutRight = &pChannel->pChannelRight->Buffer()[Skip];
    }
    finalSynthesisParameters.pSrc = pSrc;

    // gather the event streams relevant for this voice: channel-wide CC
    // events, note events on this voice's key, and key group events
    RTList<Event>::Iterator itCCEvent = pChannel->pEvents->first();
    RTList<Event>::Iterator itNoteEvent;
    GetFirstEventOnKey(HostKey(), itNoteEvent);

    RTList<Event>::Iterator itGroupEvent;
    if (pGroupEvents && !Orphan) itGroupEvent = pGroupEvents->first();

    if (itTriggerEvent) { // skip events that happened before this voice was triggered
        while (itCCEvent && itCCEvent->FragmentPos() <= Skip) ++itCCEvent;
        while (itGroupEvent && itGroupEvent->FragmentPos() <= Skip) ++itGroupEvent;

        // we can't simply compare the timestamp here, because note events
        // might happen on the same time stamp, so we have to deal on the
        // actual sequence the note events arrived instead (see bug #112)
        for (; itNoteEvent; ++itNoteEvent) {
            if (itTriggerEvent == itNoteEvent) {
                ++itNoteEvent;
                break;
            }
        }
    }

    // determine the fragment position where a scheduled kill (fade out)
    // has to begin, so the fade still fits into this fragment
    uint killPos = 0;
    if (itKillEvent) {
        int maxFadeOutPos = Samples - GetEngine()->GetMinFadeOutSamples();
        if (maxFadeOutPos < 0) {
            // There's not enough space in buffer to do a fade out
            // from max volume (this can only happen for audio
            // drivers that use Samples < MaxSamplesPerCycle).
            // End the EG1 here, at pos 0, with a shorter max fade
            // out time.
            if (pSignalUnitRack == NULL) {
                pEG1->enterFadeOutStage(Samples / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
            } else {
                pSignalUnitRack->EnterFadeOutStage(Samples / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
            }
            itKillEvent = Pool<Event>::Iterator();
        } else {
            killPos = RTMath::Min(itKillEvent->FragmentPos(), maxFadeOutPos);
        }
    }

    // render the fragment in subfragments of (at most)
    // CONFIG_DEFAULT_SUBFRAGMENT_SIZE sample points; control parameters
    // (EGs, LFOs, smoothers) are updated once per subfragment
    uint i = Skip;
    while (i < Samples) {
        int iSubFragmentEnd = RTMath::Min(i + CONFIG_DEFAULT_SUBFRAGMENT_SIZE, Samples);

        // initialize all final synthesis parameters
        fFinalCutoff = VCFCutoffCtrl.fvalue;
        fFinalResonance = VCFResonanceCtrl.fvalue;

        // process MIDI control change, aftertouch and pitchbend events for this subfragment
        processCCEvents(itCCEvent, iSubFragmentEnd);
        uint8_t pan = MIDIPan;
        if (pSignalUnitRack != NULL) pan = pSignalUnitRack->GetEndpointUnit()->CalculatePan(MIDIPan);

        PanLeftSmoother.update(AbstractEngine::PanCurve[128 - pan] * NotePanLeft);
        PanRightSmoother.update(AbstractEngine::PanCurve[pan] * NotePanRight);

        finalSynthesisParameters.fFinalPitch = Pitch.PitchBase * Pitch.PitchBend * NotePitch;

        float fFinalVolume = VolumeSmoother.render() * CrossfadeSmoother.render() * NoteVolumeSmoother.render();
#ifdef CONFIG_PROCESS_MUTED_CHANNELS
        if (pChannel->GetMute()) fFinalVolume = 0;
#endif

        // process transition events (note on, note off & sustain pedal)
        processTransitionEvents(itNoteEvent, iSubFragmentEnd);
        processGroupEvents(itGroupEvent, iSubFragmentEnd);

        if (pSignalUnitRack == NULL) {
            // if the voice was killed in this subfragment, or if the
            // filter EG is finished, switch EG1 to fade out stage
            if ((itKillEvent && killPos <= iSubFragmentEnd) ||
                (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) &&
                 pEG2->getSegmentType() == EG::segment_end)) {
                pEG1->enterFadeOutStage();
                itKillEvent = Pool<Event>::Iterator();
            }

            // process envelope generators
            switch (pEG1->getSegmentType()) {
                case EG::segment_lin:
                    fFinalVolume *= pEG1->processLin();
                    break;
                case EG::segment_exp:
                    fFinalVolume *= pEG1->processExp();
                    break;
                case EG::segment_end:
                    fFinalVolume *= pEG1->getLevel();
                    break; // noop
                case EG::segment_pow:
                    fFinalVolume *= pEG1->processPow();
                    break;
            }
            switch (pEG2->getSegmentType()) {
                case EG::segment_lin:
                    fFinalCutoff *= pEG2->processLin();
                    break;
                case EG::segment_exp:
                    fFinalCutoff *= pEG2->processExp();
                    break;
                case EG::segment_end:
                    fFinalCutoff *= pEG2->getLevel();
                    break; // noop
                case EG::segment_pow:
                    fFinalCutoff *= pEG2->processPow();
                    break;
            }
            if (EG3.active()) finalSynthesisParameters.fFinalPitch *= EG3.render();

            // process low frequency oscillators
            if (bLFO1Enabled) fFinalVolume *= (1.0f - pLFO1->render());
            if (bLFO2Enabled) fFinalCutoff *= (1.0f - pLFO2->render());
            if (bLFO3Enabled) finalSynthesisParameters.fFinalPitch *= RTMath::CentsToFreqRatio(pLFO3->render());
        } else {
            // if the voice was killed in this subfragment, enter fade out stage
            if (itKillEvent && killPos <= iSubFragmentEnd) {
                pSignalUnitRack->EnterFadeOutStage();
                itKillEvent = Pool<Event>::Iterator();
            }

            // if the filter EG is finished, switch EG1 to fade out stage
            /*if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) &&
                pEG2->getSegmentType() == EG::segment_end) {
                pEG1->enterFadeOutStage();
                itKillEvent = Pool<Event>::Iterator();
            }*/
            // TODO: ^^^

            // the endpoint unit of the rack provides the final modulation
            // values instead of the built-in EGs / LFOs
            fFinalVolume *= pSignalUnitRack->GetEndpointUnit()->GetVolume();
            fFinalCutoff = pSignalUnitRack->GetEndpointUnit()->CalculateFilterCutoff(fFinalCutoff);
            fFinalResonance = pSignalUnitRack->GetEndpointUnit()->CalculateResonance(fFinalResonance);

            finalSynthesisParameters.fFinalPitch =
                pSignalUnitRack->GetEndpointUnit()->CalculatePitch(finalSynthesisParameters.fFinalPitch);

        }

        // apply per-note overrides set by real-time instrument scripts
        fFinalCutoff *= NoteCutoff;
        fFinalResonance *= NoteResonance;

        // limit the pitch so we don't read outside the buffer
        finalSynthesisParameters.fFinalPitch = RTMath::Min(finalSynthesisParameters.fFinalPitch, float(1 << CONFIG_MAX_PITCH));

        // if filter enabled then update filter coefficients
        if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode)) {
            finalSynthesisParameters.filterLeft.SetParameters(fFinalCutoff, fFinalResonance, GetEngine()->SampleRate);
            finalSynthesisParameters.filterRight.SetParameters(fFinalCutoff, fFinalResonance, GetEngine()->SampleRate);
        }

        // do we need resampling? (only if the pitch deviates by more than
        // one cent from unity)
        const float __PLUS_ONE_CENT = 1.000577789506554859250142541782224725466f;
        const float __MINUS_ONE_CENT = 0.9994225441413807496009516495583113737666f;
        const bool bResamplingRequired = !(finalSynthesisParameters.fFinalPitch <= __PLUS_ONE_CENT &&
                                           finalSynthesisParameters.fFinalPitch >= __MINUS_ONE_CENT);
        SYNTHESIS_MODE_SET_INTERPOLATE(SynthesisMode, bResamplingRequired);

        // prepare final synthesis parameters structure
        finalSynthesisParameters.uiToGo = iSubFragmentEnd - i;
#ifdef CONFIG_INTERPOLATE_VOLUME
        // compute per-sample volume deltas so the volume is ramped linearly
        // across this subfragment
        finalSynthesisParameters.fFinalVolumeDeltaLeft =
            (fFinalVolume * VolumeLeft * PanLeftSmoother.render() -
             finalSynthesisParameters.fFinalVolumeLeft) / finalSynthesisParameters.uiToGo;
        finalSynthesisParameters.fFinalVolumeDeltaRight =
            (fFinalVolume * VolumeRight * PanRightSmoother.render() -
             finalSynthesisParameters.fFinalVolumeRight) / finalSynthesisParameters.uiToGo;
#else
        finalSynthesisParameters.fFinalVolumeLeft =
            fFinalVolume * VolumeLeft * PanLeftSmoother.render();
        finalSynthesisParameters.fFinalVolumeRight =
            fFinalVolume * VolumeRight * PanRightSmoother.render();
#endif
        // render audio for one subfragment
        if (!delay) RunSynthesisFunction(SynthesisMode, &finalSynthesisParameters, &loop);

        if (pSignalUnitRack == NULL) {
            // stop the rendering if volume EG is finished
            if (pEG1->getSegmentType() == EG::segment_end) break;
        } else {
            // stop the rendering if the endpoint unit is not active
            if (!pSignalUnitRack->GetEndpointUnit()->Active()) break;
        }

        const double newPos = Pos + (iSubFragmentEnd - i) * finalSynthesisParameters.fFinalPitch;

        if (pSignalUnitRack == NULL) {
            // increment envelopes' positions
            if (pEG1->active()) {

                // if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage
                if (SmplInfo.HasLoops && Pos <= SmplInfo.LoopStart && SmplInfo.LoopStart < newPos) {
                    pEG1->update(EG::event_hold_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                }

                pEG1->increment(1);
                if (!pEG1->toStageEndLeft()) pEG1->update(EG::event_stage_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
            }
            if (pEG2->active()) {
                pEG2->increment(1);
                if (!pEG2->toStageEndLeft()) pEG2->update(EG::event_stage_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
            }
            EG3.increment(1);
            if (!EG3.toEndLeft()) EG3.update(); // neutralize envelope coefficient if end reached
        } else {
            // if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage
            /*if (SmplInfo.HasLoops && Pos <= SmplInfo.LoopStart && SmplInfo.LoopStart < newPos) {
                pEG1->update(EG::event_hold_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
            }*/
            // TODO: ^^^

            if (!delay) pSignalUnitRack->Increment();
        }

        Pos = newPos;
        i = iSubFragmentEnd;
    }

    // nothing was rendered in this call, so nothing to route / post-process
    if (delay) return;

    // post-processing: run the EQ (if active) and/or route the rendered
    // audio through the dedicated voice channels for the FX sends
    if (bVoiceRequiresDedicatedRouting) {
        if (bEq) {
            pEq->RenderAudio(Samples);
            pEq->GetOutChannelLeft()->CopyTo(GetEngine()->pDedicatedVoiceChannelLeft, Samples);
            pEq->GetOutChannelRight()->CopyTo(GetEngine()->pDedicatedVoiceChannelRight, Samples);
        }
        optional<float> effectSendLevels[2] = {
            pMidiKeyInfo->ReverbSend,
            pMidiKeyInfo->ChorusSend
        };
        GetEngine()->RouteDedicatedVoiceChannels(pEngineChannel, effectSendLevels, Samples);
    } else if (bEq) {
        pEq->RenderAudio(Samples);
        pEq->GetOutChannelLeft()->MixTo(pChannel->pChannelLeft, Samples);
        pEq->GetOutChannelRight()->MixTo(pChannel->pChannelRight, Samples);
    }
}
637
638 /**
639 * Process given list of MIDI control change, aftertouch and pitch bend
640 * events for the given time.
641 *
642 * @param itEvent - iterator pointing to the next event to be processed
643 * @param End - youngest time stamp where processing should be stopped
644 */
void AbstractVoice::processCCEvents(RTList<Event>::Iterator& itEvent, uint End) {
    for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
        if ((itEvent->Type == Event::type_control_change || itEvent->Type == Event::type_channel_pressure)
            && itEvent->Param.CC.Controller) // if (valid) MIDI control change event
        {
            // NOTE: the checks below are deliberately not mutually
            // exclusive; one CC event may drive several destinations
            if (itEvent->Param.CC.Controller == VCFCutoffCtrl.controller) {
                ProcessCutoffEvent(itEvent);
            }
            if (itEvent->Param.CC.Controller == VCFResonanceCtrl.controller) {
                processResonanceEvent(itEvent);
            }
            // aftertouch arrives either as a dedicated controller table
            // index or as a channel pressure event
            if (itEvent->Param.CC.Controller == CTRL_TABLE_IDX_AFTERTOUCH ||
                itEvent->Type == Event::type_channel_pressure)
            {
                ProcessChannelPressureEvent(itEvent);
            }
            // the built-in LFOs only listen to MIDI controllers when no
            // signal unit rack is used
            if (pSignalUnitRack == NULL) {
                if (itEvent->Param.CC.Controller == pLFO1->ExtController) {
                    pLFO1->updateByMIDICtrlValue(itEvent->Param.CC.Value);
                }
                if (itEvent->Param.CC.Controller == pLFO2->ExtController) {
                    pLFO2->updateByMIDICtrlValue(itEvent->Param.CC.Value);
                }
                if (itEvent->Param.CC.Controller == pLFO3->ExtController) {
                    pLFO3->updateByMIDICtrlValue(itEvent->Param.CC.Value);
                }
            }
            if (itEvent->Param.CC.Controller == 7) { // volume
                VolumeSmoother.update(AbstractEngine::VolumeCurve[itEvent->Param.CC.Value]);
            } else if (itEvent->Param.CC.Controller == 10) { // panpot
                MIDIPan = CalculatePan(itEvent->Param.CC.Value);
            }
        } else if (itEvent->Type == Event::type_pitchbend) { // if pitch bend event
            processPitchEvent(itEvent);
        } else if (itEvent->Type == Event::type_note_pressure) {
            ProcessPolyphonicKeyPressureEvent(itEvent);
        }

        // give the engine implementation and (if present) the signal unit
        // rack a chance to process the event as well
        ProcessCCEvent(itEvent);
        if (pSignalUnitRack != NULL) {
            pSignalUnitRack->ProcessCCEvent(itEvent);
        }
    }
}
689
690 void AbstractVoice::processPitchEvent(RTList<Event>::Iterator& itEvent) {
691 Pitch.PitchBend = RTMath::CentsToFreqRatio(itEvent->Param.Pitch.Pitch * Pitch.PitchBendRange);
692 }
693
694 void AbstractVoice::processResonanceEvent(RTList<Event>::Iterator& itEvent) {
695 // convert absolute controller value to differential
696 const int ctrldelta = itEvent->Param.CC.Value - VCFResonanceCtrl.value;
697 VCFResonanceCtrl.value = itEvent->Param.CC.Value;
698 const float resonancedelta = (float) ctrldelta;
699 fFinalResonance += resonancedelta;
700 // needed for initialization of parameter
701 VCFResonanceCtrl.fvalue = itEvent->Param.CC.Value;
702 }
703
704 /**
705 * Process given list of MIDI note on, note off, sustain pedal events and
706 * note synthesis parameter events for the given time.
707 *
708 * @param itEvent - iterator pointing to the next event to be processed
709 * @param End - youngest time stamp where processing should be stopped
710 */
void AbstractVoice::processTransitionEvents(RTList<Event>::Iterator& itEvent, uint End) {
    for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
        // some voice types ignore note off
        if (!(Type & (Voice::type_one_shot | Voice::type_release_trigger | Voice::type_controller_triggered))) {
            if (itEvent->Type == Event::type_release_key) {
                EnterReleaseStage();
            } else if (itEvent->Type == Event::type_cancel_release_key) {
                // undo a previous release (presumably resumes the EGs'
                // pre-release stage — see EG implementation)
                if (pSignalUnitRack == NULL) {
                    pEG1->update(EG::event_cancel_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                    pEG2->update(EG::event_cancel_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                } else {
                    pSignalUnitRack->CancelRelease();
                }
            }
        }
        // process stop-note events (caused by built-in instrument script function note_off())
        if (itEvent->Type == Event::type_release_note && pNote &&
            pEngineChannel->pEngine->NoteByID( itEvent->Param.Note.ID ) == pNote)
        {
            EnterReleaseStage();
        }
        // process synthesis parameter events (caused by built-in real-time instrument script functions)
        if (itEvent->Type == Event::type_note_synth_param && pNote &&
            pEngineChannel->pEngine->NoteByID( itEvent->Param.NoteSynthParam.NoteID ) == pNote)
        {
            // dispatch on which synthesis parameter the script changed
            switch (itEvent->Param.NoteSynthParam.Type) {
                case Event::synth_param_volume:
                    NoteVolumeSmoother.update(itEvent->Param.NoteSynthParam.AbsValue);
                    break;
                case Event::synth_param_pitch:
                    NotePitch = itEvent->Param.NoteSynthParam.AbsValue;
                    break;
                case Event::synth_param_pan:
                    NotePanLeft = AbstractEngine::PanCurveValueNorm(itEvent->Param.NoteSynthParam.AbsValue, 0 /*left*/);
                    NotePanRight = AbstractEngine::PanCurveValueNorm(itEvent->Param.NoteSynthParam.AbsValue, 1 /*right*/);
                    break;
                case Event::synth_param_cutoff:
                    NoteCutoff = itEvent->Param.NoteSynthParam.AbsValue;
                    break;
                case Event::synth_param_resonance:
                    NoteResonance = itEvent->Param.NoteSynthParam.AbsValue;
                    break;
                case Event::synth_param_amp_lfo_depth:
                    pLFO1->setScriptDepthFactor(itEvent->Param.NoteSynthParam.AbsValue);
                    break;
                case Event::synth_param_amp_lfo_freq:
                    pLFO1->setScriptFrequencyFactor(itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                    break;
                case Event::synth_param_pitch_lfo_depth:
                    pLFO3->setScriptDepthFactor(itEvent->Param.NoteSynthParam.AbsValue);
                    break;
                case Event::synth_param_pitch_lfo_freq:
                    pLFO3->setScriptFrequencyFactor(itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                    break;

                // attack / decay / release overrides are applied once at
                // trigger time (see Trigger()), not per subfragment
                case Event::synth_param_attack:
                case Event::synth_param_decay:
                case Event::synth_param_release:
                    break; // noop
            }
        }
    }
}
774
775 /**
776 * Process given list of events aimed at all voices in a key group.
777 *
778 * @param itEvent - iterator pointing to the next event to be processed
779 * @param End - youngest time stamp where processing should be stopped
780 */
781 void AbstractVoice::processGroupEvents(RTList<Event>::Iterator& itEvent, uint End) {
782 for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
783 ProcessGroupEvent(itEvent);
784 }
785 }
786
787 /** @brief Update current portamento position.
788 *
789 * Will be called when portamento mode is enabled to get the final
790 * portamento position of this active voice from where the next voice(s)
791 * might continue to slide on.
792 *
793 * @param itNoteOffEvent - event which causes this voice to die soon
794 */
795 void AbstractVoice::UpdatePortamentoPos(Pool<Event>::Iterator& itNoteOffEvent) {
796 if (pSignalUnitRack == NULL) {
797 const float fFinalEG3Level = EG3.level(itNoteOffEvent->FragmentPos());
798 pEngineChannel->PortamentoPos = (float) MIDIKey() + RTMath::FreqRatioToCents(fFinalEG3Level) * 0.01f;
799 } else {
800 // TODO:
801 }
802 }
803
804 /**
805 * Kill the voice in regular sense. Let the voice render audio until
806 * the kill event actually occured and then fade down the volume level
807 * very quickly and let the voice die finally. Unlike a normal release
808 * of a voice, a kill process cannot be cancalled and is therefore
809 * usually used for voice stealing and key group conflicts.
810 *
811 * @param itKillEvent - event which caused the voice to be killed
812 */
813 void AbstractVoice::Kill(Pool<Event>::Iterator& itKillEvent) {
814 #if CONFIG_DEVMODE
815 if (!itKillEvent) dmsg(1,("AbstractVoice::Kill(): ERROR, !itKillEvent !!!\n"));
816 if (itKillEvent && !itKillEvent.isValid()) dmsg(1,("AbstractVoice::Kill(): ERROR, itKillEvent invalid !!!\n"));
817 #endif // CONFIG_DEVMODE
818
819 if (itTriggerEvent && itKillEvent->FragmentPos() <= itTriggerEvent->FragmentPos()) return;
820 this->itKillEvent = itKillEvent;
821 }
822
823 Voice::PitchInfo AbstractVoice::CalculatePitchInfo(int PitchBend) {
824 PitchInfo pitch;
825 double pitchbasecents = InstrInfo.FineTune + RgnInfo.FineTune + GetEngine()->ScaleTuning[MIDIKey() % 12];
826
827 // GSt behaviour: maximum transpose up is 40 semitones. If
828 // MIDI key is more than 40 semitones above unity note,
829 // the transpose is not done.
830 if (!SmplInfo.Unpitched && (MIDIKey() - (int) RgnInfo.UnityNote) < 40) pitchbasecents += (MIDIKey() - (int) RgnInfo.UnityNote) * 100;
831
832 pitch.PitchBase = RTMath::CentsToFreqRatioUnlimited(pitchbasecents) * (double(SmplInfo.SampleRate) / double(GetEngine()->SampleRate));
833 pitch.PitchBendRange = 1.0 / 8192.0 * 100.0 * InstrInfo.PitchbendRange;
834 pitch.PitchBend = RTMath::CentsToFreqRatio(PitchBend * pitch.PitchBendRange);
835
836 return pitch;
837 }
838
839 void AbstractVoice::onScaleTuningChanged() {
840 PitchInfo pitch = this->Pitch;
841 double pitchbasecents = InstrInfo.FineTune + RgnInfo.FineTune + GetEngine()->ScaleTuning[MIDIKey() % 12];
842
843 // GSt behaviour: maximum transpose up is 40 semitones. If
844 // MIDI key is more than 40 semitones above unity note,
845 // the transpose is not done.
846 if (!SmplInfo.Unpitched && (MIDIKey() - (int) RgnInfo.UnityNote) < 40) pitchbasecents += (MIDIKey() - (int) RgnInfo.UnityNote) * 100;
847
848 pitch.PitchBase = RTMath::CentsToFreqRatioUnlimited(pitchbasecents) * (double(SmplInfo.SampleRate) / double(GetEngine()->SampleRate));
849 this->Pitch = pitch;
850 }
851
852 double AbstractVoice::CalculateVolume(double velocityAttenuation) {
853 // For 16 bit samples, we downscale by 32768 to convert from
854 // int16 value range to DSP value range (which is
855 // -1.0..1.0). For 24 bit, we downscale from int32.
856 float volume = velocityAttenuation / (SmplInfo.BitDepth == 16 ? 32768.0f : 32768.0f * 65536.0f);
857
858 volume *= GetSampleAttenuation() * pEngineChannel->GlobalVolume * GLOBAL_VOLUME;
859
860 // the volume of release triggered samples depends on note length
861 if (Type & Voice::type_release_trigger) {
862 float noteLength = float(GetEngine()->FrameTime + Delay -
863 GetNoteOnTime(MIDIKey()) ) / GetEngine()->SampleRate;
864
865 volume *= GetReleaseTriggerAttenuation(noteLength);
866 }
867
868 return volume;
869 }
870
871 float AbstractVoice::GetReleaseTriggerAttenuation(float noteLength) {
872 return 1 - RgnInfo.ReleaseTriggerDecay * noteLength;
873 }
874
875 void AbstractVoice::EnterReleaseStage() {
876 if (pSignalUnitRack == NULL) {
877 pEG1->update(EG::event_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
878 pEG2->update(EG::event_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
879 } else {
880 pSignalUnitRack->EnterReleaseStage();
881 }
882 }
883
884 bool AbstractVoice::EG1Finished() {
885 if (pSignalUnitRack == NULL) {
886 return pEG1->getSegmentType() == EG::segment_end;
887 } else {
888 return !pSignalUnitRack->GetEndpointUnit()->Active();
889 }
890 }
891
892 } // namespace LinuxSampler

  ViewVC Help
Powered by ViewVC