Contents of /linuxsampler/trunk/src/engines/common/AbstractVoice.cpp

Revision 3214
Thu May 25 14:46:47 2017 UTC by schoenebeck
File size: 44641 bytes
* NKSP: Implemented built-in script function "change_velo()".
* NKSP: Implemented built-in script function "change_note()".
* Bumped version (2.0.0.svn49).

/***************************************************************************
 *
 *   LinuxSampler - modular, streaming capable sampler
 *
 *   Copyright (C) 2003,2004 by Benno Senoner and Christian Schoenebeck
 *   Copyright (C) 2005-2008 Christian Schoenebeck
 *   Copyright (C) 2009-2012 Christian Schoenebeck and Grigor Iliev
 *   Copyright (C) 2013-2016 Christian Schoenebeck and Andreas Persson
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 *   MA 02111-1307 USA
 ***************************************************************************/

#include "AbstractVoice.h"

namespace LinuxSampler {

    AbstractVoice::AbstractVoice(SignalUnitRack* pRack): pSignalUnitRack(pRack) {
        pEngineChannel = NULL;
        pLFO1 = new LFOUnsigned(1.0f); // amplitude LFO (0..1 range)
        pLFO2 = new LFOUnsigned(1.0f); // filter LFO (0..1 range)
        pLFO3 = new LFOSigned(1200.0f); // pitch LFO (-1200..+1200 range)
        PlaybackState = playback_state_end;
        SynthesisMode = 0; // set all mode bits to 0 first
        // select synthesis implementation (asm core is not supported ATM)
        #if 0 // CONFIG_ASM && ARCH_X86
        SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, Features::supportsMMX() && Features::supportsSSE());
        #else
        SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, false);
        #endif
        SYNTHESIS_MODE_SET_PROFILING(SynthesisMode, gig::Profiler::isEnabled());

        finalSynthesisParameters.filterLeft.Reset();
        finalSynthesisParameters.filterRight.Reset();

        pEq = NULL;
        bEqSupport = false;
    }

    AbstractVoice::~AbstractVoice() {
        if (pLFO1) delete pLFO1;
        if (pLFO2) delete pLFO2;
        if (pLFO3) delete pLFO3;

        if (pEq != NULL) delete pEq;
    }

    void AbstractVoice::CreateEq() {
        if (!bEqSupport) return;
        if (pEq != NULL) delete pEq;
        pEq = new EqSupport;
        pEq->InitEffect(GetEngine()->pAudioOutputDevice);
    }

    /**
     * Resets voice variables. Should only be called if the rendering
     * process is suspended / not running.
     */
    void AbstractVoice::Reset() {
        finalSynthesisParameters.filterLeft.Reset();
        finalSynthesisParameters.filterRight.Reset();
        DiskStreamRef.pStream = NULL;
        DiskStreamRef.hStream = 0;
        DiskStreamRef.State = Stream::state_unused;
        DiskStreamRef.OrderID = 0;
        PlaybackState = playback_state_end;
        itTriggerEvent = Pool<Event>::Iterator();
        itKillEvent = Pool<Event>::Iterator();
    }

    /**
     * Initializes and triggers the voice; a disk stream will be launched if
     * needed.
     *
     * @param pEngineChannel - engine channel on which this voice was ordered
     * @param itNoteOnEvent  - event that caused triggering of this voice
     * @param PitchBend      - MIDI detune factor (-8192 ... +8191)
     * @param pRegion        - points to the region which provides sample wave(s) and articulation data
     * @param VoiceType      - type of this voice
     * @param iKeyGroup      - a value > 0 defines a key group of which this voice is a member
     * @returns 0 on success, a value < 0 if the voice wasn't triggered
     *          (either due to an error or e.g. because no region is
     *          defined for the given key)
     */
    int AbstractVoice::Trigger (
        AbstractEngineChannel* pEngineChannel,
        Pool<Event>::Iterator& itNoteOnEvent,
        int PitchBend,
        type_t VoiceType,
        int iKeyGroup
    ) {
        this->pEngineChannel = pEngineChannel;
        Orphan = false;

        #if CONFIG_DEVMODE
        if (itNoteOnEvent->FragmentPos() > GetEngine()->MaxSamplesPerCycle) { // just a sanity check for debugging
            dmsg(1,("Voice::Trigger(): ERROR, TriggerDelay > Totalsamples\n"));
        }
        #endif // CONFIG_DEVMODE

        Type = VoiceType;
        pNote = pEngineChannel->pEngine->NoteByID( itNoteOnEvent->Param.Note.ID );
        PlaybackState = playback_state_init; // mark voice as triggered, but no audio rendered yet
        Delay = itNoteOnEvent->FragmentPos();
        itTriggerEvent = itNoteOnEvent;
        itKillEvent = Pool<Event>::Iterator();
        MidiKeyBase* pKeyInfo = GetMidiKeyInfo(MIDIKey());

        pGroupEvents = iKeyGroup ? pEngineChannel->ActiveKeyGroups[iKeyGroup] : 0;
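        // (a key group value of 0 means "not a member of any key group", so
        //  no exclusive key group event list gets attached to this voice)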

        SmplInfo = GetSampleInfo();
        RgnInfo = GetRegionInfo();
        InstrInfo = GetInstrumentInfo();

        MIDIPan = CalculatePan(pEngineChannel->iLastPanRequest);

        AboutToTrigger();

        // calculate volume
        const double velocityAttenuation = GetVelocityAttenuation(MIDIVelocity());
        float volume = CalculateVolume(velocityAttenuation) * pKeyInfo->Volume;
        if (volume <= 0) return -1;

        // select channel mode (mono or stereo)
        SYNTHESIS_MODE_SET_CHANNELS(SynthesisMode, SmplInfo.ChannelCount == 2);
        // select bit depth (16 or 24)
        SYNTHESIS_MODE_SET_BITDEPTH24(SynthesisMode, SmplInfo.BitDepth == 24);

        // get starting crossfade volume level
        float crossfadeVolume = CalculateCrossfadeVolume(MIDIVelocity());

        VolumeLeft = volume * pKeyInfo->PanLeft;
        VolumeRight = volume * pKeyInfo->PanRight;

        // this rate is used for rather mellow volume fades
        const float subfragmentRate = GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE;
        // this rate is used for very fast volume fades
        const float quickRampRate = RTMath::Min(subfragmentRate, GetEngine()->SampleRate * 0.001f /* approx. 13ms */);
        CrossfadeSmoother.trigger(crossfadeVolume, subfragmentRate);

        VolumeSmoother.trigger(pEngineChannel->MidiVolume, subfragmentRate);
        NoteVolume.setCurrentValue(pNote ? pNote->Override.Volume : 1.f);
        NoteVolume.setDefaultDuration(pNote ? pNote->Override.VolumeTime : DEFAULT_NOTE_VOLUME_TIME_S);

        // Check if the sample needs disk streaming or is too short for that
        long cachedsamples = GetSampleCacheSize() / SmplInfo.FrameSize;
        DiskVoice = cachedsamples < SmplInfo.TotalFrameCount;

        SetSampleStartOffset();

        if (DiskVoice) { // voice to be streamed from disk
            if (cachedsamples > (GetEngine()->MaxSamplesPerCycle << CONFIG_MAX_PITCH)) {
                MaxRAMPos = cachedsamples - (GetEngine()->MaxSamplesPerCycle << CONFIG_MAX_PITCH) / SmplInfo.ChannelCount; //TODO: this calculation is too pessimistic and may better be moved to the Render() method, so it calculates MaxRAMPos dependent on the current demand of sample points to be rendered (e.g. in case of JACK)
            } else {
                // The cache is too small to fit a max sample buffer.
                // Setting MaxRAMPos to 0 will probably cause a click
                // in the audio, but it's better than not handling
                // this case at all, which would have caused the
                // unsigned MaxRAMPos to be set to a negative number.
                MaxRAMPos = 0;
            }

            // check if there's a loop defined which completely fits into the cached (RAM) part of the sample
            RAMLoop = (SmplInfo.HasLoops && (SmplInfo.LoopStart + SmplInfo.LoopLength) <= MaxRAMPos);

            if (OrderNewStream()) return -1;
            dmsg(4,("Disk voice launched (cached samples: %ld, total Samples: %d, MaxRAMPos: %lu, RAMLooping: %s)\n", cachedsamples, SmplInfo.TotalFrameCount, MaxRAMPos, (RAMLoop) ? "yes" : "no"));
        }
        else { // RAM only voice
            MaxRAMPos = cachedsamples;
            RAMLoop = (SmplInfo.HasLoops);
            dmsg(4,("RAM only voice launched (Looping: %s)\n", (RAMLoop) ? "yes" : "no"));
        }
        if (RAMLoop) {
            loop.uiTotalCycles = SmplInfo.LoopPlayCount;
            loop.uiCyclesLeft = SmplInfo.LoopPlayCount;
            loop.uiStart = SmplInfo.LoopStart;
            loop.uiEnd = SmplInfo.LoopStart + SmplInfo.LoopLength;
            loop.uiSize = SmplInfo.LoopLength;
        }

        Pitch = CalculatePitchInfo(PitchBend);
        NotePitch.setCurrentValue(pNote ? pNote->Override.Pitch : 1.0f);
        NotePitch.setDefaultDuration(pNote ? pNote->Override.PitchTime : DEFAULT_NOTE_PITCH_TIME_S);
        NoteCutoff = (pNote) ? pNote->Override.Cutoff : 1.0f;
        NoteResonance = (pNote) ? pNote->Override.Resonance : 1.0f;

        // the lengths of the decay and release curves depend on the velocity
        const double velrelease = 1 / GetVelocityRelease(MIDIVelocity());

        if (pSignalUnitRack == NULL) { // setup EG 1 (VCA EG)
            // get current value of EG1 controller
            double eg1controllervalue = GetEG1ControllerValue(MIDIVelocity());

            // calculate influence of EG1 controller on EG1's parameters
            EGInfo egInfo = CalculateEG1ControllerInfluence(eg1controllervalue);

            if (pNote) {
                egInfo.Attack *= pNote->Override.Attack;
                egInfo.Decay *= pNote->Override.Decay;
                egInfo.Release *= pNote->Override.Release;
            }

            TriggerEG1(egInfo, velrelease, velocityAttenuation, GetEngine()->SampleRate, MIDIVelocity());
        } else {
            pSignalUnitRack->Trigger();
        }

        const uint8_t pan = (pSignalUnitRack) ? pSignalUnitRack->GetEndpointUnit()->CalculatePan(MIDIPan) : MIDIPan;
        NotePanLeft = (pNote) ? AbstractEngine::PanCurveValueNorm(pNote->Override.Pan, 0 /*left*/ ) : 1.f;
        NotePanRight = (pNote) ? AbstractEngine::PanCurveValueNorm(pNote->Override.Pan, 1 /*right*/) : 1.f;
        PanLeftSmoother.trigger(
            AbstractEngine::PanCurve[128 - pan] * NotePanLeft,
            quickRampRate //NOTE: maybe we should have 2 separate pan smoothers, one for MIDI CC10 (with slow rate) and one for instrument script change_pan() calls (with fast rate)
        );
        PanRightSmoother.trigger(
            AbstractEngine::PanCurve[pan] * NotePanRight,
            quickRampRate //NOTE: maybe we should have 2 separate pan smoothers, one for MIDI CC10 (with slow rate) and one for instrument script change_pan() calls (with fast rate)
        );
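        // (the pan curve is indexed symmetrically: PanCurve[pan] yields the
        //  right channel factor and PanCurve[128 - pan] the left channel factor)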

        #ifdef CONFIG_INTERPOLATE_VOLUME
        // setup initial volume in synthesis parameters
        #ifdef CONFIG_PROCESS_MUTED_CHANNELS
        if (pEngineChannel->GetMute()) {
            finalSynthesisParameters.fFinalVolumeLeft = 0;
            finalSynthesisParameters.fFinalVolumeRight = 0;
        }
        else
        #else
        {
            float finalVolume;
            if (pSignalUnitRack == NULL) {
                finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * pEG1->getLevel();
            } else {
                finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * pSignalUnitRack->GetEndpointUnit()->GetVolume();
            }

            finalSynthesisParameters.fFinalVolumeLeft = finalVolume * VolumeLeft * PanLeftSmoother.render();
            finalSynthesisParameters.fFinalVolumeRight = finalVolume * VolumeRight * PanRightSmoother.render();
        }
        #endif
        #endif

        if (pSignalUnitRack == NULL) {
            // setup EG 2 (VCF Cutoff EG)
            {
                // get current value of EG2 controller
                double eg2controllervalue = GetEG2ControllerValue(MIDIVelocity());

                // calculate influence of EG2 controller on EG2's parameters
                EGInfo egInfo = CalculateEG2ControllerInfluence(eg2controllervalue);

                TriggerEG2(egInfo, velrelease, velocityAttenuation, GetEngine()->SampleRate, MIDIVelocity());
            }


            // setup EG 3 (VCO EG)
            {
                // if portamento mode is on, we dedicate EG3 purely for portamento, otherwise if portamento is off we do as told by the patch
                bool bPortamento = pEngineChannel->PortamentoMode && pEngineChannel->PortamentoPos >= 0.0f;
                float eg3depth = (bPortamento)
                    ? RTMath::CentsToFreqRatio((pEngineChannel->PortamentoPos - (float) MIDIKey()) * 100)
                    : RTMath::CentsToFreqRatio(RgnInfo.EG3Depth);
                float eg3time = (bPortamento)
                    ? pEngineChannel->PortamentoTime
                    : RgnInfo.EG3Attack;
                EG3.trigger(eg3depth, eg3time, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                dmsg(5,("PortamentoPos=%f, depth=%f, time=%f\n", pEngineChannel->PortamentoPos, eg3depth, eg3time));
            }


            // setup LFO 1 (VCA LFO)
            InitLFO1();
            // setup LFO 2 (VCF Cutoff LFO)
            InitLFO2();
            // setup LFO 3 (VCO LFO)
            InitLFO3();
        }


        #if CONFIG_FORCE_FILTER
        const bool bUseFilter = true;
        #else // use filter only if the instrument file says so
        const bool bUseFilter = RgnInfo.VCFEnabled;
        #endif // CONFIG_FORCE_FILTER
        SYNTHESIS_MODE_SET_FILTER(SynthesisMode, bUseFilter);
        if (bUseFilter) {
            #ifdef CONFIG_OVERRIDE_CUTOFF_CTRL
            VCFCutoffCtrl.controller = CONFIG_OVERRIDE_CUTOFF_CTRL;
            #else // use the one defined in the instrument file
            VCFCutoffCtrl.controller = GetVCFCutoffCtrl();
            #endif // CONFIG_OVERRIDE_CUTOFF_CTRL

            #ifdef CONFIG_OVERRIDE_RESONANCE_CTRL
            VCFResonanceCtrl.controller = CONFIG_OVERRIDE_RESONANCE_CTRL;
            #else // use the one defined in the instrument file
            VCFResonanceCtrl.controller = GetVCFResonanceCtrl();
            #endif // CONFIG_OVERRIDE_RESONANCE_CTRL

            #ifndef CONFIG_OVERRIDE_FILTER_TYPE
            finalSynthesisParameters.filterLeft.SetType(RgnInfo.VCFType);
            finalSynthesisParameters.filterRight.SetType(RgnInfo.VCFType);
            #else // override filter type
            finalSynthesisParameters.filterLeft.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
            finalSynthesisParameters.filterRight.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
            #endif // CONFIG_OVERRIDE_FILTER_TYPE

            VCFCutoffCtrl.value = pEngineChannel->ControllerTable[VCFCutoffCtrl.controller];
            VCFResonanceCtrl.value = pEngineChannel->ControllerTable[VCFResonanceCtrl.controller];

            // calculate cutoff frequency
            CutoffBase = CalculateCutoffBase(MIDIVelocity());

            VCFCutoffCtrl.fvalue = CalculateFinalCutoff(CutoffBase);

            // calculate resonance
            float resonance = (float) (VCFResonanceCtrl.controller ? VCFResonanceCtrl.value : RgnInfo.VCFResonance);
            VCFResonanceCtrl.fvalue = resonance;
        } else {
            VCFCutoffCtrl.controller = 0;
            VCFResonanceCtrl.controller = 0;
        }

        const bool bEq =
            pSignalUnitRack != NULL && pSignalUnitRack->HasEq() && pEq->HasSupport();

        if (bEq) {
            pEq->GetInChannelLeft()->Clear();
            pEq->GetInChannelRight()->Clear();
            pEq->RenderAudio(GetEngine()->pAudioOutputDevice->MaxSamplesPerCycle());
        }

        return 0; // success
    }

    void AbstractVoice::SetSampleStartOffset() {
        finalSynthesisParameters.dPos = RgnInfo.SampleStartOffset; // offset where we should start playback of sample (0 - 2000 sample points)
        Pos = RgnInfo.SampleStartOffset;
    }

    /**
     * Synthesizes the current audio fragment for this voice.
     *
     * @param Samples - number of sample points to be rendered in this audio
     *                  fragment cycle
     * @param pSrc    - pointer to input sample data
     * @param Skip    - number of sample points to skip in output buffer
     */
    void AbstractVoice::Synthesize(uint Samples, sample_t* pSrc, uint Skip) {
        bool delay = false; // Whether the voice playback should be delayed for this call

        if (pSignalUnitRack != NULL) {
            uint delaySteps = pSignalUnitRack->GetEndpointUnit()->DelayTrigger();
            if (delaySteps > 0) { // delay on the endpoint unit means delay of the voice playback
                if (delaySteps >= Samples) {
                    pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(Samples);
                    delay = true;
                } else {
                    pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(delaySteps);
                    Samples -= delaySteps;
                    Skip += delaySteps;
                }
            }
        }

        AbstractEngineChannel* pChannel = pEngineChannel;
        MidiKeyBase* pMidiKeyInfo = GetMidiKeyInfo(MIDIKey());

        const bool bVoiceRequiresDedicatedRouting =
            pEngineChannel->GetFxSendCount() > 0 &&
            (pMidiKeyInfo->ReverbSend || pMidiKeyInfo->ChorusSend);

        const bool bEq =
            pSignalUnitRack != NULL && pSignalUnitRack->HasEq() && pEq->HasSupport();

        if (bEq) {
            pEq->GetInChannelLeft()->Clear();
            pEq->GetInChannelRight()->Clear();
            finalSynthesisParameters.pOutLeft = &pEq->GetInChannelLeft()->Buffer()[Skip];
            finalSynthesisParameters.pOutRight = &pEq->GetInChannelRight()->Buffer()[Skip];
            pSignalUnitRack->UpdateEqSettings(pEq);
        } else if (bVoiceRequiresDedicatedRouting) {
            finalSynthesisParameters.pOutLeft = &GetEngine()->pDedicatedVoiceChannelLeft->Buffer()[Skip];
            finalSynthesisParameters.pOutRight = &GetEngine()->pDedicatedVoiceChannelRight->Buffer()[Skip];
        } else {
            finalSynthesisParameters.pOutLeft = &pChannel->pChannelLeft->Buffer()[Skip];
            finalSynthesisParameters.pOutRight = &pChannel->pChannelRight->Buffer()[Skip];
        }
        finalSynthesisParameters.pSrc = pSrc;

        RTList<Event>::Iterator itCCEvent = pChannel->pEvents->first();
        RTList<Event>::Iterator itNoteEvent;
        GetFirstEventOnKey(HostKey(), itNoteEvent);

        RTList<Event>::Iterator itGroupEvent;
        if (pGroupEvents && !Orphan) itGroupEvent = pGroupEvents->first();

        if (itTriggerEvent) { // skip events that happened before this voice was triggered
            while (itCCEvent && itCCEvent->FragmentPos() <= Skip) ++itCCEvent;
            while (itGroupEvent && itGroupEvent->FragmentPos() <= Skip) ++itGroupEvent;

            // we can't simply compare the timestamps here, because note events
            // might happen on the same time stamp, so we have to rely on the
            // actual sequence in which the note events arrived instead (see bug #112)
            for (; itNoteEvent; ++itNoteEvent) {
                if (itTriggerEvent == itNoteEvent) {
                    ++itNoteEvent;
                    break;
                }
            }
        }

        uint killPos = 0;
        if (itKillEvent) {
            int maxFadeOutPos = Samples - GetEngine()->GetMinFadeOutSamples();
            if (maxFadeOutPos < 0) {
                // There's not enough space in buffer to do a fade out
                // from max volume (this can only happen for audio
                // drivers that use Samples < MaxSamplesPerCycle).
                // End the EG1 here, at pos 0, with a shorter max fade
                // out time.
                if (pSignalUnitRack == NULL) {
                    pEG1->enterFadeOutStage(Samples / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                } else {
                    pSignalUnitRack->EnterFadeOutStage(Samples / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                }
                itKillEvent = Pool<Event>::Iterator();
            } else {
                killPos = RTMath::Min(itKillEvent->FragmentPos(), maxFadeOutPos);
            }
        }

        uint i = Skip;
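        // Render the fragment in subfragments of CONFIG_DEFAULT_SUBFRAGMENT_SIZE
        // sample points: control rate signals (EGs, LFOs, smoothers and incoming
        // MIDI events) are evaluated once per subfragment, then the audio rate
        // synthesis for that subfragment is done by RunSynthesisFunction().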
        while (i < Samples) {
            int iSubFragmentEnd = RTMath::Min(i + CONFIG_DEFAULT_SUBFRAGMENT_SIZE, Samples);

            // initialize all final synthesis parameters
            fFinalCutoff = VCFCutoffCtrl.fvalue;
            fFinalResonance = VCFResonanceCtrl.fvalue;

            // process MIDI control change, aftertouch and pitchbend events for this subfragment
            processCCEvents(itCCEvent, iSubFragmentEnd);
            uint8_t pan = MIDIPan;
            if (pSignalUnitRack != NULL) pan = pSignalUnitRack->GetEndpointUnit()->CalculatePan(MIDIPan);

            PanLeftSmoother.update(AbstractEngine::PanCurve[128 - pan] * NotePanLeft);
            PanRightSmoother.update(AbstractEngine::PanCurve[pan] * NotePanRight);

            finalSynthesisParameters.fFinalPitch = Pitch.PitchBase * Pitch.PitchBend * NotePitch.render();
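            // (all three factors are frequency ratios: the static base pitch
            //  from CalculatePitchInfo(), the current MIDI pitch bend, and the
            //  per-note pitch override, e.g. set by instrument scripts)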

            float fFinalVolume = VolumeSmoother.render() * CrossfadeSmoother.render() * NoteVolume.render();
            #ifdef CONFIG_PROCESS_MUTED_CHANNELS
            if (pChannel->GetMute()) fFinalVolume = 0;
            #endif

            // process transition events (note on, note off & sustain pedal)
            processTransitionEvents(itNoteEvent, iSubFragmentEnd);
            processGroupEvents(itGroupEvent, iSubFragmentEnd);

            if (pSignalUnitRack == NULL) {
                // if the voice was killed in this subfragment, or if the
                // filter EG is finished, switch EG1 to fade out stage
                if ((itKillEvent && killPos <= iSubFragmentEnd) ||
                    (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) &&
                     pEG2->getSegmentType() == EG::segment_end)) {
                    pEG1->enterFadeOutStage();
                    itKillEvent = Pool<Event>::Iterator();
                }

                // process envelope generators
                switch (pEG1->getSegmentType()) {
                    case EG::segment_lin:
                        fFinalVolume *= pEG1->processLin();
                        break;
                    case EG::segment_exp:
                        fFinalVolume *= pEG1->processExp();
                        break;
                    case EG::segment_end:
                        fFinalVolume *= pEG1->getLevel();
                        break; // noop
                    case EG::segment_pow:
                        fFinalVolume *= pEG1->processPow();
                        break;
                }
                switch (pEG2->getSegmentType()) {
                    case EG::segment_lin:
                        fFinalCutoff *= pEG2->processLin();
                        break;
                    case EG::segment_exp:
                        fFinalCutoff *= pEG2->processExp();
                        break;
                    case EG::segment_end:
                        fFinalCutoff *= pEG2->getLevel();
                        break; // noop
                    case EG::segment_pow:
                        fFinalCutoff *= pEG2->processPow();
                        break;
                }
                if (EG3.active()) finalSynthesisParameters.fFinalPitch *= EG3.render();

                // process low frequency oscillators
                if (bLFO1Enabled) fFinalVolume *= (1.0f - pLFO1->render());
                if (bLFO2Enabled) fFinalCutoff *= (1.0f - pLFO2->render());
                if (bLFO3Enabled) finalSynthesisParameters.fFinalPitch *= RTMath::CentsToFreqRatio(pLFO3->render());
            } else {
                // if the voice was killed in this subfragment, enter fade out stage
                if (itKillEvent && killPos <= iSubFragmentEnd) {
                    pSignalUnitRack->EnterFadeOutStage();
                    itKillEvent = Pool<Event>::Iterator();
                }

                // if the filter EG is finished, switch EG1 to fade out stage
                /*if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) &&
                    pEG2->getSegmentType() == EG::segment_end) {
                    pEG1->enterFadeOutStage();
                    itKillEvent = Pool<Event>::Iterator();
                }*/
                // TODO: ^^^

                fFinalVolume *= pSignalUnitRack->GetEndpointUnit()->GetVolume();
                fFinalCutoff = pSignalUnitRack->GetEndpointUnit()->CalculateFilterCutoff(fFinalCutoff);
                fFinalResonance = pSignalUnitRack->GetEndpointUnit()->CalculateResonance(fFinalResonance);

                finalSynthesisParameters.fFinalPitch =
                    pSignalUnitRack->GetEndpointUnit()->CalculatePitch(finalSynthesisParameters.fFinalPitch);

            }

            fFinalCutoff *= NoteCutoff;
            fFinalResonance *= NoteResonance;

            // limit the pitch so we don't read outside the buffer
            finalSynthesisParameters.fFinalPitch = RTMath::Min(finalSynthesisParameters.fFinalPitch, float(1 << CONFIG_MAX_PITCH));

            // if filter enabled then update filter coefficients
            if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode)) {
                finalSynthesisParameters.filterLeft.SetParameters(fFinalCutoff, fFinalResonance, GetEngine()->SampleRate);
                finalSynthesisParameters.filterRight.SetParameters(fFinalCutoff, fFinalResonance, GetEngine()->SampleRate);
            }

            // do we need resampling?
            const float __PLUS_ONE_CENT = 1.000577789506554859250142541782224725466f;
            const float __MINUS_ONE_CENT = 0.9994225441413807496009516495583113737666f;
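            // (these constants are 2^(1/1200) and 2^(-1/1200), i.e. a pitch
            //  deviation of exactly one cent up/down; if the final pitch stays
            //  within that window around 1.0, interpolation can be skipped)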
            const bool bResamplingRequired = !(finalSynthesisParameters.fFinalPitch <= __PLUS_ONE_CENT &&
                                               finalSynthesisParameters.fFinalPitch >= __MINUS_ONE_CENT);
            SYNTHESIS_MODE_SET_INTERPOLATE(SynthesisMode, bResamplingRequired);

            // prepare final synthesis parameters structure
            finalSynthesisParameters.uiToGo = iSubFragmentEnd - i;
            #ifdef CONFIG_INTERPOLATE_VOLUME
            finalSynthesisParameters.fFinalVolumeDeltaLeft =
                (fFinalVolume * VolumeLeft * PanLeftSmoother.render() -
                 finalSynthesisParameters.fFinalVolumeLeft) / finalSynthesisParameters.uiToGo;
            finalSynthesisParameters.fFinalVolumeDeltaRight =
                (fFinalVolume * VolumeRight * PanRightSmoother.render() -
                 finalSynthesisParameters.fFinalVolumeRight) / finalSynthesisParameters.uiToGo;
            #else
            finalSynthesisParameters.fFinalVolumeLeft =
                fFinalVolume * VolumeLeft * PanLeftSmoother.render();
            finalSynthesisParameters.fFinalVolumeRight =
                fFinalVolume * VolumeRight * PanRightSmoother.render();
            #endif
            // render audio for one subfragment
            if (!delay) RunSynthesisFunction(SynthesisMode, &finalSynthesisParameters, &loop);

            if (pSignalUnitRack == NULL) {
                // stop the rendering if volume EG is finished
                if (pEG1->getSegmentType() == EG::segment_end) break;
            } else {
                // stop the rendering if the endpoint unit is not active
                if (!pSignalUnitRack->GetEndpointUnit()->Active()) break;
            }

            const double newPos = Pos + (iSubFragmentEnd - i) * finalSynthesisParameters.fFinalPitch;

            if (pSignalUnitRack == NULL) {
                // increment envelopes' positions
                if (pEG1->active()) {

                    // if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage
                    if (SmplInfo.HasLoops && Pos <= SmplInfo.LoopStart && SmplInfo.LoopStart < newPos) {
                        pEG1->update(EG::event_hold_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                    }

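                    // advance EG1 by one subfragment step; a false/zero result
                    // from toStageEndLeft() means the current stage has just
                    // ended, so the EG is notified to move on to its next stage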
                    pEG1->increment(1);
                    if (!pEG1->toStageEndLeft()) pEG1->update(EG::event_stage_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                }
                if (pEG2->active()) {
                    pEG2->increment(1);
                    if (!pEG2->toStageEndLeft()) pEG2->update(EG::event_stage_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                }
                EG3.increment(1);
                if (!EG3.toEndLeft()) EG3.update(); // neutralize envelope coefficient if end reached
            } else {
                // if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage
                /*if (SmplInfo.HasLoops && Pos <= SmplInfo.LoopStart && SmplInfo.LoopStart < newPos) {
                    pEG1->update(EG::event_hold_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                }*/
                // TODO: ^^^

                if (!delay) pSignalUnitRack->Increment();
            }

            Pos = newPos;
            i = iSubFragmentEnd;
        }

        if (delay) return;

        if (bVoiceRequiresDedicatedRouting) {
            if (bEq) {
                pEq->RenderAudio(Samples);
                pEq->GetOutChannelLeft()->CopyTo(GetEngine()->pDedicatedVoiceChannelLeft, Samples);
                pEq->GetOutChannelRight()->CopyTo(GetEngine()->pDedicatedVoiceChannelRight, Samples);
            }
            optional<float> effectSendLevels[2] = {
                pMidiKeyInfo->ReverbSend,
                pMidiKeyInfo->ChorusSend
            };
            GetEngine()->RouteDedicatedVoiceChannels(pEngineChannel, effectSendLevels, Samples);
        } else if (bEq) {
            pEq->RenderAudio(Samples);
            pEq->GetOutChannelLeft()->MixTo(pChannel->pChannelLeft, Samples);
            pEq->GetOutChannelRight()->MixTo(pChannel->pChannelRight, Samples);
        }
    }

    /**
     * Process given list of MIDI control change, aftertouch and pitch bend
     * events for the given time.
     *
     * @param itEvent - iterator pointing to the next event to be processed
     * @param End     - youngest time stamp where processing should be stopped
     */
    void AbstractVoice::processCCEvents(RTList<Event>::Iterator& itEvent, uint End) {
        for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
            if ((itEvent->Type == Event::type_control_change || itEvent->Type == Event::type_channel_pressure)
                && itEvent->Param.CC.Controller) // if (valid) MIDI control change event
            {
                if (itEvent->Param.CC.Controller == VCFCutoffCtrl.controller) {
                    ProcessCutoffEvent(itEvent);
                }
                if (itEvent->Param.CC.Controller == VCFResonanceCtrl.controller) {
                    processResonanceEvent(itEvent);
                }
                if (itEvent->Param.CC.Controller == CTRL_TABLE_IDX_AFTERTOUCH ||
                    itEvent->Type == Event::type_channel_pressure)
                {
                    ProcessChannelPressureEvent(itEvent);
                }
                if (pSignalUnitRack == NULL) {
                    if (itEvent->Param.CC.Controller == pLFO1->ExtController) {
                        pLFO1->updateByMIDICtrlValue(itEvent->Param.CC.Value);
                    }
                    if (itEvent->Param.CC.Controller == pLFO2->ExtController) {
                        pLFO2->updateByMIDICtrlValue(itEvent->Param.CC.Value);
                    }
                    if (itEvent->Param.CC.Controller == pLFO3->ExtController) {
                        pLFO3->updateByMIDICtrlValue(itEvent->Param.CC.Value);
                    }
                }
                if (itEvent->Param.CC.Controller == 7) { // volume
                    VolumeSmoother.update(AbstractEngine::VolumeCurve[itEvent->Param.CC.Value]);
                } else if (itEvent->Param.CC.Controller == 10) { // panpot
                    MIDIPan = CalculatePan(itEvent->Param.CC.Value);
                }
            } else if (itEvent->Type == Event::type_pitchbend) { // if pitch bend event
                processPitchEvent(itEvent);
            } else if (itEvent->Type == Event::type_note_pressure) {
                ProcessPolyphonicKeyPressureEvent(itEvent);
            }

            ProcessCCEvent(itEvent);
            if (pSignalUnitRack != NULL) {
                pSignalUnitRack->ProcessCCEvent(itEvent);
            }
        }
    }

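    /**
     * Applies an incoming MIDI pitch bend event to this voice by converting
     * the raw 14 bit bend value to a frequency ratio (PitchBendRange is in
     * cents per bend step, see CalculatePitchInfo()).
     */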
    void AbstractVoice::processPitchEvent(RTList<Event>::Iterator& itEvent) {
        Pitch.PitchBend = RTMath::CentsToFreqRatio(itEvent->Param.Pitch.Pitch * Pitch.PitchBendRange);
    }

    void AbstractVoice::processResonanceEvent(RTList<Event>::Iterator& itEvent) {
        // convert absolute controller value to differential
        const int ctrldelta = itEvent->Param.CC.Value - VCFResonanceCtrl.value;
        VCFResonanceCtrl.value = itEvent->Param.CC.Value;
        const float resonancedelta = (float) ctrldelta;
        fFinalResonance += resonancedelta;
        // needed for initialization of parameter
        VCFResonanceCtrl.fvalue = itEvent->Param.CC.Value;
    }

    /**
     * Process given list of MIDI note on, note off, sustain pedal events and
     * note synthesis parameter events for the given time.
     *
     * @param itEvent - iterator pointing to the next event to be processed
     * @param End     - youngest time stamp where processing should be stopped
     */
    void AbstractVoice::processTransitionEvents(RTList<Event>::Iterator& itEvent, uint End) {
        for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
            // some voice types ignore note off
            if (!(Type & (Voice::type_one_shot | Voice::type_release_trigger | Voice::type_controller_triggered))) {
                if (itEvent->Type == Event::type_release_key) {
                    EnterReleaseStage();
                } else if (itEvent->Type == Event::type_cancel_release_key) {
                    if (pSignalUnitRack == NULL) {
                        pEG1->update(EG::event_cancel_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                        pEG2->update(EG::event_cancel_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                    } else {
                        pSignalUnitRack->CancelRelease();
                    }
                }
            }
            // process stop-note events (caused by built-in instrument script function note_off())
            if (itEvent->Type == Event::type_release_note && pNote &&
                pEngineChannel->pEngine->NoteByID( itEvent->Param.Note.ID ) == pNote)
            {
                EnterReleaseStage();
            }
            // process kill-note events (caused by built-in instrument script function fade_out())
            if (itEvent->Type == Event::type_kill_note && pNote &&
                pEngineChannel->pEngine->NoteByID( itEvent->Param.Note.ID ) == pNote)
            {
                Kill(itEvent);
            }
            // process synthesis parameter events (caused by built-in real-time instrument script functions)
            if (itEvent->Type == Event::type_note_synth_param && pNote &&
                pEngineChannel->pEngine->NoteByID( itEvent->Param.NoteSynthParam.NoteID ) == pNote)
            {
                switch (itEvent->Param.NoteSynthParam.Type) {
                    case Event::synth_param_volume:
                        NoteVolume.fadeTo(itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                        break;
                    case Event::synth_param_volume_time:
                        NoteVolume.setDefaultDuration(itEvent->Param.NoteSynthParam.AbsValue);
                        break;
                    case Event::synth_param_pitch:
                        NotePitch.fadeTo(itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                        break;
                    case Event::synth_param_pitch_time:
                        NotePitch.setDefaultDuration(itEvent->Param.NoteSynthParam.AbsValue);
                        break;
                    case Event::synth_param_pan:
                        NotePanLeft = AbstractEngine::PanCurveValueNorm(itEvent->Param.NoteSynthParam.AbsValue, 0 /*left*/);
                        NotePanRight = AbstractEngine::PanCurveValueNorm(itEvent->Param.NoteSynthParam.AbsValue, 1 /*right*/);
                        break;
                    case Event::synth_param_cutoff:
                        NoteCutoff = itEvent->Param.NoteSynthParam.AbsValue;
                        break;
                    case Event::synth_param_resonance:
                        NoteResonance = itEvent->Param.NoteSynthParam.AbsValue;
                        break;
                    case Event::synth_param_amp_lfo_depth:
                        pLFO1->setScriptDepthFactor(itEvent->Param.NoteSynthParam.AbsValue);
                        break;
                    case Event::synth_param_amp_lfo_freq:
                        pLFO1->setScriptFrequencyFactor(itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                        break;
                    case Event::synth_param_pitch_lfo_depth:
                        pLFO3->setScriptDepthFactor(itEvent->Param.NoteSynthParam.AbsValue);
                        break;
                    case Event::synth_param_pitch_lfo_freq:
                        pLFO3->setScriptFrequencyFactor(itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                        break;

                    case Event::synth_param_attack:
                    case Event::synth_param_decay:
                    case Event::synth_param_release:
                        break; // noop
                }
            }
        }
    }

    /**
     * Process given list of events aimed at all voices in a key group.
     *
     * @param itEvent - iterator pointing to the next event to be processed
     * @param End     - youngest time stamp where processing should be stopped
     */
    void AbstractVoice::processGroupEvents(RTList<Event>::Iterator& itEvent, uint End) {
        for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
            ProcessGroupEvent(itEvent);
        }
    }

    /** @brief Update current portamento position.
     *
     * Will be called when portamento mode is enabled to get the final
     * portamento position of this active voice, from which the next voice(s)
     * may continue the slide.
     *
     * @param itNoteOffEvent - event which causes this voice to die soon
     */
    void AbstractVoice::UpdatePortamentoPos(Pool<Event>::Iterator& itNoteOffEvent) {
        if (pSignalUnitRack == NULL) {
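            // convert the momentary EG3 frequency ratio back to semitones
            // (cents / 100) and add it to this voice's key to obtain the
            // key position the glide has reached at the note-off time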
            const float fFinalEG3Level = EG3.level(itNoteOffEvent->FragmentPos());
            pEngineChannel->PortamentoPos = (float) MIDIKey() + RTMath::FreqRatioToCents(fFinalEG3Level) * 0.01f;
        } else {
            // TODO:
        }
    }

    /**
     * Kill the voice in the regular sense. Let the voice render audio until
     * the kill event actually occurred, then fade down the volume level
     * very quickly and finally let the voice die. Unlike a normal release
     * of a voice, a kill process cannot be cancelled and is therefore
     * usually used for voice stealing and key group conflicts.
     *
     * @param itKillEvent - event which caused the voice to be killed
     */
    void AbstractVoice::Kill(Pool<Event>::Iterator& itKillEvent) {
        #if CONFIG_DEVMODE
        if (!itKillEvent) dmsg(1,("AbstractVoice::Kill(): ERROR, !itKillEvent !!!\n"));
        if (itKillEvent && !itKillEvent.isValid()) dmsg(1,("AbstractVoice::Kill(): ERROR, itKillEvent invalid !!!\n"));
        #endif // CONFIG_DEVMODE

        if (itTriggerEvent && itKillEvent->FragmentPos() <= itTriggerEvent->FragmentPos()) return;
        this->itKillEvent = itKillEvent;
    }

    Voice::PitchInfo AbstractVoice::CalculatePitchInfo(int PitchBend) {
        PitchInfo pitch;
        double pitchbasecents = InstrInfo.FineTune + RgnInfo.FineTune + GetEngine()->ScaleTuning[MIDIKey() % 12];

        // GSt behaviour: maximum transpose up is 40 semitones. If
        // MIDI key is more than 40 semitones above unity note,
        // the transpose is not done.
        if (!SmplInfo.Unpitched && (MIDIKey() - (int) RgnInfo.UnityNote) < 40) pitchbasecents += (MIDIKey() - (int) RgnInfo.UnityNote) * 100;

        pitch.PitchBase = RTMath::CentsToFreqRatioUnlimited(pitchbasecents) * (double(SmplInfo.SampleRate) / double(GetEngine()->SampleRate));
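        // bend range in cents per pitch bend step: 100 cents per semitone
        // times the instrument's bend range in semitones, divided by the
        // 8192 steps of a full (14 bit) bend in one direction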
        pitch.PitchBendRange = 1.0 / 8192.0 * 100.0 * InstrInfo.PitchbendRange;
        pitch.PitchBend = RTMath::CentsToFreqRatio(PitchBend * pitch.PitchBendRange);

        return pitch;
    }

    void AbstractVoice::onScaleTuningChanged() {
        PitchInfo pitch = this->Pitch;
        double pitchbasecents = InstrInfo.FineTune + RgnInfo.FineTune + GetEngine()->ScaleTuning[MIDIKey() % 12];

        // GSt behaviour: maximum transpose up is 40 semitones. If
        // MIDI key is more than 40 semitones above unity note,
        // the transpose is not done.
        if (!SmplInfo.Unpitched && (MIDIKey() - (int) RgnInfo.UnityNote) < 40) pitchbasecents += (MIDIKey() - (int) RgnInfo.UnityNote) * 100;

        pitch.PitchBase = RTMath::CentsToFreqRatioUnlimited(pitchbasecents) * (double(SmplInfo.SampleRate) / double(GetEngine()->SampleRate));
        this->Pitch = pitch;
    }

    double AbstractVoice::CalculateVolume(double velocityAttenuation) {
        // For 16 bit samples, we downscale by 32768 to convert from
        // int16 value range to DSP value range (which is
        // -1.0..1.0). For 24 bit, we downscale from int32.
        float volume = velocityAttenuation / (SmplInfo.BitDepth == 16 ? 32768.0f : 32768.0f * 65536.0f);

        volume *= GetSampleAttenuation() * pEngineChannel->GlobalVolume * GLOBAL_VOLUME;

        // the volume of release triggered samples depends on note length
        if (Type & Voice::type_release_trigger) {
            float noteLength = float(GetEngine()->FrameTime + Delay -
                GetNoteOnTime(MIDIKey()) ) / GetEngine()->SampleRate;

            volume *= GetReleaseTriggerAttenuation(noteLength);
        }

        return volume;
    }

    float AbstractVoice::GetReleaseTriggerAttenuation(float noteLength) {
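        // linear attenuation: the longer the note was held (noteLength in
        // seconds), the quieter the release triggered sample plays back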
        return 1 - RgnInfo.ReleaseTriggerDecay * noteLength;
    }

    void AbstractVoice::EnterReleaseStage() {
        if (pSignalUnitRack == NULL) {
            pEG1->update(EG::event_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
            pEG2->update(EG::event_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
        } else {
            pSignalUnitRack->EnterReleaseStage();
        }
    }

    bool AbstractVoice::EG1Finished() {
        if (pSignalUnitRack == NULL) {
            return pEG1->getSegmentType() == EG::segment_end;
        } else {
            return !pSignalUnitRack->GetEndpointUnit()->Active();
        }
    }

} // namespace LinuxSampler
