
Contents of /linuxsampler/trunk/src/engines/common/AbstractVoice.cpp



Revision 2963 - Sun Jul 17 18:41:21 2016 UTC - by schoenebeck
File size: 42420 byte(s)
* All engines: Increased ramp speed of the volume smoother and pan smoother
  (while the slow rate of the crossfade smoother is preserved) to allow quick
  volume and pan changes, e.g. by instrument scripts (see the sketch below).
* Bumped version (2.0.0.svn21).
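
The smoothers mentioned in this log entry are short parameter ramps that glide towards a new target value instead of jumping to it. The following standalone sketch is not LinuxSampler's actual Smoother class: the OnePoleSmoother name, the three-argument trigger() signature, the millisecond parameter and the coefficient math are invented here for illustration, loosely mirroring the trigger()/update()/render() calls seen in AbstractVoice.cpp below. It merely shows why a faster ramp rate lets volume/pan changes from a script settle almost immediately, while a slower rate keeps fades mellow.

#include <cmath>
#include <cstdio>

class OnePoleSmoother {
public:
    // timeMs: rough ramp time; a shorter time means a faster (steeper) ramp
    void trigger(float value, float updateRate, float timeMs) {
        current = target = value;
        // one-pole coefficient: after roughly timeMs the output has covered
        // about 63% of the distance to the target
        coeff = std::exp(-1.0f / (0.001f * timeMs * updateRate));
    }
    void update(float newTarget) { target = newTarget; } // e.g. CC7/CC10 or change_vol()/change_pan()
    float render() {                                      // called once per subfragment
        current = target + coeff * (current - target);
        return current;
    }
private:
    float current = 0.f, target = 0.f, coeff = 1.f;
};

int main() {
    const float subfragmentRate = 44100.0f / 32.0f; // like SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE
    OnePoleSmoother mellow, quick;
    mellow.trigger(1.0f, subfragmentRate, 50.0f); // slow ramp (~50 ms)
    quick.trigger(1.0f, subfragmentRate, 5.0f);   // fast ramp (~5 ms): "quick volume and pan changes"
    mellow.update(0.0f);                          // both smoothers now glide towards 0
    quick.update(0.0f);
    for (int i = 0; i < 8; ++i)
        std::printf("subfragment %d: mellow=%.3f quick=%.3f\n", i, mellow.render(), quick.render());
    return 0;
}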

1 /***************************************************************************
2 * *
3 * LinuxSampler - modular, streaming capable sampler *
4 * *
5 * Copyright (C) 2003,2004 by Benno Senoner and Christian Schoenebeck *
6 * Copyright (C) 2005-2008 Christian Schoenebeck *
7 * Copyright (C) 2009-2015 Christian Schoenebeck and Grigor Iliev *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License as published by *
11 * the Free Software Foundation; either version 2 of the License, or *
12 * (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details. *
18 * *
19 * You should have received a copy of the GNU General Public License *
20 * along with this program; if not, write to the Free Software *
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, *
22 * MA 02111-1307 USA *
23 ***************************************************************************/
24
25 #include "AbstractVoice.h"
26
27 namespace LinuxSampler {
28
29 AbstractVoice::AbstractVoice(SignalUnitRack* pRack): pSignalUnitRack(pRack) {
30 pEngineChannel = NULL;
31 pLFO1 = new LFOUnsigned(1.0f); // amplitude LFO (0..1 range)
32 pLFO2 = new LFOUnsigned(1.0f); // filter LFO (0..1 range)
33 pLFO3 = new LFOSigned(1200.0f); // pitch LFO (-1200..+1200 range)
34 PlaybackState = playback_state_end;
35 SynthesisMode = 0; // set all mode bits to 0 first
36 // select synthesis implementation (asm core is not supported ATM)
37 #if 0 // CONFIG_ASM && ARCH_X86
38 SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, Features::supportsMMX() && Features::supportsSSE());
39 #else
40 SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, false);
41 #endif
42 SYNTHESIS_MODE_SET_PROFILING(SynthesisMode, gig::Profiler::isEnabled());
43
44 finalSynthesisParameters.filterLeft.Reset();
45 finalSynthesisParameters.filterRight.Reset();
46
47 pEq = NULL;
48 bEqSupport = false;
49 }
50
51 AbstractVoice::~AbstractVoice() {
52 if (pLFO1) delete pLFO1;
53 if (pLFO2) delete pLFO2;
54 if (pLFO3) delete pLFO3;
55
56 if(pEq != NULL) delete pEq;
57 }
58
59 void AbstractVoice::CreateEq() {
60 if(!bEqSupport) return;
61 if(pEq != NULL) delete pEq;
62 pEq = new EqSupport;
63 pEq->InitEffect(GetEngine()->pAudioOutputDevice);
64 }
65
66 /**
67 * Resets voice variables. Should only be called if rendering process is
68 * suspended / not running.
69 */
70 void AbstractVoice::Reset() {
71 finalSynthesisParameters.filterLeft.Reset();
72 finalSynthesisParameters.filterRight.Reset();
73 DiskStreamRef.pStream = NULL;
74 DiskStreamRef.hStream = 0;
75 DiskStreamRef.State = Stream::state_unused;
76 DiskStreamRef.OrderID = 0;
77 PlaybackState = playback_state_end;
78 itTriggerEvent = Pool<Event>::Iterator();
79 itKillEvent = Pool<Event>::Iterator();
80 }
81
82 /**
83 * Initializes and triggers the voice, a disk stream will be launched if
84 * needed.
85 *
86 * @param pEngineChannel - engine channel on which this voice was ordered
87 * @param itNoteOnEvent - event that caused triggering of this voice
88 * @param PitchBend - MIDI detune factor (-8192 ... +8191)
89 * @param pRegion - points to the region which provides the sample wave(s) and articulation data
90 * @param VoiceType - type of this voice
91 * @param iKeyGroup - a value > 0 defines a key group of which this voice is a member
92 * @returns 0 on success, a value < 0 if the voice wasn't triggered
93 * (either due to an error or e.g. because no region is
94 * defined for the given key)
95 */
96 int AbstractVoice::Trigger (
97 AbstractEngineChannel* pEngineChannel,
98 Pool<Event>::Iterator& itNoteOnEvent,
99 int PitchBend,
100 type_t VoiceType,
101 int iKeyGroup
102 ) {
103 this->pEngineChannel = pEngineChannel;
104 Orphan = false;
105
106 #if CONFIG_DEVMODE
107 if (itNoteOnEvent->FragmentPos() > GetEngine()->MaxSamplesPerCycle) { // just a sanity check for debugging
108 dmsg(1,("Voice::Trigger(): ERROR, TriggerDelay > Totalsamples\n"));
109 }
110 #endif // CONFIG_DEVMODE
111
112 Type = VoiceType;
113 pNote = pEngineChannel->pEngine->NoteByID( itNoteOnEvent->Param.Note.ID );
114 PlaybackState = playback_state_init; // mark voice as triggered, but no audio rendered yet
115 Delay = itNoteOnEvent->FragmentPos();
116 itTriggerEvent = itNoteOnEvent;
117 itKillEvent = Pool<Event>::Iterator();
118 MidiKeyBase* pKeyInfo = GetMidiKeyInfo(MIDIKey());
119
120 pGroupEvents = iKeyGroup ? pEngineChannel->ActiveKeyGroups[iKeyGroup] : 0;
121
122 SmplInfo = GetSampleInfo();
123 RgnInfo = GetRegionInfo();
124 InstrInfo = GetInstrumentInfo();
125
126 MIDIPan = CalculatePan(pEngineChannel->iLastPanRequest);
127
128 AboutToTrigger();
129
130 // calculate volume
131 const double velocityAttenuation = GetVelocityAttenuation(itNoteOnEvent->Param.Note.Velocity);
132 float volume = CalculateVolume(velocityAttenuation) * pKeyInfo->Volume;
133 if (volume <= 0) return -1;
134
135 // select channel mode (mono or stereo)
136 SYNTHESIS_MODE_SET_CHANNELS(SynthesisMode, SmplInfo.ChannelCount == 2);
137 // select bit depth (16 or 24)
138 SYNTHESIS_MODE_SET_BITDEPTH24(SynthesisMode, SmplInfo.BitDepth == 24);
139
140 // get starting crossfade volume level
141 float crossfadeVolume = CalculateCrossfadeVolume(itNoteOnEvent->Param.Note.Velocity);
142
143 VolumeLeft = volume * pKeyInfo->PanLeft;
144 VolumeRight = volume * pKeyInfo->PanRight;
145
146 // this rate is used for rather mellow volume fades
147 const float subfragmentRate = GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE;
148 // this rate is used for very fast volume fades
149 const float quickRampRate = RTMath::Min(subfragmentRate, GetEngine()->SampleRate * 0.001f /* 1ms */);
150 CrossfadeSmoother.trigger(crossfadeVolume, subfragmentRate);
151
152 VolumeSmoother.trigger(pEngineChannel->MidiVolume, subfragmentRate);
153 NoteVolumeSmoother.trigger(pNote ? pNote->Override.Volume : 1.f, quickRampRate);
154
155 // Check if the sample needs disk streaming or is too short for that
156 long cachedsamples = GetSampleCacheSize() / SmplInfo.FrameSize;
157 DiskVoice = cachedsamples < SmplInfo.TotalFrameCount;
158
159 SetSampleStartOffset();
160
161 if (DiskVoice) { // voice to be streamed from disk
162 if (cachedsamples > (GetEngine()->MaxSamplesPerCycle << CONFIG_MAX_PITCH)) {
163 MaxRAMPos = cachedsamples - (GetEngine()->MaxSamplesPerCycle << CONFIG_MAX_PITCH) / SmplInfo.ChannelCount; //TODO: this calculation is too pessimistic and might better be moved to the Render() method, so that it calculates MaxRAMPos dependent on the current demand of sample points to be rendered (e.g. in case of JACK)
164 } else {
165 // The cache is too small to fit a max sample buffer.
166 // Setting MaxRAMPos to 0 will probably cause a click
167 // in the audio, but it's better than not handling
168 // this case at all, which would have caused the
169 // unsigned MaxRAMPos to be set to a negative number.
170 MaxRAMPos = 0;
171 }
172
173 // check if there's a loop defined which completely fits into the cached (RAM) part of the sample
174 RAMLoop = (SmplInfo.HasLoops && (SmplInfo.LoopStart + SmplInfo.LoopLength) <= MaxRAMPos);
175
176 if (OrderNewStream()) return -1;
177 dmsg(4,("Disk voice launched (cached samples: %ld, total Samples: %d, MaxRAMPos: %lu, RAMLooping: %s)\n", cachedsamples, SmplInfo.TotalFrameCount, MaxRAMPos, (RAMLoop) ? "yes" : "no"));
178 }
179 else { // RAM only voice
180 MaxRAMPos = cachedsamples;
181 RAMLoop = (SmplInfo.HasLoops);
182 dmsg(4,("RAM only voice launched (Looping: %s)\n", (RAMLoop) ? "yes" : "no"));
183 }
184 if (RAMLoop) {
185 loop.uiTotalCycles = SmplInfo.LoopPlayCount;
186 loop.uiCyclesLeft = SmplInfo.LoopPlayCount;
187 loop.uiStart = SmplInfo.LoopStart;
188 loop.uiEnd = SmplInfo.LoopStart + SmplInfo.LoopLength;
189 loop.uiSize = SmplInfo.LoopLength;
190 }
191
192 Pitch = CalculatePitchInfo(PitchBend);
193 NotePitch = (pNote) ? pNote->Override.Pitch : 1.0f;
194 NoteCutoff = (pNote) ? pNote->Override.Cutoff : 1.0f;
195 NoteResonance = (pNote) ? pNote->Override.Resonance : 1.0f;
196
197 // the lengths of the decay and release curves depend on the velocity
198 const double velrelease = 1 / GetVelocityRelease(itNoteOnEvent->Param.Note.Velocity);
199
200 if (pSignalUnitRack == NULL) { // setup EG 1 (VCA EG)
201 // get current value of EG1 controller
202 double eg1controllervalue = GetEG1ControllerValue(itNoteOnEvent->Param.Note.Velocity);
203
204 // calculate influence of EG1 controller on EG1's parameters
205 EGInfo egInfo = CalculateEG1ControllerInfluence(eg1controllervalue);
206
207 if (pNote) {
208 egInfo.Attack *= pNote->Override.Attack;
209 egInfo.Decay *= pNote->Override.Decay;
210 egInfo.Release *= pNote->Override.Release;
211 }
212
213 TriggerEG1(egInfo, velrelease, velocityAttenuation, GetEngine()->SampleRate, itNoteOnEvent->Param.Note.Velocity);
214 } else {
215 pSignalUnitRack->Trigger();
216 }
217
218 const uint8_t pan = (pSignalUnitRack) ? pSignalUnitRack->GetEndpointUnit()->CalculatePan(MIDIPan) : MIDIPan;
219 NotePanLeft = (pNote) ? AbstractEngine::PanCurveValueNorm(pNote->Override.Pan, 0 /*left*/ ) : 1.f;
220 NotePanRight = (pNote) ? AbstractEngine::PanCurveValueNorm(pNote->Override.Pan, 1 /*right*/) : 1.f;
221 PanLeftSmoother.trigger(
222 AbstractEngine::PanCurve[128 - pan] * NotePanLeft,
223 quickRampRate //NOTE: maybe we should have 2 separate pan smoothers, one for MIDI CC10 (with slow rate) and one for instrument script change_pan() calls (with fast rate)
224 );
225 PanRightSmoother.trigger(
226 AbstractEngine::PanCurve[pan] * NotePanRight,
227 quickRampRate //NOTE: maybe we should have 2 separate pan smoothers, one for MIDI CC10 (with slow rate) and one for instrument script change_pan() calls (with fast rate)
228 );
229
230 #ifdef CONFIG_INTERPOLATE_VOLUME
231 // setup initial volume in synthesis parameters
232 #ifdef CONFIG_PROCESS_MUTED_CHANNELS
233 if (pEngineChannel->GetMute()) {
234 finalSynthesisParameters.fFinalVolumeLeft = 0;
235 finalSynthesisParameters.fFinalVolumeRight = 0;
236 }
237 else
238 #else
239 {
240 float finalVolume;
241 if (pSignalUnitRack == NULL) {
242 finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * pEG1->getLevel();
243 } else {
244 finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * pSignalUnitRack->GetEndpointUnit()->GetVolume();
245 }
246
247 finalSynthesisParameters.fFinalVolumeLeft = finalVolume * VolumeLeft * PanLeftSmoother.render();
248 finalSynthesisParameters.fFinalVolumeRight = finalVolume * VolumeRight * PanRightSmoother.render();
249 }
250 #endif
251 #endif
252
253 if (pSignalUnitRack == NULL) {
254 // setup EG 2 (VCF Cutoff EG)
255 {
256 // get current value of EG2 controller
257 double eg2controllervalue = GetEG2ControllerValue(itNoteOnEvent->Param.Note.Velocity);
258
259 // calculate influence of EG2 controller on EG2's parameters
260 EGInfo egInfo = CalculateEG2ControllerInfluence(eg2controllervalue);
261
262 TriggerEG2(egInfo, velrelease, velocityAttenuation, GetEngine()->SampleRate, itNoteOnEvent->Param.Note.Velocity);
263 }
264
265
266 // setup EG 3 (VCO EG)
267 {
268 // if portamento mode is on, we dedicate EG3 purely to portamento, otherwise we do as told by the patch
269 bool bPortamento = pEngineChannel->PortamentoMode && pEngineChannel->PortamentoPos >= 0.0f;
270 float eg3depth = (bPortamento)
271 ? RTMath::CentsToFreqRatio((pEngineChannel->PortamentoPos - (float) MIDIKey()) * 100)
272 : RTMath::CentsToFreqRatio(RgnInfo.EG3Depth);
273 float eg3time = (bPortamento)
274 ? pEngineChannel->PortamentoTime
275 : RgnInfo.EG3Attack;
276 EG3.trigger(eg3depth, eg3time, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
277 dmsg(5,("PortamentoPos=%f, depth=%f, time=%f\n", pEngineChannel->PortamentoPos, eg3depth, eg3time));
278 }
279
280
281 // setup LFO 1 (VCA LFO)
282 InitLFO1();
283 // setup LFO 2 (VCF Cutoff LFO)
284 InitLFO2();
285 // setup LFO 3 (VCO LFO)
286 InitLFO3();
287 }
288
289
290 #if CONFIG_FORCE_FILTER
291 const bool bUseFilter = true;
292 #else // use filter only if the instrument file says so
293 const bool bUseFilter = RgnInfo.VCFEnabled;
294 #endif // CONFIG_FORCE_FILTER
295 SYNTHESIS_MODE_SET_FILTER(SynthesisMode, bUseFilter);
296 if (bUseFilter) {
297 #ifdef CONFIG_OVERRIDE_CUTOFF_CTRL
298 VCFCutoffCtrl.controller = CONFIG_OVERRIDE_CUTOFF_CTRL;
299 #else // use the one defined in the instrument file
300 VCFCutoffCtrl.controller = GetVCFCutoffCtrl();
301 #endif // CONFIG_OVERRIDE_CUTOFF_CTRL
302
303 #ifdef CONFIG_OVERRIDE_RESONANCE_CTRL
304 VCFResonanceCtrl.controller = CONFIG_OVERRIDE_RESONANCE_CTRL;
305 #else // use the one defined in the instrument file
306 VCFResonanceCtrl.controller = GetVCFResonanceCtrl();
307 #endif // CONFIG_OVERRIDE_RESONANCE_CTRL
308
309 #ifndef CONFIG_OVERRIDE_FILTER_TYPE
310 finalSynthesisParameters.filterLeft.SetType(RgnInfo.VCFType);
311 finalSynthesisParameters.filterRight.SetType(RgnInfo.VCFType);
312 #else // override filter type
313 finalSynthesisParameters.filterLeft.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
314 finalSynthesisParameters.filterRight.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
315 #endif // CONFIG_OVERRIDE_FILTER_TYPE
316
317 VCFCutoffCtrl.value = pEngineChannel->ControllerTable[VCFCutoffCtrl.controller];
318 VCFResonanceCtrl.value = pEngineChannel->ControllerTable[VCFResonanceCtrl.controller];
319
320 // calculate cutoff frequency
321 CutoffBase = CalculateCutoffBase(itNoteOnEvent->Param.Note.Velocity);
322
323 VCFCutoffCtrl.fvalue = CalculateFinalCutoff(CutoffBase);
324
325 // calculate resonance
326 float resonance = (float) (VCFResonanceCtrl.controller ? VCFResonanceCtrl.value : RgnInfo.VCFResonance);
327 VCFResonanceCtrl.fvalue = resonance;
328 } else {
329 VCFCutoffCtrl.controller = 0;
330 VCFResonanceCtrl.controller = 0;
331 }
332
333 const bool bEq =
334 pSignalUnitRack != NULL && pSignalUnitRack->HasEq() && pEq->HasSupport();
335
336 if (bEq) {
337 pEq->GetInChannelLeft()->Clear();
338 pEq->GetInChannelRight()->Clear();
339 pEq->RenderAudio(GetEngine()->pAudioOutputDevice->MaxSamplesPerCycle());
340 }
341
342 return 0; // success
343 }
344
345 void AbstractVoice::SetSampleStartOffset() {
346 finalSynthesisParameters.dPos = RgnInfo.SampleStartOffset; // offset where we should start playback of sample (0 - 2000 sample points)
347 Pos = RgnInfo.SampleStartOffset;
348 }
349
350 /**
351 * Synthesizes the current audio fragment for this voice.
352 *
353 * @param Samples - number of sample points to be rendered in this audio
354 * fragment cycle
355 * @param pSrc - pointer to input sample data
356 * @param Skip - number of sample points to skip in output buffer
357 */
358 void AbstractVoice::Synthesize(uint Samples, sample_t* pSrc, uint Skip) {
359 bool delay = false; // Whether the voice playback should be delayed for this call
360
361 if (pSignalUnitRack != NULL) {
362 uint delaySteps = pSignalUnitRack->GetEndpointUnit()->DelayTrigger();
363 if (delaySteps > 0) { // delay on the endpoint unit means delay of the voice playback
364 if (delaySteps >= Samples) {
365 pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(Samples);
366 delay = true;
367 } else {
368 pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(delaySteps);
369 Samples -= delaySteps;
370 Skip += delaySteps;
371 }
372 }
373 }
374
375 AbstractEngineChannel* pChannel = pEngineChannel;
376 MidiKeyBase* pMidiKeyInfo = GetMidiKeyInfo(MIDIKey());
377
378 const bool bVoiceRequiresDedicatedRouting =
379 pEngineChannel->GetFxSendCount() > 0 &&
380 (pMidiKeyInfo->ReverbSend || pMidiKeyInfo->ChorusSend);
381
382 const bool bEq =
383 pSignalUnitRack != NULL && pSignalUnitRack->HasEq() && pEq->HasSupport();
384
385 if (bEq) {
386 pEq->GetInChannelLeft()->Clear();
387 pEq->GetInChannelRight()->Clear();
388 finalSynthesisParameters.pOutLeft = &pEq->GetInChannelLeft()->Buffer()[Skip];
389 finalSynthesisParameters.pOutRight = &pEq->GetInChannelRight()->Buffer()[Skip];
390 pSignalUnitRack->UpdateEqSettings(pEq);
391 } else if (bVoiceRequiresDedicatedRouting) {
392 finalSynthesisParameters.pOutLeft = &GetEngine()->pDedicatedVoiceChannelLeft->Buffer()[Skip];
393 finalSynthesisParameters.pOutRight = &GetEngine()->pDedicatedVoiceChannelRight->Buffer()[Skip];
394 } else {
395 finalSynthesisParameters.pOutLeft = &pChannel->pChannelLeft->Buffer()[Skip];
396 finalSynthesisParameters.pOutRight = &pChannel->pChannelRight->Buffer()[Skip];
397 }
398 finalSynthesisParameters.pSrc = pSrc;
399
400 RTList<Event>::Iterator itCCEvent = pChannel->pEvents->first();
401 RTList<Event>::Iterator itNoteEvent;
402 GetFirstEventOnKey(HostKey(), itNoteEvent);
403
404 RTList<Event>::Iterator itGroupEvent;
405 if (pGroupEvents && !Orphan) itGroupEvent = pGroupEvents->first();
406
407 if (itTriggerEvent) { // skip events that happened before this voice was triggered
408 while (itCCEvent && itCCEvent->FragmentPos() <= Skip) ++itCCEvent;
409 while (itGroupEvent && itGroupEvent->FragmentPos() <= Skip) ++itGroupEvent;
410
411 // we can't simply compare the timestamps here, because note events
412 // might happen on the same time stamp, so we have to rely on the
413 // actual sequence in which the note events arrived instead (see bug #112)
414 for (; itNoteEvent; ++itNoteEvent) {
415 if (itTriggerEvent == itNoteEvent) {
416 ++itNoteEvent;
417 break;
418 }
419 }
420 }
421
422 uint killPos;
423 if (itKillEvent) {
424 int maxFadeOutPos = Samples - GetEngine()->GetMinFadeOutSamples();
425 if (maxFadeOutPos < 0) {
426 // There's not enough space in buffer to do a fade out
427 // from max volume (this can only happen for audio
428 // drivers that use Samples < MaxSamplesPerCycle).
429 // End the EG1 here, at pos 0, with a shorter max fade
430 // out time.
431 if (pSignalUnitRack == NULL) {
432 pEG1->enterFadeOutStage(Samples / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
433 } else {
434 pSignalUnitRack->EnterFadeOutStage(Samples / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
435 }
436 itKillEvent = Pool<Event>::Iterator();
437 } else {
438 killPos = RTMath::Min(itKillEvent->FragmentPos(), maxFadeOutPos);
439 }
440 }
441
442 uint i = Skip;
443 while (i < Samples) {
444 int iSubFragmentEnd = RTMath::Min(i + CONFIG_DEFAULT_SUBFRAGMENT_SIZE, Samples);
445
446 // initialize all final synthesis parameters
447 fFinalCutoff = VCFCutoffCtrl.fvalue;
448 fFinalResonance = VCFResonanceCtrl.fvalue;
449
450 // process MIDI control change, aftertouch and pitchbend events for this subfragment
451 processCCEvents(itCCEvent, iSubFragmentEnd);
452 uint8_t pan = MIDIPan;
453 if (pSignalUnitRack != NULL) pan = pSignalUnitRack->GetEndpointUnit()->CalculatePan(MIDIPan);
454
455 PanLeftSmoother.update(AbstractEngine::PanCurve[128 - pan] * NotePanLeft);
456 PanRightSmoother.update(AbstractEngine::PanCurve[pan] * NotePanRight);
457
458 finalSynthesisParameters.fFinalPitch = Pitch.PitchBase * Pitch.PitchBend * NotePitch;
459
460 float fFinalVolume = VolumeSmoother.render() * CrossfadeSmoother.render() * NoteVolumeSmoother.render();
461 #ifdef CONFIG_PROCESS_MUTED_CHANNELS
462 if (pChannel->GetMute()) fFinalVolume = 0;
463 #endif
464
465 // process transition events (note on, note off & sustain pedal)
466 processTransitionEvents(itNoteEvent, iSubFragmentEnd);
467 processGroupEvents(itGroupEvent, iSubFragmentEnd);
468
469 if (pSignalUnitRack == NULL) {
470 // if the voice was killed in this subfragment, or if the
471 // filter EG is finished, switch EG1 to fade out stage
472 if ((itKillEvent && killPos <= iSubFragmentEnd) ||
473 (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) &&
474 pEG2->getSegmentType() == EG::segment_end)) {
475 pEG1->enterFadeOutStage();
476 itKillEvent = Pool<Event>::Iterator();
477 }
478
479 // process envelope generators
480 switch (pEG1->getSegmentType()) {
481 case EG::segment_lin:
482 fFinalVolume *= pEG1->processLin();
483 break;
484 case EG::segment_exp:
485 fFinalVolume *= pEG1->processExp();
486 break;
487 case EG::segment_end:
488 fFinalVolume *= pEG1->getLevel();
489 break; // noop
490 case EG::segment_pow:
491 fFinalVolume *= pEG1->processPow();
492 break;
493 }
494 switch (pEG2->getSegmentType()) {
495 case EG::segment_lin:
496 fFinalCutoff *= pEG2->processLin();
497 break;
498 case EG::segment_exp:
499 fFinalCutoff *= pEG2->processExp();
500 break;
501 case EG::segment_end:
502 fFinalCutoff *= pEG2->getLevel();
503 break; // noop
504 case EG::segment_pow:
505 fFinalCutoff *= pEG2->processPow();
506 break;
507 }
508 if (EG3.active()) finalSynthesisParameters.fFinalPitch *= EG3.render();
509
510 // process low frequency oscillators
511 if (bLFO1Enabled) fFinalVolume *= (1.0f - pLFO1->render());
512 if (bLFO2Enabled) fFinalCutoff *= (1.0f - pLFO2->render());
513 if (bLFO3Enabled) finalSynthesisParameters.fFinalPitch *= RTMath::CentsToFreqRatio(pLFO3->render());
514 } else {
515 // if the voice was killed in this subfragment, enter fade out stage
516 if (itKillEvent && killPos <= iSubFragmentEnd) {
517 pSignalUnitRack->EnterFadeOutStage();
518 itKillEvent = Pool<Event>::Iterator();
519 }
520
521 // if the filter EG is finished, switch EG1 to fade out stage
522 /*if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) &&
523 pEG2->getSegmentType() == EG::segment_end) {
524 pEG1->enterFadeOutStage();
525 itKillEvent = Pool<Event>::Iterator();
526 }*/
527 // TODO: ^^^
528
529 fFinalVolume *= pSignalUnitRack->GetEndpointUnit()->GetVolume();
530 fFinalCutoff = pSignalUnitRack->GetEndpointUnit()->CalculateFilterCutoff(fFinalCutoff);
531 fFinalResonance = pSignalUnitRack->GetEndpointUnit()->CalculateResonance(fFinalResonance);
532
533 finalSynthesisParameters.fFinalPitch =
534 pSignalUnitRack->GetEndpointUnit()->CalculatePitch(finalSynthesisParameters.fFinalPitch);
535
536 }
537
538 fFinalCutoff *= NoteCutoff;
539 fFinalResonance *= NoteResonance;
540
541 // limit the pitch so we don't read outside the buffer
542 finalSynthesisParameters.fFinalPitch = RTMath::Min(finalSynthesisParameters.fFinalPitch, float(1 << CONFIG_MAX_PITCH));
543
544 // if filter enabled then update filter coefficients
545 if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode)) {
546 finalSynthesisParameters.filterLeft.SetParameters(fFinalCutoff, fFinalResonance, GetEngine()->SampleRate);
547 finalSynthesisParameters.filterRight.SetParameters(fFinalCutoff, fFinalResonance, GetEngine()->SampleRate);
548 }
549
550 // do we need resampling?
551 const float __PLUS_ONE_CENT = 1.000577789506554859250142541782224725466f;
552 const float __MINUS_ONE_CENT = 0.9994225441413807496009516495583113737666f;
553 const bool bResamplingRequired = !(finalSynthesisParameters.fFinalPitch <= __PLUS_ONE_CENT &&
554 finalSynthesisParameters.fFinalPitch >= __MINUS_ONE_CENT);
555 SYNTHESIS_MODE_SET_INTERPOLATE(SynthesisMode, bResamplingRequired);
556
557 // prepare final synthesis parameters structure
558 finalSynthesisParameters.uiToGo = iSubFragmentEnd - i;
559 #ifdef CONFIG_INTERPOLATE_VOLUME
560 finalSynthesisParameters.fFinalVolumeDeltaLeft =
561 (fFinalVolume * VolumeLeft * PanLeftSmoother.render() -
562 finalSynthesisParameters.fFinalVolumeLeft) / finalSynthesisParameters.uiToGo;
563 finalSynthesisParameters.fFinalVolumeDeltaRight =
564 (fFinalVolume * VolumeRight * PanRightSmoother.render() -
565 finalSynthesisParameters.fFinalVolumeRight) / finalSynthesisParameters.uiToGo;
566 #else
567 finalSynthesisParameters.fFinalVolumeLeft =
568 fFinalVolume * VolumeLeft * PanLeftSmoother.render();
569 finalSynthesisParameters.fFinalVolumeRight =
570 fFinalVolume * VolumeRight * PanRightSmoother.render();
571 #endif
572 // render audio for one subfragment
573 if (!delay) RunSynthesisFunction(SynthesisMode, &finalSynthesisParameters, &loop);
574
575 if (pSignalUnitRack == NULL) {
576 // stop the rendering if volume EG is finished
577 if (pEG1->getSegmentType() == EG::segment_end) break;
578 } else {
579 // stop the rendering if the endpoint unit is not active
580 if (!pSignalUnitRack->GetEndpointUnit()->Active()) break;
581 }
582
583 const double newPos = Pos + (iSubFragmentEnd - i) * finalSynthesisParameters.fFinalPitch;
584
585 if (pSignalUnitRack == NULL) {
586 // increment envelopes' positions
587 if (pEG1->active()) {
588
589 // if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage
590 if (SmplInfo.HasLoops && Pos <= SmplInfo.LoopStart && SmplInfo.LoopStart < newPos) {
591 pEG1->update(EG::event_hold_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
592 }
593
594 pEG1->increment(1);
595 if (!pEG1->toStageEndLeft()) pEG1->update(EG::event_stage_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
596 }
597 if (pEG2->active()) {
598 pEG2->increment(1);
599 if (!pEG2->toStageEndLeft()) pEG2->update(EG::event_stage_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
600 }
601 EG3.increment(1);
602 if (!EG3.toEndLeft()) EG3.update(); // neutralize envelope coefficient if end reached
603 } else {
604 // if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage
605 /*if (SmplInfo.HasLoops && Pos <= SmplInfo.LoopStart && SmplInfo.LoopStart < newPos) {
606 pEG1->update(EG::event_hold_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
607 }*/
608 // TODO: ^^^
609
610 if (!delay) pSignalUnitRack->Increment();
611 }
612
613 Pos = newPos;
614 i = iSubFragmentEnd;
615 }
616
617 if (delay) return;
618
619 if (bVoiceRequiresDedicatedRouting) {
620 if (bEq) {
621 pEq->RenderAudio(Samples);
622 pEq->GetOutChannelLeft()->CopyTo(GetEngine()->pDedicatedVoiceChannelLeft, Samples);
623 pEq->GetOutChannelRight()->CopyTo(GetEngine()->pDedicatedVoiceChannelRight, Samples);
624 }
625 optional<float> effectSendLevels[2] = {
626 pMidiKeyInfo->ReverbSend,
627 pMidiKeyInfo->ChorusSend
628 };
629 GetEngine()->RouteDedicatedVoiceChannels(pEngineChannel, effectSendLevels, Samples);
630 } else if (bEq) {
631 pEq->RenderAudio(Samples);
632 pEq->GetOutChannelLeft()->MixTo(pChannel->pChannelLeft, Samples);
633 pEq->GetOutChannelRight()->MixTo(pChannel->pChannelRight, Samples);
634 }
635 }
636
637 /**
638 * Process given list of MIDI control change, aftertouch and pitch bend
639 * events for the given time.
640 *
641 * @param itEvent - iterator pointing to the next event to be processed
642 * @param End - youngest time stamp where processing should be stopped
643 */
644 void AbstractVoice::processCCEvents(RTList<Event>::Iterator& itEvent, uint End) {
645 for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
646 if (itEvent->Type == Event::type_control_change && itEvent->Param.CC.Controller) { // if (valid) MIDI control change event
647 if (itEvent->Param.CC.Controller == VCFCutoffCtrl.controller) {
648 ProcessCutoffEvent(itEvent);
649 }
650 if (itEvent->Param.CC.Controller == VCFResonanceCtrl.controller) {
651 processResonanceEvent(itEvent);
652 }
653 if (pSignalUnitRack == NULL) {
654 if (itEvent->Param.CC.Controller == pLFO1->ExtController) {
655 pLFO1->update(itEvent->Param.CC.Value);
656 }
657 if (itEvent->Param.CC.Controller == pLFO2->ExtController) {
658 pLFO2->update(itEvent->Param.CC.Value);
659 }
660 if (itEvent->Param.CC.Controller == pLFO3->ExtController) {
661 pLFO3->update(itEvent->Param.CC.Value);
662 }
663 }
664 if (itEvent->Param.CC.Controller == 7) { // volume
665 VolumeSmoother.update(AbstractEngine::VolumeCurve[itEvent->Param.CC.Value]);
666 } else if (itEvent->Param.CC.Controller == 10) { // panpot
667 MIDIPan = CalculatePan(itEvent->Param.CC.Value);
668 }
669 } else if (itEvent->Type == Event::type_pitchbend) { // if pitch bend event
670 processPitchEvent(itEvent);
671 } else if (itEvent->Type == Event::type_channel_pressure) {
672 ProcessChannelPressureEvent(itEvent);
673 } else if (itEvent->Type == Event::type_note_pressure) {
674 ProcessPolyphonicKeyPressureEvent(itEvent);
675 }
676
677 ProcessCCEvent(itEvent);
678 if (pSignalUnitRack != NULL) {
679 pSignalUnitRack->ProcessCCEvent(itEvent);
680 }
681 }
682 }
683
684 void AbstractVoice::processPitchEvent(RTList<Event>::Iterator& itEvent) {
685 Pitch.PitchBend = RTMath::CentsToFreqRatio(itEvent->Param.Pitch.Pitch * Pitch.PitchBendRange);
686 }
687
688 void AbstractVoice::processResonanceEvent(RTList<Event>::Iterator& itEvent) {
689 // convert absolute controller value to differential
690 const int ctrldelta = itEvent->Param.CC.Value - VCFResonanceCtrl.value;
691 VCFResonanceCtrl.value = itEvent->Param.CC.Value;
692 const float resonancedelta = (float) ctrldelta;
693 fFinalResonance += resonancedelta;
694 // needed for initialization of parameter
695 VCFResonanceCtrl.fvalue = itEvent->Param.CC.Value;
696 }
697
698 /**
699 * Process given list of MIDI note on, note off, sustain pedal events and
700 * note synthesis parameter events for the given time.
701 *
702 * @param itEvent - iterator pointing to the next event to be processed
703 * @param End - youngest time stamp where processing should be stopped
704 */
705 void AbstractVoice::processTransitionEvents(RTList<Event>::Iterator& itEvent, uint End) {
706 for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
707 // some voice types ignore note off
708 if (!(Type & (Voice::type_one_shot | Voice::type_release_trigger | Voice::type_controller_triggered))) {
709 if (itEvent->Type == Event::type_release_key) {
710 EnterReleaseStage();
711 } else if (itEvent->Type == Event::type_cancel_release_key) {
712 if (pSignalUnitRack == NULL) {
713 pEG1->update(EG::event_cancel_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
714 pEG2->update(EG::event_cancel_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
715 } else {
716 pSignalUnitRack->CancelRelease();
717 }
718 }
719 }
720 // process stop-note events (caused by built-in instrument script function note_off())
721 if (itEvent->Type == Event::type_release_note && pNote &&
722 pEngineChannel->pEngine->NoteByID( itEvent->Param.Note.ID ) == pNote)
723 {
724 EnterReleaseStage();
725 }
726 // process synthesis parameter events (caused by built-in real-time instrument script functions)
727 if (itEvent->Type == Event::type_note_synth_param && pNote &&
728 pEngineChannel->pEngine->NoteByID( itEvent->Param.NoteSynthParam.NoteID ) == pNote)
729 {
730 switch (itEvent->Param.NoteSynthParam.Type) {
731 case Event::synth_param_volume:
732 NoteVolumeSmoother.update(itEvent->Param.NoteSynthParam.AbsValue);
733 break;
734 case Event::synth_param_pitch:
735 NotePitch = itEvent->Param.NoteSynthParam.AbsValue;
736 break;
737 case Event::synth_param_pan:
738 NotePanLeft = AbstractEngine::PanCurveValueNorm(itEvent->Param.NoteSynthParam.AbsValue, 0 /*left*/);
739 NotePanRight = AbstractEngine::PanCurveValueNorm(itEvent->Param.NoteSynthParam.AbsValue, 1 /*right*/);
740 break;
741 case Event::synth_param_cutoff:
742 NoteCutoff = itEvent->Param.NoteSynthParam.AbsValue;
743 break;
744 case Event::synth_param_resonance:
745 NoteResonance = itEvent->Param.NoteSynthParam.AbsValue;
746 break;
747 }
748 }
749 }
750 }
751
752 /**
753 * Process given list of events aimed at all voices in a key group.
754 *
755 * @param itEvent - iterator pointing to the next event to be processed
756 * @param End - youngest time stamp where processing should be stopped
757 */
758 void AbstractVoice::processGroupEvents(RTList<Event>::Iterator& itEvent, uint End) {
759 for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
760 ProcessGroupEvent(itEvent);
761 }
762 }
763
764 /** @brief Update current portamento position.
765 *
766 * Will be called when portamento mode is enabled to get the final
767 * portamento position of this active voice, from which the next voice(s)
768 * might continue to slide.
769 *
770 * @param itNoteOffEvent - event which causes this voice to die soon
771 */
772 void AbstractVoice::UpdatePortamentoPos(Pool<Event>::Iterator& itNoteOffEvent) {
773 if (pSignalUnitRack == NULL) {
774 const float fFinalEG3Level = EG3.level(itNoteOffEvent->FragmentPos());
775 pEngineChannel->PortamentoPos = (float) MIDIKey() + RTMath::FreqRatioToCents(fFinalEG3Level) * 0.01f;
776 } else {
777 // TODO:
778 }
779 }
780
781 /**
782 * Kill the voice in the regular sense. Let the voice render audio until
783 * the kill event actually occurred, then fade down the volume level
784 * very quickly and finally let the voice die. Unlike a normal release
785 * of a voice, a kill process cannot be cancelled and is therefore
786 * usually used for voice stealing and key group conflicts.
787 *
788 * @param itKillEvent - event which caused the voice to be killed
789 */
790 void AbstractVoice::Kill(Pool<Event>::Iterator& itKillEvent) {
791 #if CONFIG_DEVMODE
792 if (!itKillEvent) dmsg(1,("AbstractVoice::Kill(): ERROR, !itKillEvent !!!\n"));
793 if (itKillEvent && !itKillEvent.isValid()) dmsg(1,("AbstractVoice::Kill(): ERROR, itKillEvent invalid !!!\n"));
794 #endif // CONFIG_DEVMODE
795
796 if (itTriggerEvent && itKillEvent->FragmentPos() <= itTriggerEvent->FragmentPos()) return;
797 this->itKillEvent = itKillEvent;
798 }
799
800 Voice::PitchInfo AbstractVoice::CalculatePitchInfo(int PitchBend) {
801 PitchInfo pitch;
802 double pitchbasecents = InstrInfo.FineTune + RgnInfo.FineTune + GetEngine()->ScaleTuning[MIDIKey() % 12];
803
804 // GSt behaviour: maximum transpose up is 40 semitones. If
805 // MIDI key is more than 40 semitones above unity note,
806 // the transpose is not done.
807 if (!SmplInfo.Unpitched && (MIDIKey() - (int) RgnInfo.UnityNote) < 40) pitchbasecents += (MIDIKey() - (int) RgnInfo.UnityNote) * 100;
808
809 pitch.PitchBase = RTMath::CentsToFreqRatioUnlimited(pitchbasecents) * (double(SmplInfo.SampleRate) / double(GetEngine()->SampleRate));
810 pitch.PitchBendRange = 1.0 / 8192.0 * 100.0 * InstrInfo.PitchbendRange;
811 pitch.PitchBend = RTMath::CentsToFreqRatio(PitchBend * pitch.PitchBendRange);
812
813 return pitch;
814 }
815
816 void AbstractVoice::onScaleTuningChanged() {
817 PitchInfo pitch = this->Pitch;
818 double pitchbasecents = InstrInfo.FineTune + RgnInfo.FineTune + GetEngine()->ScaleTuning[MIDIKey() % 12];
819
820 // GSt behaviour: maximum transpose up is 40 semitones. If
821 // MIDI key is more than 40 semitones above unity note,
822 // the transpose is not done.
823 if (!SmplInfo.Unpitched && (MIDIKey() - (int) RgnInfo.UnityNote) < 40) pitchbasecents += (MIDIKey() - (int) RgnInfo.UnityNote) * 100;
824
825 pitch.PitchBase = RTMath::CentsToFreqRatioUnlimited(pitchbasecents) * (double(SmplInfo.SampleRate) / double(GetEngine()->SampleRate));
826 this->Pitch = pitch;
827 }
828
829 double AbstractVoice::CalculateVolume(double velocityAttenuation) {
830 // For 16 bit samples, we downscale by 32768 to convert from
831 // int16 value range to DSP value range (which is
832 // -1.0..1.0). For 24 bit, we downscale from int32.
833 float volume = velocityAttenuation / (SmplInfo.BitDepth == 16 ? 32768.0f : 32768.0f * 65536.0f);
834
835 volume *= GetSampleAttenuation() * pEngineChannel->GlobalVolume * GLOBAL_VOLUME;
836
837 // the volume of release triggered samples depends on note length
838 if (Type & Voice::type_release_trigger) {
839 float noteLength = float(GetEngine()->FrameTime + Delay -
840 GetNoteOnTime(MIDIKey()) ) / GetEngine()->SampleRate;
841
842 volume *= GetReleaseTriggerAttenuation(noteLength);
843 }
844
845 return volume;
846 }
847
848 float AbstractVoice::GetReleaseTriggerAttenuation(float noteLength) {
849 return 1 - RgnInfo.ReleaseTriggerDecay * noteLength;
850 }
851
852 void AbstractVoice::EnterReleaseStage() {
853 if (pSignalUnitRack == NULL) {
854 pEG1->update(EG::event_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
855 pEG2->update(EG::event_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
856 } else {
857 pSignalUnitRack->EnterReleaseStage();
858 }
859 }
860
861 bool AbstractVoice::EG1Finished() {
862 if (pSignalUnitRack == NULL) {
863 return pEG1->getSegmentType() == EG::segment_end;
864 } else {
865 return !pSignalUnitRack->GetEndpointUnit()->Active();
866 }
867 }
868
869 } // namespace LinuxSampler
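
A small numeric aside on the constants used above: the __PLUS_ONE_CENT / __MINUS_ONE_CENT values in Synthesize() and the pitch bend conversion in CalculatePitchInfo() all follow from the equal-tempered definition of a cent (a frequency ratio of 2^(1/1200)). The standalone sketch below reproduces those numbers; centsToFreqRatio() is only a local stand-in for RTMath::CentsToFreqRatio(), not the real implementation, and the 2-semitone bend range is an assumed example value.

#include <cmath>
#include <cstdio>

// local stand-in for RTMath::CentsToFreqRatio(); not the real implementation
static double centsToFreqRatio(double cents) {
    return std::pow(2.0, cents / 1200.0); // 1200 cents per octave
}

int main() {
    // one cent up/down: matches __PLUS_ONE_CENT and __MINUS_ONE_CENT in Synthesize()
    std::printf("+1 cent: %.15f\n", centsToFreqRatio(+1.0)); // ~1.000577789506555
    std::printf("-1 cent: %.15f\n", centsToFreqRatio(-1.0)); // ~0.999422544141381

    // pitch bend as in CalculatePitchInfo(): MIDI bend is -8192..+8191 and
    // PitchbendRange is given in semitones, so this is "cents per bend unit"
    const double pitchbendRangeSemitones = 2.0; // assumed example value
    const double centsPerUnit = 1.0 / 8192.0 * 100.0 * pitchbendRangeSemitones;
    const int    bend         = 8191;           // full bend up
    std::printf("full bend up ratio: %f\n", centsToFreqRatio(bend * centsPerUnit)); // ~1.122
    return 0;
}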
