
Contents of /linuxsampler/trunk/src/engines/common/AbstractVoice.cpp



Revision 2322
Sun Feb 26 09:09:19 2012 UTC by iliev
File size: 38213 bytes
* bugfix: voice stealing didn't work for SFZ and SF2 engines

/***************************************************************************
 *                                                                         *
 *   LinuxSampler - modular, streaming capable sampler                     *
 *                                                                         *
 *   Copyright (C) 2003,2004 by Benno Senoner and Christian Schoenebeck    *
 *   Copyright (C) 2005-2008 Christian Schoenebeck                         *
 *   Copyright (C) 2009-2011 Christian Schoenebeck and Grigor Iliev        *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the          *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the Free Software           *
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston,                 *
 *   MA 02111-1307 USA                                                     *
 ***************************************************************************/

#include "AbstractVoice.h"

namespace LinuxSampler {

    AbstractVoice::AbstractVoice(SignalUnitRack* pRack): pSignalUnitRack(pRack) {
        pEngineChannel = NULL;
        pLFO1 = new LFOUnsigned(1.0f);  // amplitude LFO (0..1 range)
        pLFO2 = new LFOUnsigned(1.0f);  // filter LFO (0..1 range)
        pLFO3 = new LFOSigned(1200.0f); // pitch LFO (-1200..+1200 range)
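        // (The amplitude and filter LFOs are unipolar (0..1) and act as scale
        // factors, while the pitch LFO is bipolar and scaled in cents, i.e.
        // up to +/-1200 cents or one octave of vibrato; the actual per-voice
        // depths are set up later in InitLFO1()..InitLFO3().)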
        PlaybackState = playback_state_end;
        SynthesisMode = 0; // set all mode bits to 0 first
        // select synthesis implementation (asm core is not supported ATM)
        #if 0 // CONFIG_ASM && ARCH_X86
        SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, Features::supportsMMX() && Features::supportsSSE());
        #else
        SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, false);
        #endif
        SYNTHESIS_MODE_SET_PROFILING(SynthesisMode, gig::Profiler::isEnabled());

        finalSynthesisParameters.filterLeft.Reset();
        finalSynthesisParameters.filterRight.Reset();

        pEq = NULL;
        bEqSupport = false;
    }

    AbstractVoice::~AbstractVoice() {
        if (pLFO1) delete pLFO1;
        if (pLFO2) delete pLFO2;
        if (pLFO3) delete pLFO3;

        if (pEq != NULL) delete pEq;
    }

    void AbstractVoice::CreateEq() {
        if (!bEqSupport) return;
        if (pEq != NULL) delete pEq;
        pEq = new EqSupport;
        pEq->InitEffect(GetEngine()->pAudioOutputDevice);
    }

    /**
     * Resets voice variables. Should only be called if rendering process is
     * suspended / not running.
     */
    void AbstractVoice::Reset() {
        finalSynthesisParameters.filterLeft.Reset();
        finalSynthesisParameters.filterRight.Reset();
        DiskStreamRef.pStream = NULL;
        DiskStreamRef.hStream = 0;
        DiskStreamRef.State = Stream::state_unused;
        DiskStreamRef.OrderID = 0;
        PlaybackState = playback_state_end;
        itTriggerEvent = Pool<Event>::Iterator();
        itKillEvent = Pool<Event>::Iterator();
    }

    /**
     * Initializes and triggers the voice; a disk stream will be launched if
     * needed.
     *
     * @param pEngineChannel - engine channel on which this voice was ordered
     * @param itNoteOnEvent - event that caused triggering of this voice
     * @param PitchBend - MIDI detune factor (-8192 ... +8191)
     * @param pRegion - points to the region which provides sample wave(s) and articulation data
     * @param VoiceType - type of this voice
     * @param iKeyGroup - a value > 0 defines the key group this voice is a member of
     * @returns 0 on success, a value < 0 if the voice wasn't triggered
     *          (either due to an error or e.g. because no region is
     *          defined for the given key)
     */
    int AbstractVoice::Trigger (
        AbstractEngineChannel* pEngineChannel,
        Pool<Event>::Iterator& itNoteOnEvent,
        int                    PitchBend,
        type_t                 VoiceType,
        int                    iKeyGroup
    ) {
        this->pEngineChannel = pEngineChannel;
        Orphan = false;

        #if CONFIG_DEVMODE
        if (itNoteOnEvent->FragmentPos() > GetEngine()->MaxSamplesPerCycle) { // just a sanity check for debugging
            dmsg(1,("Voice::Trigger(): ERROR, TriggerDelay > Totalsamples\n"));
        }
        #endif // CONFIG_DEVMODE

        Type = VoiceType;
        MIDIKey = itNoteOnEvent->Param.Note.Key;
        MIDIVelocity = itNoteOnEvent->Param.Note.Velocity;
        MIDIPan = pEngineChannel->ControllerTable[10];
        if (MIDIPan == 0 && pEngineChannel->GlobalPanRight == 1) MIDIPan = 64; // workaround used to determine whether the MIDI pan has not been set
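        // (CC#10 == 0 would literally mean "hard left"; but if the channel's
        // global right pan is still at its neutral value of 1, the pan
        // controller was most likely never received at all, so the voice
        // assumes center position instead.)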
        PlaybackState = playback_state_init; // mark voice as triggered, but no audio rendered yet
        Delay = itNoteOnEvent->FragmentPos();
        itTriggerEvent = itNoteOnEvent;
        itKillEvent = Pool<Event>::Iterator();
        MidiKeyBase* pKeyInfo = GetMidiKeyInfo(MIDIKey);

        pGroupEvents = iKeyGroup ? pEngineChannel->ActiveKeyGroups[iKeyGroup] : 0;

        SmplInfo = GetSampleInfo();
        RgnInfo = GetRegionInfo();
        InstrInfo = GetInstrumentInfo();

        AboutToTrigger();

        // calculate volume
        const double velocityAttenuation = GetVelocityAttenuation(itNoteOnEvent->Param.Note.Velocity);
        float volume = CalculateVolume(velocityAttenuation) * pKeyInfo->Volume;
        if (volume <= 0) return -1;

        // select channel mode (mono or stereo)
        SYNTHESIS_MODE_SET_CHANNELS(SynthesisMode, SmplInfo.ChannelCount == 2);
        // select bit depth (16 or 24)
        SYNTHESIS_MODE_SET_BITDEPTH24(SynthesisMode, SmplInfo.BitDepth == 24);

        // get starting crossfade volume level
        float crossfadeVolume = CalculateCrossfadeVolume(itNoteOnEvent->Param.Note.Velocity);

        VolumeLeft  = volume * pKeyInfo->PanLeft  * AbstractEngine::PanCurve[64 - RgnInfo.Pan];
        VolumeRight = volume * pKeyInfo->PanRight * AbstractEngine::PanCurve[64 + RgnInfo.Pan];

        float subfragmentRate = GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE;
        CrossfadeSmoother.trigger(crossfadeVolume, subfragmentRate);
        VolumeSmoother.trigger(pEngineChannel->MidiVolume, subfragmentRate);
        PanLeftSmoother.trigger(pEngineChannel->GlobalPanLeft, subfragmentRate);
        PanRightSmoother.trigger(pEngineChannel->GlobalPanRight, subfragmentRate);
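        // (The smoothers, like the EGs below, are only advanced once per
        // subfragment rather than per sample, so their update rate is
        // SampleRate / subfragment size; e.g. assuming 44100 Hz and the
        // default subfragment size of 32 sample points that is roughly
        // 1378 updates per second.)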

        // check if the sample needs disk streaming or is short enough to be played entirely from the RAM cache
        long cachedsamples = GetSampleCacheSize() / SmplInfo.FrameSize;
        DiskVoice = cachedsamples < SmplInfo.TotalFrameCount;

        SetSampleStartOffset();

        if (DiskVoice) { // voice to be streamed from disk
            if (cachedsamples > (GetEngine()->MaxSamplesPerCycle << CONFIG_MAX_PITCH)) {
                MaxRAMPos = cachedsamples - (GetEngine()->MaxSamplesPerCycle << CONFIG_MAX_PITCH) / SmplInfo.ChannelCount; //TODO: this calculation is too pessimistic and may better be moved to Render() method, so it calculates MaxRAMPos dependent on the current demand of sample points to be rendered (e.g. in case of JACK)
            } else {
                // The cache is too small to fit a max sample buffer.
                // Setting MaxRAMPos to 0 will probably cause a click
                // in the audio, but it's better than not handling
                // this case at all, which would have caused the
                // unsigned MaxRAMPos to be set to a negative number.
                MaxRAMPos = 0;
            }

            // check if there's a loop defined which completely fits into the cached (RAM) part of the sample
            RAMLoop = (SmplInfo.HasLoops && (SmplInfo.LoopStart + SmplInfo.LoopLength) <= MaxRAMPos);
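            // (If the loop does not fit into the cached part of the sample,
            // looping is presumably left to the disk streaming code instead,
            // so the voice still loops, just not from RAM.)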

            if (OrderNewStream()) return -1;
            dmsg(4,("Disk voice launched (cached samples: %d, total Samples: %d, MaxRAMPos: %d, RAMLooping: %s)\n", cachedsamples, SmplInfo.TotalFrameCount, MaxRAMPos, (RAMLoop) ? "yes" : "no"));
        }
        else { // RAM only voice
            MaxRAMPos = cachedsamples;
            RAMLoop = (SmplInfo.HasLoops);
            dmsg(4,("RAM only voice launched (Looping: %s)\n", (RAMLoop) ? "yes" : "no"));
        }
        if (RAMLoop) {
            loop.uiTotalCycles = SmplInfo.LoopPlayCount;
            loop.uiCyclesLeft = SmplInfo.LoopPlayCount;
            loop.uiStart = SmplInfo.LoopStart;
            loop.uiEnd = SmplInfo.LoopStart + SmplInfo.LoopLength;
            loop.uiSize = SmplInfo.LoopLength;
        }

        Pitch = CalculatePitchInfo(PitchBend);

        // the lengths of the decay and release curves depend on the velocity
        const double velrelease = 1 / GetVelocityRelease(itNoteOnEvent->Param.Note.Velocity);

        if (pSignalUnitRack == NULL) { // setup EG 1 (VCA EG)
            // get current value of EG1 controller
            double eg1controllervalue = GetEG1ControllerValue(itNoteOnEvent->Param.Note.Velocity);

            // calculate influence of EG1 controller on EG1's parameters
            EGInfo egInfo = CalculateEG1ControllerInfluence(eg1controllervalue);

            TriggerEG1(egInfo, velrelease, velocityAttenuation, GetEngine()->SampleRate, itNoteOnEvent->Param.Note.Velocity);
        } else {
            pSignalUnitRack->Trigger();
        }

        #ifdef CONFIG_INTERPOLATE_VOLUME
        // setup initial volume in synthesis parameters
        #ifdef CONFIG_PROCESS_MUTED_CHANNELS
        if (pEngineChannel->GetMute()) {
            finalSynthesisParameters.fFinalVolumeLeft = 0;
            finalSynthesisParameters.fFinalVolumeRight = 0;
        }
        else
        #else
        {
            float finalVolume;
            if (pSignalUnitRack == NULL) {
                finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * pEG1->getLevel();
            } else {
                finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * pSignalUnitRack->GetEndpointUnit()->GetVolume();
            }

            finalSynthesisParameters.fFinalVolumeLeft = finalVolume * VolumeLeft * pEngineChannel->GlobalPanLeft;
            finalSynthesisParameters.fFinalVolumeRight = finalVolume * VolumeRight * pEngineChannel->GlobalPanRight;
        }
        #endif
        #endif

        if (pSignalUnitRack == NULL) {
            // setup EG 2 (VCF Cutoff EG)
            {
                // get current value of EG2 controller
                double eg2controllervalue = GetEG2ControllerValue(itNoteOnEvent->Param.Note.Velocity);

                // calculate influence of EG2 controller on EG2's parameters
                EGInfo egInfo = CalculateEG2ControllerInfluence(eg2controllervalue);

                TriggerEG2(egInfo, velrelease, velocityAttenuation, GetEngine()->SampleRate, itNoteOnEvent->Param.Note.Velocity);
            }


            // setup EG 3 (VCO EG)
            {
                // if portamento mode is on, we dedicate EG3 purely for portamento, otherwise if portamento is off we do as told by the patch
                bool bPortamento = pEngineChannel->PortamentoMode && pEngineChannel->PortamentoPos >= 0.0f;
                float eg3depth = (bPortamento)
                                     ? RTMath::CentsToFreqRatio((pEngineChannel->PortamentoPos - (float) MIDIKey) * 100)
                                     : RTMath::CentsToFreqRatio(RgnInfo.EG3Depth);
                float eg3time = (bPortamento)
                                    ? pEngineChannel->PortamentoTime
                                    : RgnInfo.EG3Attack;
                EG3.trigger(eg3depth, eg3time, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                dmsg(5,("PortamentoPos=%f, depth=%f, time=%f\n", pEngineChannel->PortamentoPos, eg3depth, eg3time));
            }
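            // (In portamento mode EG3 starts at the frequency ratio of the
            // previous note and glides towards the new note over
            // PortamentoTime; e.g. coming from key 60 while triggering key 67
            // yields (60 - 67) * 100 = -700 cents, i.e. the voice starts a
            // fifth below its nominal pitch and slides up to it.)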


            // setup LFO 1 (VCA LFO)
            InitLFO1();
            // setup LFO 2 (VCF Cutoff LFO)
            InitLFO2();
            // setup LFO 3 (VCO LFO)
            InitLFO3();
        }


        #if CONFIG_FORCE_FILTER
        const bool bUseFilter = true;
        #else // use filter only if instrument file told so
        const bool bUseFilter = RgnInfo.VCFEnabled;
        #endif // CONFIG_FORCE_FILTER
        SYNTHESIS_MODE_SET_FILTER(SynthesisMode, bUseFilter);
        if (bUseFilter) {
            #ifdef CONFIG_OVERRIDE_CUTOFF_CTRL
            VCFCutoffCtrl.controller = CONFIG_OVERRIDE_CUTOFF_CTRL;
            #else // use the one defined in the instrument file
            VCFCutoffCtrl.controller = GetVCFCutoffCtrl();
            #endif // CONFIG_OVERRIDE_CUTOFF_CTRL

            #ifdef CONFIG_OVERRIDE_RESONANCE_CTRL
            VCFResonanceCtrl.controller = CONFIG_OVERRIDE_RESONANCE_CTRL;
            #else // use the one defined in the instrument file
            VCFResonanceCtrl.controller = GetVCFResonanceCtrl();
            #endif // CONFIG_OVERRIDE_RESONANCE_CTRL

            #ifndef CONFIG_OVERRIDE_FILTER_TYPE
            finalSynthesisParameters.filterLeft.SetType(RgnInfo.VCFType);
            finalSynthesisParameters.filterRight.SetType(RgnInfo.VCFType);
            #else // override filter type
            finalSynthesisParameters.filterLeft.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
            finalSynthesisParameters.filterRight.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
            #endif // CONFIG_OVERRIDE_FILTER_TYPE

            VCFCutoffCtrl.value = pEngineChannel->ControllerTable[VCFCutoffCtrl.controller];
            VCFResonanceCtrl.value = pEngineChannel->ControllerTable[VCFResonanceCtrl.controller];

            // calculate cutoff frequency
            CutoffBase = CalculateCutoffBase(itNoteOnEvent->Param.Note.Velocity);

            VCFCutoffCtrl.fvalue = CalculateFinalCutoff(CutoffBase);

            // calculate resonance
            float resonance = (float) (VCFResonanceCtrl.controller ? VCFResonanceCtrl.value : RgnInfo.VCFResonance);
            VCFResonanceCtrl.fvalue = resonance;
        } else {
            VCFCutoffCtrl.controller = 0;
            VCFResonanceCtrl.controller = 0;
        }

        const bool bEq =
            pSignalUnitRack != NULL && pSignalUnitRack->HasEq() && pEq->HasSupport();

        if (bEq) {
            pEq->GetInChannelLeft()->Clear();
            pEq->GetInChannelRight()->Clear();
            pEq->RenderAudio(GetEngine()->pAudioOutputDevice->MaxSamplesPerCycle());
        }

        return 0; // success
    }

    void AbstractVoice::SetSampleStartOffset() {
        finalSynthesisParameters.dPos = RgnInfo.SampleStartOffset; // offset where we should start playback of sample (0 - 2000 sample points)
        Pos = RgnInfo.SampleStartOffset;
    }

    /**
     * Synthesizes the current audio fragment for this voice.
     *
     * @param Samples - number of sample points to be rendered in this audio
     *                  fragment cycle
     * @param pSrc    - pointer to input sample data
     * @param Skip    - number of sample points to skip in output buffer
     */
    void AbstractVoice::Synthesize(uint Samples, sample_t* pSrc, uint Skip) {
        bool delay = false; // Whether the voice playback should be delayed for this call

        if (pSignalUnitRack != NULL) {
            uint delaySteps = pSignalUnitRack->GetEndpointUnit()->DelayTrigger();
            if (delaySteps > 0) { // delay on the endpoint unit means delay of the voice playback
                if (delaySteps >= Samples) {
                    pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(Samples);
                    delay = true;
                } else {
                    pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(delaySteps);
                    Samples -= delaySteps;
                    Skip += delaySteps;
                }
            }
        }

        AbstractEngineChannel* pChannel = pEngineChannel;
        MidiKeyBase* pMidiKeyInfo = GetMidiKeyInfo(MIDIKey);

        const bool bVoiceRequiresDedicatedRouting =
            pEngineChannel->GetFxSendCount() > 0 &&
            (pMidiKeyInfo->ReverbSend || pMidiKeyInfo->ChorusSend);

        const bool bEq =
            pSignalUnitRack != NULL && pSignalUnitRack->HasEq() && pEq->HasSupport();

        if (bEq) {
            pEq->GetInChannelLeft()->Clear();
            pEq->GetInChannelRight()->Clear();
            finalSynthesisParameters.pOutLeft = &pEq->GetInChannelLeft()->Buffer()[Skip];
            finalSynthesisParameters.pOutRight = &pEq->GetInChannelRight()->Buffer()[Skip];
            pSignalUnitRack->UpdateEqSettings(pEq);
        } else if (bVoiceRequiresDedicatedRouting) {
            finalSynthesisParameters.pOutLeft = &GetEngine()->pDedicatedVoiceChannelLeft->Buffer()[Skip];
            finalSynthesisParameters.pOutRight = &GetEngine()->pDedicatedVoiceChannelRight->Buffer()[Skip];
        } else {
            finalSynthesisParameters.pOutLeft = &pChannel->pChannelLeft->Buffer()[Skip];
            finalSynthesisParameters.pOutRight = &pChannel->pChannelRight->Buffer()[Skip];
        }
        finalSynthesisParameters.pSrc = pSrc;

        RTList<Event>::Iterator itCCEvent = pChannel->pEvents->first();
        RTList<Event>::Iterator itNoteEvent;
        GetFirstEventOnKey(MIDIKey, itNoteEvent);

        RTList<Event>::Iterator itGroupEvent;
        if (pGroupEvents) itGroupEvent = pGroupEvents->first();

        if (itTriggerEvent) { // skip events that happened before this voice was triggered
            while (itCCEvent && itCCEvent->FragmentPos() <= Skip) ++itCCEvent;
            while (itGroupEvent && itGroupEvent->FragmentPos() <= Skip) ++itGroupEvent;

            // we can't simply compare the time stamps here, because note events
            // might happen on the same time stamp, so we have to rely on the
            // actual order in which the note events arrived instead (see bug #112)
            for (; itNoteEvent; ++itNoteEvent) {
                if (itTriggerEvent == itNoteEvent) {
                    ++itNoteEvent;
                    break;
                }
            }
        }

        uint killPos;
        if (itKillEvent) {
            int maxFadeOutPos = Samples - GetEngine()->GetMinFadeOutSamples();
            if (maxFadeOutPos < 0) {
                // There's not enough space in buffer to do a fade out
                // from max volume (this can only happen for audio
                // drivers that use Samples < MaxSamplesPerCycle).
                // End the EG1 here, at pos 0, with a shorter max fade
                // out time.
                if (pSignalUnitRack == NULL) {
                    pEG1->enterFadeOutStage(Samples / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                } else {
                    // TODO:
                }
                itKillEvent = Pool<Event>::Iterator();
            } else {
                killPos = RTMath::Min(itKillEvent->FragmentPos(), maxFadeOutPos);
            }
        }

        uint i = Skip;
        while (i < Samples) {
            int iSubFragmentEnd = RTMath::Min(i + CONFIG_DEFAULT_SUBFRAGMENT_SIZE, Samples);

            // initialize all final synthesis parameters
            fFinalCutoff = VCFCutoffCtrl.fvalue;
            fFinalResonance = VCFResonanceCtrl.fvalue;

            // process MIDI control change and pitchbend events for this subfragment
            processCCEvents(itCCEvent, iSubFragmentEnd);
            uint8_t pan = MIDIPan;
            if (pSignalUnitRack != NULL) pan = pSignalUnitRack->GetEndpointUnit()->CaluclatePan(pan);

            PanLeftSmoother.update(AbstractEngine::PanCurve[128 - pan]);
            PanRightSmoother.update(AbstractEngine::PanCurve[pan]);

            finalSynthesisParameters.fFinalPitch = Pitch.PitchBase * Pitch.PitchBend;
            float fFinalVolume = VolumeSmoother.render() * CrossfadeSmoother.render();
            #ifdef CONFIG_PROCESS_MUTED_CHANNELS
            if (pChannel->GetMute()) fFinalVolume = 0;
            #endif

            // process transition events (note on, note off & sustain pedal)
            processTransitionEvents(itNoteEvent, iSubFragmentEnd);
            processGroupEvents(itGroupEvent, iSubFragmentEnd);

            if (pSignalUnitRack == NULL) {
                // if the voice was killed in this subfragment, or if the
                // filter EG is finished, switch EG1 to fade out stage
                if ((itKillEvent && killPos <= iSubFragmentEnd) ||
                    (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) &&
                     pEG2->getSegmentType() == EG::segment_end)) {
                    pEG1->enterFadeOutStage();
                    itKillEvent = Pool<Event>::Iterator();
                }

                // process envelope generators
                switch (pEG1->getSegmentType()) {
                    case EG::segment_lin:
                        fFinalVolume *= pEG1->processLin();
                        break;
                    case EG::segment_exp:
                        fFinalVolume *= pEG1->processExp();
                        break;
                    case EG::segment_end:
                        fFinalVolume *= pEG1->getLevel();
                        break; // noop
                    case EG::segment_pow:
                        fFinalVolume *= pEG1->processPow();
                        break;
                }
                switch (pEG2->getSegmentType()) {
                    case EG::segment_lin:
                        fFinalCutoff *= pEG2->processLin();
                        break;
                    case EG::segment_exp:
                        fFinalCutoff *= pEG2->processExp();
                        break;
                    case EG::segment_end:
                        fFinalCutoff *= pEG2->getLevel();
                        break; // noop
                    case EG::segment_pow:
                        fFinalCutoff *= pEG2->processPow();
                        break;
                }
                if (EG3.active()) finalSynthesisParameters.fFinalPitch *= EG3.render();

                // process low frequency oscillators
                if (bLFO1Enabled) fFinalVolume *= (1.0f - pLFO1->render());
                if (bLFO2Enabled) fFinalCutoff *= pLFO2->render();
                if (bLFO3Enabled) finalSynthesisParameters.fFinalPitch *= RTMath::CentsToFreqRatio(pLFO3->render());
            } else {
                // if the voice was killed in this subfragment, enter fade out stage
                if (itKillEvent && killPos <= iSubFragmentEnd) {
                    pSignalUnitRack->EnterFadeOutStage();
                    itKillEvent = Pool<Event>::Iterator();
                }

                // if the filter EG is finished, switch EG1 to fade out stage
                /*if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) &&
                    pEG2->getSegmentType() == EG::segment_end) {
                    pEG1->enterFadeOutStage();
                    itKillEvent = Pool<Event>::Iterator();
                }*/
                // TODO: ^^^

                fFinalVolume *= pSignalUnitRack->GetEndpointUnit()->GetVolume();
                fFinalCutoff = pSignalUnitRack->GetEndpointUnit()->CalculateFilterCutoff(fFinalCutoff);
                fFinalResonance = pSignalUnitRack->GetEndpointUnit()->CalculateResonance(fFinalResonance);

                finalSynthesisParameters.fFinalPitch =
                    pSignalUnitRack->GetEndpointUnit()->CalculatePitch(finalSynthesisParameters.fFinalPitch);

            }

            // limit the pitch so we don't read outside the buffer
            finalSynthesisParameters.fFinalPitch = RTMath::Min(finalSynthesisParameters.fFinalPitch, float(1 << CONFIG_MAX_PITCH));
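            // (The pitch ratio is the number of input sample points consumed
            // per output sample point, so capping it at 1 << CONFIG_MAX_PITCH
            // matches the MaxSamplesPerCycle << CONFIG_MAX_PITCH headroom
            // that was reserved when MaxRAMPos was computed in Trigger().)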

            // if filter enabled then update filter coefficients
            if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode)) {
                finalSynthesisParameters.filterLeft.SetParameters(fFinalCutoff, fFinalResonance, GetEngine()->SampleRate);
                finalSynthesisParameters.filterRight.SetParameters(fFinalCutoff, fFinalResonance, GetEngine()->SampleRate);
            }

            // do we need resampling?
            const float __PLUS_ONE_CENT  = 1.000577789506554859250142541782224725466f;
            const float __MINUS_ONE_CENT = 0.9994225441413807496009516495583113737666f;
            const bool bResamplingRequired = !(finalSynthesisParameters.fFinalPitch <= __PLUS_ONE_CENT &&
                                               finalSynthesisParameters.fFinalPitch >= __MINUS_ONE_CENT);
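            // (These constants are 2^(+1/1200) and 2^(-1/1200): if the final
            // pitch ratio stays within one cent of 1.0, interpolation is
            // skipped and the sample points are copied straight through,
            // presumably because a deviation that small is inaudible and
            // plain copying is cheaper.)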
            SYNTHESIS_MODE_SET_INTERPOLATE(SynthesisMode, bResamplingRequired);

            // prepare final synthesis parameters structure
            finalSynthesisParameters.uiToGo = iSubFragmentEnd - i;
            #ifdef CONFIG_INTERPOLATE_VOLUME
            finalSynthesisParameters.fFinalVolumeDeltaLeft =
                (fFinalVolume * VolumeLeft * PanLeftSmoother.render() -
                 finalSynthesisParameters.fFinalVolumeLeft) / finalSynthesisParameters.uiToGo;
            finalSynthesisParameters.fFinalVolumeDeltaRight =
                (fFinalVolume * VolumeRight * PanRightSmoother.render() -
                 finalSynthesisParameters.fFinalVolumeRight) / finalSynthesisParameters.uiToGo;
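            // (per-sample linear ramp: the delta is chosen so that, starting
            // from the volume left over from the previous subfragment, the
            // new target volume is reached exactly after uiToGo sample
            // points, which avoids zipper noise on volume / pan changes)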
            #else
            finalSynthesisParameters.fFinalVolumeLeft =
                fFinalVolume * VolumeLeft * PanLeftSmoother.render();
            finalSynthesisParameters.fFinalVolumeRight =
                fFinalVolume * VolumeRight * PanRightSmoother.render();
            #endif
            // render audio for one subfragment
            if (!delay) RunSynthesisFunction(SynthesisMode, &finalSynthesisParameters, &loop);

            if (pSignalUnitRack == NULL) {
                // stop the rendering if volume EG is finished
                if (pEG1->getSegmentType() == EG::segment_end) break;
            } else {
                // stop the rendering if the endpoint unit is not active
                if (!pSignalUnitRack->GetEndpointUnit()->Active()) break;
            }

            const double newPos = Pos + (iSubFragmentEnd - i) * finalSynthesisParameters.fFinalPitch;

            if (pSignalUnitRack == NULL) {
                // increment envelopes' positions
                if (pEG1->active()) {

                    // if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage
                    if (SmplInfo.HasLoops && Pos <= SmplInfo.LoopStart && SmplInfo.LoopStart < newPos) {
                        pEG1->update(EG::event_hold_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                    }

                    pEG1->increment(1);
                    if (!pEG1->toStageEndLeft()) pEG1->update(EG::event_stage_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                }
                if (pEG2->active()) {
                    pEG2->increment(1);
                    if (!pEG2->toStageEndLeft()) pEG2->update(EG::event_stage_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                }
                EG3.increment(1);
                if (!EG3.toEndLeft()) EG3.update(); // neutralize envelope coefficient if end reached
            } else {
                // if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage
                /*if (SmplInfo.HasLoops && Pos <= SmplInfo.LoopStart && SmplInfo.LoopStart < newPos) {
                    pEG1->update(EG::event_hold_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                }*/
                // TODO: ^^^

                if (!delay) pSignalUnitRack->Increment();
            }

            Pos = newPos;
            i = iSubFragmentEnd;
        }

        if (delay) return;

        if (bVoiceRequiresDedicatedRouting) {
            if (bEq) {
                pEq->RenderAudio(Samples);
                pEq->GetOutChannelLeft()->CopyTo(GetEngine()->pDedicatedVoiceChannelLeft, Samples);
                pEq->GetOutChannelRight()->CopyTo(GetEngine()->pDedicatedVoiceChannelRight, Samples);
            }
            optional<float> effectSendLevels[2] = {
                pMidiKeyInfo->ReverbSend,
                pMidiKeyInfo->ChorusSend
            };
            GetEngine()->RouteDedicatedVoiceChannels(pEngineChannel, effectSendLevels, Samples);
        } else if (bEq) {
            pEq->RenderAudio(Samples);
            pEq->GetOutChannelLeft()->MixTo(pChannel->pChannelLeft, Samples);
            pEq->GetOutChannelRight()->MixTo(pChannel->pChannelRight, Samples);
        }
    }

    /**
     * Process given list of MIDI control change and pitch bend events for
     * the given time.
     *
     * @param itEvent - iterator pointing to the next event to be processed
     * @param End     - youngest time stamp where processing should be stopped
     */
    void AbstractVoice::processCCEvents(RTList<Event>::Iterator& itEvent, uint End) {
        for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
            if (itEvent->Type == Event::type_control_change && itEvent->Param.CC.Controller) { // if (valid) MIDI control change event
                if (itEvent->Param.CC.Controller == VCFCutoffCtrl.controller) {
                    ProcessCutoffEvent(itEvent);
                }
                if (itEvent->Param.CC.Controller == VCFResonanceCtrl.controller) {
                    processResonanceEvent(itEvent);
                }
                if (pSignalUnitRack == NULL) {
                    if (itEvent->Param.CC.Controller == pLFO1->ExtController) {
                        pLFO1->update(itEvent->Param.CC.Value);
                    }
                    if (itEvent->Param.CC.Controller == pLFO2->ExtController) {
                        pLFO2->update(itEvent->Param.CC.Value);
                    }
                    if (itEvent->Param.CC.Controller == pLFO3->ExtController) {
                        pLFO3->update(itEvent->Param.CC.Value);
                    }
                }
                if (itEvent->Param.CC.Controller == 7) { // volume
                    VolumeSmoother.update(AbstractEngine::VolumeCurve[itEvent->Param.CC.Value]);
                } else if (itEvent->Param.CC.Controller == 10) { // panpot
                    MIDIPan = itEvent->Param.CC.Value;
                }
            } else if (itEvent->Type == Event::type_pitchbend) { // if pitch bend event
                processPitchEvent(itEvent);
            }

            ProcessCCEvent(itEvent);
            if (pSignalUnitRack != NULL) {
                pSignalUnitRack->ProcessCCEvent(itEvent);
            }
        }
    }

    void AbstractVoice::processPitchEvent(RTList<Event>::Iterator& itEvent) {
        Pitch.PitchBend = RTMath::CentsToFreqRatio(itEvent->Param.Pitch.Pitch * Pitch.PitchBendRange);
    }
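    // (Param.Pitch.Pitch is the raw 14 bit pitch wheel value centered around
    // zero (-8192 .. +8191) and PitchBendRange converts it to cents; e.g.
    // with the common bend range of +/-2 semitones a full upward bend is
    // roughly 8191 * (200.0 / 8192) ~= 200 cents, i.e. a whole tone up.)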

    void AbstractVoice::processResonanceEvent(RTList<Event>::Iterator& itEvent) {
        // convert absolute controller value to differential
        const int ctrldelta = itEvent->Param.CC.Value - VCFResonanceCtrl.value;
        VCFResonanceCtrl.value = itEvent->Param.CC.Value;
        const float resonancedelta = (float) ctrldelta;
        fFinalResonance += resonancedelta;
        // needed for initialization of parameter
        VCFResonanceCtrl.fvalue = itEvent->Param.CC.Value;
    }

    /**
     * Process given list of MIDI note on, note off and sustain pedal events
     * for the given time.
     *
     * @param itEvent - iterator pointing to the next event to be processed
     * @param End     - youngest time stamp where processing should be stopped
     */
    void AbstractVoice::processTransitionEvents(RTList<Event>::Iterator& itEvent, uint End) {
        for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
            // some voice types ignore note off
            if (!(Type & (Voice::type_one_shot | Voice::type_release_trigger | Voice::type_controller_triggered))) {
                if (itEvent->Type == Event::type_release) {
                    EnterReleaseStage();
                } else if (itEvent->Type == Event::type_cancel_release) {
                    if (pSignalUnitRack == NULL) {
                        pEG1->update(EG::event_cancel_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                        pEG2->update(EG::event_cancel_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                    } else {
                        pSignalUnitRack->CancelRelease();
                    }
                }
            }
        }
    }

    /**
     * Process given list of events aimed at all voices in a key group.
     *
     * @param itEvent - iterator pointing to the next event to be processed
     * @param End     - youngest time stamp where processing should be stopped
     */
    void AbstractVoice::processGroupEvents(RTList<Event>::Iterator& itEvent, uint End) {
        for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
            ProcessGroupEvent(itEvent);
        }
    }

    /** @brief Update current portamento position.
     *
     * Will be called when portamento mode is enabled to get the final
     * portamento position of this active voice from where the next voice(s)
     * might continue to slide on.
     *
     * @param itNoteOffEvent - event which causes this voice to die soon
     */
    void AbstractVoice::UpdatePortamentoPos(Pool<Event>::Iterator& itNoteOffEvent) {
        if (pSignalUnitRack == NULL) {
            const float fFinalEG3Level = EG3.level(itNoteOffEvent->FragmentPos());
            pEngineChannel->PortamentoPos = (float) MIDIKey + RTMath::FreqRatioToCents(fFinalEG3Level) * 0.01f;
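            // (FreqRatioToCents() turns the remaining EG3 glide offset back
            // into cents, and * 0.01f converts cents to fractional MIDI keys,
            // so the next portamento voice can pick up the slide exactly
            // where this one left off)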
        } else {
            // TODO:
        }
    }

    /**
     * Kill the voice in the regular sense. Let the voice render audio until
     * the kill event actually occurred, then fade down the volume level
     * very quickly and finally let the voice die. Unlike a normal release
     * of a voice, a kill process cannot be cancelled and is therefore
     * usually used for voice stealing and key group conflicts.
     *
     * @param itKillEvent - event which caused the voice to be killed
     */
    void AbstractVoice::Kill(Pool<Event>::Iterator& itKillEvent) {
        #if CONFIG_DEVMODE
        if (!itKillEvent) dmsg(1,("AbstractVoice::Kill(): ERROR, !itKillEvent !!!\n"));
        if (itKillEvent && !itKillEvent.isValid()) dmsg(1,("AbstractVoice::Kill(): ERROR, itKillEvent invalid !!!\n"));
        #endif // CONFIG_DEVMODE

        if (itTriggerEvent && itKillEvent->FragmentPos() <= itTriggerEvent->FragmentPos()) return;
        this->itKillEvent = itKillEvent;
    }

    Voice::PitchInfo AbstractVoice::CalculatePitchInfo(int PitchBend) {
        PitchInfo pitch;
        double pitchbasecents = InstrInfo.FineTune + RgnInfo.FineTune + GetEngine()->ScaleTuning[MIDIKey % 12];

        // GSt behaviour: maximum transpose up is 40 semitones. If
        // MIDI key is more than 40 semitones above unity note,
        // the transpose is not done.
        if (!SmplInfo.Unpitched && (MIDIKey - (int) RgnInfo.UnityNote) < 40) pitchbasecents += (MIDIKey - (int) RgnInfo.UnityNote) * 100;

        pitch.PitchBase = RTMath::CentsToFreqRatioUnlimited(pitchbasecents) * (double(SmplInfo.SampleRate) / double(GetEngine()->SampleRate));
        pitch.PitchBendRange = 1.0 / 8192.0 * 100.0 * InstrInfo.PitchbendRange;
        pitch.PitchBend = RTMath::CentsToFreqRatio(PitchBend * pitch.PitchBendRange);
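        // (PitchBase combines tuning and key transpose (in cents) with the
        // ratio of sample rate to engine rate; e.g. a sample recorded at
        // 22050 Hz played back on a 44100 Hz engine contributes a factor of
        // 0.5, so it is read at half speed to preserve its original pitch.)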

        return pitch;
    }

    double AbstractVoice::CalculateVolume(double velocityAttenuation) {
        // For 16 bit samples, we downscale by 32768 to convert from
        // int16 value range to DSP value range (which is
        // -1.0..1.0). For 24 bit, we downscale from int32.
        float volume = velocityAttenuation / (SmplInfo.BitDepth == 16 ? 32768.0f : 32768.0f * 65536.0f);
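        // (i.e. divide by 2^15 for 16 bit data and by 2^31 for 24 bit data,
        // which is handled in full int32 range here, so full-scale input
        // ends up at roughly +/-1.0 either way)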

        volume *= GetSampleAttenuation() * pEngineChannel->GlobalVolume * GLOBAL_VOLUME;

        // the volume of release triggered samples depends on note length
        if (Type & Voice::type_release_trigger) {
            float noteLength = float(GetEngine()->FrameTime + Delay -
                                     GetNoteOnTime(MIDIKey) ) / GetEngine()->SampleRate;

            volume *= GetReleaseTriggerAttenuation(noteLength);

        }

        return volume;
    }

    float AbstractVoice::GetReleaseTriggerAttenuation(float noteLength) {
777 }
778
779 void AbstractVoice::EnterReleaseStage() {
780 if (pSignalUnitRack == NULL) {
781 pEG1->update(EG::event_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
782 pEG2->update(EG::event_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
783 } else {
784 pSignalUnitRack->EnterReleaseStage();
785 }
786 }
787
788 bool AbstractVoice::EG1Finished() {
789 if (pSignalUnitRack == NULL) {
790 return pEG1->getSegmentType() == EG::segment_end;
791 } else {
792 return !pSignalUnitRack->GetEndpointUnit()->Active();
793 }
794 }
795
796 } // namespace LinuxSampler
