Contents of /linuxsampler/trunk/src/engines/common/AbstractVoice.cpp



Revision 2559
Sun May 18 17:38:25 2014 UTC by schoenebeck
File size: 39185 byte(s)
* Aftertouch: extended API to explicitly handle channel pressure and
  polyphonic key pressure events (so far polyphonic pressure was not
  supported at all, and channel pressure was rerouted as CC128 but not
  used so far).
* Gig Engine: Fixed support for 'aftertouch' attenuation controller.
* Bumped version (1.0.0.svn39).
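
The channel pressure and polyphonic key pressure handling mentioned above shows up in processCCEvents() further down in this file, where incoming events are dispatched by their Type field. The following is a minimal, self-contained sketch of just that dispatch pattern; the Event structure and the handler bodies are simplified stand-ins written for illustration and are not taken from the engine's real headers.

// Sketch only: reduced stand-ins for LinuxSampler::Event and the voice's
// Process*Event() methods, illustrating the per-event-type dispatch that
// processCCEvents() performs in the file below.
#include <cstdint>
#include <iostream>

struct Event {
    enum event_type_t {
        type_control_change,
        type_pitchbend,
        type_channel_pressure, // channel aftertouch, no longer rerouted as CC128
        type_note_pressure     // polyphonic key pressure, newly dispatched
    };
    event_type_t Type;
    uint8_t Key;   // key number, used by type_note_pressure
    uint8_t Value; // pressure amount (or CC value)
};

// stub handlers; the real voice methods adjust synthesis parameters instead
void ProcessChannelPressureEvent(const Event& e) {
    std::cout << "channel pressure: " << int(e.Value) << "\n";
}
void ProcessPolyphonicKeyPressureEvent(const Event& e) {
    std::cout << "poly pressure on key " << int(e.Key) << ": " << int(e.Value) << "\n";
}

// dispatch by event type, as processCCEvents() does once per subfragment
void dispatch(const Event& e) {
    switch (e.Type) {
        case Event::type_channel_pressure:
            ProcessChannelPressureEvent(e);
            break;
        case Event::type_note_pressure:
            ProcessPolyphonicKeyPressureEvent(e);
            break;
        default:
            break; // CC and pitch bend handling omitted in this sketch
    }
}

int main() {
    dispatch({ Event::type_channel_pressure, 0, 96 });
    dispatch({ Event::type_note_pressure, 60, 80 });
}

In the actual method below, these two branches sit alongside the control change and pitch bend branches inside the per-subfragment event loop.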

/***************************************************************************
 * *
 * LinuxSampler - modular, streaming capable sampler *
 * *
 * Copyright (C) 2003,2004 by Benno Senoner and Christian Schoenebeck *
 * Copyright (C) 2005-2008 Christian Schoenebeck *
 * Copyright (C) 2009-2012 Christian Schoenebeck and Grigor Iliev *
 * *
 * This program is free software; you can redistribute it and/or modify *
 * it under the terms of the GNU General Public License as published by *
 * the Free Software Foundation; either version 2 of the License, or *
 * (at your option) any later version. *
 * *
 * This program is distributed in the hope that it will be useful, *
 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
 * GNU General Public License for more details. *
 * *
 * You should have received a copy of the GNU General Public License *
 * along with this program; if not, write to the Free Software *
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, *
 * MA 02111-1307 USA *
 ***************************************************************************/

#include "AbstractVoice.h"

namespace LinuxSampler {

    AbstractVoice::AbstractVoice(SignalUnitRack* pRack): pSignalUnitRack(pRack) {
        pEngineChannel = NULL;
        pLFO1 = new LFOUnsigned(1.0f);  // amplitude LFO (0..1 range)
        pLFO2 = new LFOUnsigned(1.0f);  // filter LFO (0..1 range)
        pLFO3 = new LFOSigned(1200.0f); // pitch LFO (-1200..+1200 range)
        PlaybackState = playback_state_end;
        SynthesisMode = 0; // set all mode bits to 0 first
        // select synthesis implementation (asm core is not supported ATM)
        #if 0 // CONFIG_ASM && ARCH_X86
        SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, Features::supportsMMX() && Features::supportsSSE());
        #else
        SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, false);
        #endif
        SYNTHESIS_MODE_SET_PROFILING(SynthesisMode, gig::Profiler::isEnabled());

        finalSynthesisParameters.filterLeft.Reset();
        finalSynthesisParameters.filterRight.Reset();

        pEq = NULL;
        bEqSupport = false;
    }

    AbstractVoice::~AbstractVoice() {
        if (pLFO1) delete pLFO1;
        if (pLFO2) delete pLFO2;
        if (pLFO3) delete pLFO3;

        if (pEq != NULL) delete pEq;
    }

    void AbstractVoice::CreateEq() {
        if (!bEqSupport) return;
        if (pEq != NULL) delete pEq;
        pEq = new EqSupport;
        pEq->InitEffect(GetEngine()->pAudioOutputDevice);
    }

    /**
     * Resets voice variables. Should only be called if rendering process is
     * suspended / not running.
     */
    void AbstractVoice::Reset() {
        finalSynthesisParameters.filterLeft.Reset();
        finalSynthesisParameters.filterRight.Reset();
        DiskStreamRef.pStream = NULL;
        DiskStreamRef.hStream = 0;
        DiskStreamRef.State = Stream::state_unused;
        DiskStreamRef.OrderID = 0;
        PlaybackState = playback_state_end;
        itTriggerEvent = Pool<Event>::Iterator();
        itKillEvent = Pool<Event>::Iterator();
    }

    /**
     * Initializes and triggers the voice; a disk stream will be launched if
     * needed.
     *
     * @param pEngineChannel - engine channel on which this voice was ordered
     * @param itNoteOnEvent  - event that caused triggering of this voice
     * @param PitchBend      - MIDI detune factor (-8192 ... +8191)
     * @param pRegion        - points to the region which provides sample wave(s) and articulation data
     * @param VoiceType      - type of this voice
     * @param iKeyGroup      - a value > 0 defines a key group of which this voice is a member
     * @returns 0 on success, a value < 0 if the voice wasn't triggered
     *          (either due to an error or e.g. because no region is
     *          defined for the given key)
     */
    int AbstractVoice::Trigger (
        AbstractEngineChannel* pEngineChannel,
        Pool<Event>::Iterator& itNoteOnEvent,
        int PitchBend,
        type_t VoiceType,
        int iKeyGroup
    ) {
        this->pEngineChannel = pEngineChannel;
        Orphan = false;

        #if CONFIG_DEVMODE
        if (itNoteOnEvent->FragmentPos() > GetEngine()->MaxSamplesPerCycle) { // just a sanity check for debugging
            dmsg(1,("Voice::Trigger(): ERROR, TriggerDelay > Totalsamples\n"));
        }
        #endif // CONFIG_DEVMODE

        Type = VoiceType;
        MIDIKey = itNoteOnEvent->Param.Note.Key;
        MIDIVelocity = itNoteOnEvent->Param.Note.Velocity;
        PlaybackState = playback_state_init; // mark voice as triggered, but no audio rendered yet
        Delay = itNoteOnEvent->FragmentPos();
        itTriggerEvent = itNoteOnEvent;
        itKillEvent = Pool<Event>::Iterator();
        MidiKeyBase* pKeyInfo = GetMidiKeyInfo(MIDIKey);

        pGroupEvents = iKeyGroup ? pEngineChannel->ActiveKeyGroups[iKeyGroup] : 0;

        SmplInfo = GetSampleInfo();
        RgnInfo = GetRegionInfo();
        InstrInfo = GetInstrumentInfo();

        MIDIPan = CalculatePan(pEngineChannel->iLastPanRequest);

        AboutToTrigger();

        // calculate volume
        const double velocityAttenuation = GetVelocityAttenuation(itNoteOnEvent->Param.Note.Velocity);
        float volume = CalculateVolume(velocityAttenuation) * pKeyInfo->Volume;
        if (volume <= 0) return -1;

        // select channel mode (mono or stereo)
        SYNTHESIS_MODE_SET_CHANNELS(SynthesisMode, SmplInfo.ChannelCount == 2);
        // select bit depth (16 or 24)
        SYNTHESIS_MODE_SET_BITDEPTH24(SynthesisMode, SmplInfo.BitDepth == 24);

        // get starting crossfade volume level
        float crossfadeVolume = CalculateCrossfadeVolume(itNoteOnEvent->Param.Note.Velocity);

        VolumeLeft = volume * pKeyInfo->PanLeft;
        VolumeRight = volume * pKeyInfo->PanRight;

        float subfragmentRate = GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE;
        CrossfadeSmoother.trigger(crossfadeVolume, subfragmentRate);
        VolumeSmoother.trigger(pEngineChannel->MidiVolume, subfragmentRate);

        // Check if the sample needs disk streaming or is too short for that
        long cachedsamples = GetSampleCacheSize() / SmplInfo.FrameSize;
        DiskVoice = cachedsamples < SmplInfo.TotalFrameCount;

        SetSampleStartOffset();

        if (DiskVoice) { // voice to be streamed from disk
            if (cachedsamples > (GetEngine()->MaxSamplesPerCycle << CONFIG_MAX_PITCH)) {
                MaxRAMPos = cachedsamples - (GetEngine()->MaxSamplesPerCycle << CONFIG_MAX_PITCH) / SmplInfo.ChannelCount; //TODO: this calculation is too pessimistic and may better be moved to Render() method, so it calculates MaxRAMPos dependent to the current demand of sample points to be rendered (e.g. in case of JACK)
            } else {
                // The cache is too small to fit a max sample buffer.
                // Setting MaxRAMPos to 0 will probably cause a click
                // in the audio, but it's better than not handling
                // this case at all, which would have caused the
                // unsigned MaxRAMPos to be set to a negative number.
                MaxRAMPos = 0;
            }

            // check if there's a loop defined which completely fits into the cached (RAM) part of the sample
            RAMLoop = (SmplInfo.HasLoops && (SmplInfo.LoopStart + SmplInfo.LoopLength) <= MaxRAMPos);

            if (OrderNewStream()) return -1;
            dmsg(4,("Disk voice launched (cached samples: %d, total Samples: %d, MaxRAMPos: %d, RAMLooping: %s)\n", cachedsamples, SmplInfo.TotalFrameCount, MaxRAMPos, (RAMLoop) ? "yes" : "no"));
        }
        else { // RAM only voice
            MaxRAMPos = cachedsamples;
            RAMLoop = (SmplInfo.HasLoops);
            dmsg(4,("RAM only voice launched (Looping: %s)\n", (RAMLoop) ? "yes" : "no"));
        }
        if (RAMLoop) {
            loop.uiTotalCycles = SmplInfo.LoopPlayCount;
            loop.uiCyclesLeft = SmplInfo.LoopPlayCount;
            loop.uiStart = SmplInfo.LoopStart;
            loop.uiEnd = SmplInfo.LoopStart + SmplInfo.LoopLength;
            loop.uiSize = SmplInfo.LoopLength;
        }

        Pitch = CalculatePitchInfo(PitchBend);

        // the length of the decay and release curves is dependent on the velocity
        const double velrelease = 1 / GetVelocityRelease(itNoteOnEvent->Param.Note.Velocity);

        if (pSignalUnitRack == NULL) { // setup EG 1 (VCA EG)
            // get current value of EG1 controller
            double eg1controllervalue = GetEG1ControllerValue(itNoteOnEvent->Param.Note.Velocity);

            // calculate influence of EG1 controller on EG1's parameters
            EGInfo egInfo = CalculateEG1ControllerInfluence(eg1controllervalue);

            TriggerEG1(egInfo, velrelease, velocityAttenuation, GetEngine()->SampleRate, itNoteOnEvent->Param.Note.Velocity);
        } else {
            pSignalUnitRack->Trigger();
        }

        uint8_t pan = MIDIPan;
        if (pSignalUnitRack) pan = pSignalUnitRack->GetEndpointUnit()->CalculatePan(MIDIPan);
        PanLeftSmoother.trigger(AbstractEngine::PanCurve[128 - pan], subfragmentRate);
        PanRightSmoother.trigger(AbstractEngine::PanCurve[pan], subfragmentRate);

        #ifdef CONFIG_INTERPOLATE_VOLUME
        // setup initial volume in synthesis parameters
        #ifdef CONFIG_PROCESS_MUTED_CHANNELS
        if (pEngineChannel->GetMute()) {
            finalSynthesisParameters.fFinalVolumeLeft = 0;
            finalSynthesisParameters.fFinalVolumeRight = 0;
        }
        else
        #else
        {
            float finalVolume;
            if (pSignalUnitRack == NULL) {
                finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * pEG1->getLevel();
            } else {
                finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * pSignalUnitRack->GetEndpointUnit()->GetVolume();
            }

            finalSynthesisParameters.fFinalVolumeLeft = finalVolume * VolumeLeft * PanLeftSmoother.render();
            finalSynthesisParameters.fFinalVolumeRight = finalVolume * VolumeRight * PanRightSmoother.render();
        }
        #endif
        #endif

        if (pSignalUnitRack == NULL) {
            // setup EG 2 (VCF Cutoff EG)
            {
                // get current value of EG2 controller
                double eg2controllervalue = GetEG2ControllerValue(itNoteOnEvent->Param.Note.Velocity);

                // calculate influence of EG2 controller on EG2's parameters
                EGInfo egInfo = CalculateEG2ControllerInfluence(eg2controllervalue);

                TriggerEG2(egInfo, velrelease, velocityAttenuation, GetEngine()->SampleRate, itNoteOnEvent->Param.Note.Velocity);
            }


            // setup EG 3 (VCO EG)
            {
                // if portamento mode is on, we dedicate EG3 purely for portamento, otherwise if portamento is off we do as told by the patch
                bool bPortamento = pEngineChannel->PortamentoMode && pEngineChannel->PortamentoPos >= 0.0f;
                float eg3depth = (bPortamento)
                                     ? RTMath::CentsToFreqRatio((pEngineChannel->PortamentoPos - (float) MIDIKey) * 100)
                                     : RTMath::CentsToFreqRatio(RgnInfo.EG3Depth);
                float eg3time = (bPortamento)
                                    ? pEngineChannel->PortamentoTime
                                    : RgnInfo.EG3Attack;
                EG3.trigger(eg3depth, eg3time, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                dmsg(5,("PortamentoPos=%f, depth=%f, time=%f\n", pEngineChannel->PortamentoPos, eg3depth, eg3time));
            }


            // setup LFO 1 (VCA LFO)
            InitLFO1();
            // setup LFO 2 (VCF Cutoff LFO)
            InitLFO2();
            // setup LFO 3 (VCO LFO)
            InitLFO3();
        }


        #if CONFIG_FORCE_FILTER
        const bool bUseFilter = true;
        #else // use filter only if instrument file told so
        const bool bUseFilter = RgnInfo.VCFEnabled;
        #endif // CONFIG_FORCE_FILTER
        SYNTHESIS_MODE_SET_FILTER(SynthesisMode, bUseFilter);
        if (bUseFilter) {
            #ifdef CONFIG_OVERRIDE_CUTOFF_CTRL
            VCFCutoffCtrl.controller = CONFIG_OVERRIDE_CUTOFF_CTRL;
            #else // use the one defined in the instrument file
            VCFCutoffCtrl.controller = GetVCFCutoffCtrl();
            #endif // CONFIG_OVERRIDE_CUTOFF_CTRL

            #ifdef CONFIG_OVERRIDE_RESONANCE_CTRL
            VCFResonanceCtrl.controller = CONFIG_OVERRIDE_RESONANCE_CTRL;
            #else // use the one defined in the instrument file
            VCFResonanceCtrl.controller = GetVCFResonanceCtrl();
            #endif // CONFIG_OVERRIDE_RESONANCE_CTRL

            #ifndef CONFIG_OVERRIDE_FILTER_TYPE
            finalSynthesisParameters.filterLeft.SetType(RgnInfo.VCFType);
            finalSynthesisParameters.filterRight.SetType(RgnInfo.VCFType);
            #else // override filter type
            finalSynthesisParameters.filterLeft.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
            finalSynthesisParameters.filterRight.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
            #endif // CONFIG_OVERRIDE_FILTER_TYPE

            VCFCutoffCtrl.value = pEngineChannel->ControllerTable[VCFCutoffCtrl.controller];
            VCFResonanceCtrl.value = pEngineChannel->ControllerTable[VCFResonanceCtrl.controller];

            // calculate cutoff frequency
            CutoffBase = CalculateCutoffBase(itNoteOnEvent->Param.Note.Velocity);

            VCFCutoffCtrl.fvalue = CalculateFinalCutoff(CutoffBase);

            // calculate resonance
            float resonance = (float) (VCFResonanceCtrl.controller ? VCFResonanceCtrl.value : RgnInfo.VCFResonance);
            VCFResonanceCtrl.fvalue = resonance;
        } else {
            VCFCutoffCtrl.controller = 0;
            VCFResonanceCtrl.controller = 0;
        }

        const bool bEq =
            pSignalUnitRack != NULL && pSignalUnitRack->HasEq() && pEq->HasSupport();

        if (bEq) {
            pEq->GetInChannelLeft()->Clear();
            pEq->GetInChannelRight()->Clear();
            pEq->RenderAudio(GetEngine()->pAudioOutputDevice->MaxSamplesPerCycle());
        }

        return 0; // success
    }

    void AbstractVoice::SetSampleStartOffset() {
        finalSynthesisParameters.dPos = RgnInfo.SampleStartOffset; // offset where we should start playback of sample (0 - 2000 sample points)
        Pos = RgnInfo.SampleStartOffset;
    }

    /**
     * Synthesizes the current audio fragment for this voice.
     *
     * @param Samples - number of sample points to be rendered in this audio
     *                  fragment cycle
     * @param pSrc    - pointer to input sample data
     * @param Skip    - number of sample points to skip in output buffer
     */
    void AbstractVoice::Synthesize(uint Samples, sample_t* pSrc, uint Skip) {
        bool delay = false; // whether the voice playback should be delayed for this call

        if (pSignalUnitRack != NULL) {
            uint delaySteps = pSignalUnitRack->GetEndpointUnit()->DelayTrigger();
            if (delaySteps > 0) { // delay on the endpoint unit means delay of the voice playback
                if (delaySteps >= Samples) {
                    pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(Samples);
                    delay = true;
                } else {
                    pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(delaySteps);
                    Samples -= delaySteps;
                    Skip += delaySteps;
                }
            }
        }

        AbstractEngineChannel* pChannel = pEngineChannel;
        MidiKeyBase* pMidiKeyInfo = GetMidiKeyInfo(MIDIKey);

        const bool bVoiceRequiresDedicatedRouting =
            pEngineChannel->GetFxSendCount() > 0 &&
            (pMidiKeyInfo->ReverbSend || pMidiKeyInfo->ChorusSend);

        const bool bEq =
            pSignalUnitRack != NULL && pSignalUnitRack->HasEq() && pEq->HasSupport();

        if (bEq) {
            pEq->GetInChannelLeft()->Clear();
            pEq->GetInChannelRight()->Clear();
            finalSynthesisParameters.pOutLeft = &pEq->GetInChannelLeft()->Buffer()[Skip];
            finalSynthesisParameters.pOutRight = &pEq->GetInChannelRight()->Buffer()[Skip];
            pSignalUnitRack->UpdateEqSettings(pEq);
        } else if (bVoiceRequiresDedicatedRouting) {
            finalSynthesisParameters.pOutLeft = &GetEngine()->pDedicatedVoiceChannelLeft->Buffer()[Skip];
            finalSynthesisParameters.pOutRight = &GetEngine()->pDedicatedVoiceChannelRight->Buffer()[Skip];
        } else {
            finalSynthesisParameters.pOutLeft = &pChannel->pChannelLeft->Buffer()[Skip];
            finalSynthesisParameters.pOutRight = &pChannel->pChannelRight->Buffer()[Skip];
        }
        finalSynthesisParameters.pSrc = pSrc;

        RTList<Event>::Iterator itCCEvent = pChannel->pEvents->first();
        RTList<Event>::Iterator itNoteEvent;
        GetFirstEventOnKey(MIDIKey, itNoteEvent);

        RTList<Event>::Iterator itGroupEvent;
        if (pGroupEvents && !Orphan) itGroupEvent = pGroupEvents->first();

        if (itTriggerEvent) { // skip events that happened before this voice was triggered
            while (itCCEvent && itCCEvent->FragmentPos() <= Skip) ++itCCEvent;
            while (itGroupEvent && itGroupEvent->FragmentPos() <= Skip) ++itGroupEvent;

            // we can't simply compare the timestamps here, because note events
            // might share the same time stamp, so we have to go by the actual
            // sequence in which the note events arrived instead (see bug #112)
            for (; itNoteEvent; ++itNoteEvent) {
                if (itTriggerEvent == itNoteEvent) {
                    ++itNoteEvent;
                    break;
                }
            }
        }

        uint killPos;
        if (itKillEvent) {
            int maxFadeOutPos = Samples - GetEngine()->GetMinFadeOutSamples();
            if (maxFadeOutPos < 0) {
                // There's not enough space in the buffer to do a fade out
                // from max volume (this can only happen for audio
                // drivers that use Samples < MaxSamplesPerCycle).
                // End the EG1 here, at pos 0, with a shorter max fade
                // out time.
                if (pSignalUnitRack == NULL) {
                    pEG1->enterFadeOutStage(Samples / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                } else {
                    pSignalUnitRack->EnterFadeOutStage(Samples / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                }
                itKillEvent = Pool<Event>::Iterator();
            } else {
                killPos = RTMath::Min(itKillEvent->FragmentPos(), maxFadeOutPos);
            }
        }

        uint i = Skip;
        while (i < Samples) {
            int iSubFragmentEnd = RTMath::Min(i + CONFIG_DEFAULT_SUBFRAGMENT_SIZE, Samples);

            // initialize all final synthesis parameters
            fFinalCutoff = VCFCutoffCtrl.fvalue;
            fFinalResonance = VCFResonanceCtrl.fvalue;

            // process MIDI control change, aftertouch and pitchbend events for this subfragment
            processCCEvents(itCCEvent, iSubFragmentEnd);
            uint8_t pan = MIDIPan;
            if (pSignalUnitRack != NULL) pan = pSignalUnitRack->GetEndpointUnit()->CalculatePan(MIDIPan);

            PanLeftSmoother.update(AbstractEngine::PanCurve[128 - pan]);
            PanRightSmoother.update(AbstractEngine::PanCurve[pan]);

            finalSynthesisParameters.fFinalPitch = Pitch.PitchBase * Pitch.PitchBend;
            float fFinalVolume = VolumeSmoother.render() * CrossfadeSmoother.render();
            #ifdef CONFIG_PROCESS_MUTED_CHANNELS
            if (pChannel->GetMute()) fFinalVolume = 0;
            #endif

            // process transition events (note on, note off & sustain pedal)
            processTransitionEvents(itNoteEvent, iSubFragmentEnd);
            processGroupEvents(itGroupEvent, iSubFragmentEnd);

            if (pSignalUnitRack == NULL) {
                // if the voice was killed in this subfragment, or if the
                // filter EG is finished, switch EG1 to fade out stage
                if ((itKillEvent && killPos <= iSubFragmentEnd) ||
                    (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) &&
                     pEG2->getSegmentType() == EG::segment_end)) {
                    pEG1->enterFadeOutStage();
                    itKillEvent = Pool<Event>::Iterator();
                }

                // process envelope generators
                switch (pEG1->getSegmentType()) {
                    case EG::segment_lin:
                        fFinalVolume *= pEG1->processLin();
                        break;
                    case EG::segment_exp:
                        fFinalVolume *= pEG1->processExp();
                        break;
                    case EG::segment_end:
                        fFinalVolume *= pEG1->getLevel();
                        break; // noop
                    case EG::segment_pow:
                        fFinalVolume *= pEG1->processPow();
                        break;
                }
                switch (pEG2->getSegmentType()) {
                    case EG::segment_lin:
                        fFinalCutoff *= pEG2->processLin();
                        break;
                    case EG::segment_exp:
                        fFinalCutoff *= pEG2->processExp();
                        break;
                    case EG::segment_end:
                        fFinalCutoff *= pEG2->getLevel();
                        break; // noop
                    case EG::segment_pow:
                        fFinalCutoff *= pEG2->processPow();
                        break;
                }
                if (EG3.active()) finalSynthesisParameters.fFinalPitch *= EG3.render();

                // process low frequency oscillators
                if (bLFO1Enabled) fFinalVolume *= (1.0f - pLFO1->render());
                if (bLFO2Enabled) fFinalCutoff *= pLFO2->render();
                if (bLFO3Enabled) finalSynthesisParameters.fFinalPitch *= RTMath::CentsToFreqRatio(pLFO3->render());
            } else {
                // if the voice was killed in this subfragment, enter fade out stage
                if (itKillEvent && killPos <= iSubFragmentEnd) {
                    pSignalUnitRack->EnterFadeOutStage();
                    itKillEvent = Pool<Event>::Iterator();
                }

                // if the filter EG is finished, switch EG1 to fade out stage
                /*if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) &&
                    pEG2->getSegmentType() == EG::segment_end) {
                    pEG1->enterFadeOutStage();
                    itKillEvent = Pool<Event>::Iterator();
                }*/
                // TODO: ^^^

                fFinalVolume *= pSignalUnitRack->GetEndpointUnit()->GetVolume();
                fFinalCutoff = pSignalUnitRack->GetEndpointUnit()->CalculateFilterCutoff(fFinalCutoff);
                fFinalResonance = pSignalUnitRack->GetEndpointUnit()->CalculateResonance(fFinalResonance);

                finalSynthesisParameters.fFinalPitch =
                    pSignalUnitRack->GetEndpointUnit()->CalculatePitch(finalSynthesisParameters.fFinalPitch);

            }

            // limit the pitch so we don't read outside the buffer
            finalSynthesisParameters.fFinalPitch = RTMath::Min(finalSynthesisParameters.fFinalPitch, float(1 << CONFIG_MAX_PITCH));

            // if filter enabled then update filter coefficients
            if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode)) {
                finalSynthesisParameters.filterLeft.SetParameters(fFinalCutoff, fFinalResonance, GetEngine()->SampleRate);
                finalSynthesisParameters.filterRight.SetParameters(fFinalCutoff, fFinalResonance, GetEngine()->SampleRate);
            }

            // do we need resampling?
            const float __PLUS_ONE_CENT = 1.000577789506554859250142541782224725466f;
            const float __MINUS_ONE_CENT = 0.9994225441413807496009516495583113737666f;
            const bool bResamplingRequired = !(finalSynthesisParameters.fFinalPitch <= __PLUS_ONE_CENT &&
                                               finalSynthesisParameters.fFinalPitch >= __MINUS_ONE_CENT);
            SYNTHESIS_MODE_SET_INTERPOLATE(SynthesisMode, bResamplingRequired);

            // prepare final synthesis parameters structure
            finalSynthesisParameters.uiToGo = iSubFragmentEnd - i;
            #ifdef CONFIG_INTERPOLATE_VOLUME
            finalSynthesisParameters.fFinalVolumeDeltaLeft =
                (fFinalVolume * VolumeLeft * PanLeftSmoother.render() -
                 finalSynthesisParameters.fFinalVolumeLeft) / finalSynthesisParameters.uiToGo;
            finalSynthesisParameters.fFinalVolumeDeltaRight =
                (fFinalVolume * VolumeRight * PanRightSmoother.render() -
                 finalSynthesisParameters.fFinalVolumeRight) / finalSynthesisParameters.uiToGo;
            #else
            finalSynthesisParameters.fFinalVolumeLeft =
                fFinalVolume * VolumeLeft * PanLeftSmoother.render();
            finalSynthesisParameters.fFinalVolumeRight =
                fFinalVolume * VolumeRight * PanRightSmoother.render();
            #endif
            // render audio for one subfragment
            if (!delay) RunSynthesisFunction(SynthesisMode, &finalSynthesisParameters, &loop);

            if (pSignalUnitRack == NULL) {
                // stop the rendering if volume EG is finished
                if (pEG1->getSegmentType() == EG::segment_end) break;
            } else {
                // stop the rendering if the endpoint unit is not active
                if (!pSignalUnitRack->GetEndpointUnit()->Active()) break;
            }

            const double newPos = Pos + (iSubFragmentEnd - i) * finalSynthesisParameters.fFinalPitch;

            if (pSignalUnitRack == NULL) {
                // increment envelopes' positions
                if (pEG1->active()) {

                    // if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage
                    if (SmplInfo.HasLoops && Pos <= SmplInfo.LoopStart && SmplInfo.LoopStart < newPos) {
                        pEG1->update(EG::event_hold_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                    }

                    pEG1->increment(1);
                    if (!pEG1->toStageEndLeft()) pEG1->update(EG::event_stage_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                }
                if (pEG2->active()) {
                    pEG2->increment(1);
                    if (!pEG2->toStageEndLeft()) pEG2->update(EG::event_stage_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                }
                EG3.increment(1);
                if (!EG3.toEndLeft()) EG3.update(); // neutralize envelope coefficient if end reached
            } else {
                // if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage
                /*if (SmplInfo.HasLoops && Pos <= SmplInfo.LoopStart && SmplInfo.LoopStart < newPos) {
                    pEG1->update(EG::event_hold_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                }*/
                // TODO: ^^^

                if (!delay) pSignalUnitRack->Increment();
            }

            Pos = newPos;
            i = iSubFragmentEnd;
        }

        if (delay) return;

        if (bVoiceRequiresDedicatedRouting) {
            if (bEq) {
                pEq->RenderAudio(Samples);
                pEq->GetOutChannelLeft()->CopyTo(GetEngine()->pDedicatedVoiceChannelLeft, Samples);
                pEq->GetOutChannelRight()->CopyTo(GetEngine()->pDedicatedVoiceChannelRight, Samples);
            }
            optional<float> effectSendLevels[2] = {
                pMidiKeyInfo->ReverbSend,
                pMidiKeyInfo->ChorusSend
            };
            GetEngine()->RouteDedicatedVoiceChannels(pEngineChannel, effectSendLevels, Samples);
        } else if (bEq) {
            pEq->RenderAudio(Samples);
            pEq->GetOutChannelLeft()->MixTo(pChannel->pChannelLeft, Samples);
            pEq->GetOutChannelRight()->MixTo(pChannel->pChannelRight, Samples);
        }
    }

    /**
     * Process given list of MIDI control change, aftertouch and pitch bend
     * events for the given time.
     *
     * @param itEvent - iterator pointing to the next event to be processed
     * @param End     - youngest time stamp where processing should be stopped
     */
    void AbstractVoice::processCCEvents(RTList<Event>::Iterator& itEvent, uint End) {
        for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
            if (itEvent->Type == Event::type_control_change && itEvent->Param.CC.Controller) { // if (valid) MIDI control change event
                if (itEvent->Param.CC.Controller == VCFCutoffCtrl.controller) {
                    ProcessCutoffEvent(itEvent);
                }
                if (itEvent->Param.CC.Controller == VCFResonanceCtrl.controller) {
                    processResonanceEvent(itEvent);
                }
                if (pSignalUnitRack == NULL) {
                    if (itEvent->Param.CC.Controller == pLFO1->ExtController) {
                        pLFO1->update(itEvent->Param.CC.Value);
                    }
                    if (itEvent->Param.CC.Controller == pLFO2->ExtController) {
                        pLFO2->update(itEvent->Param.CC.Value);
                    }
                    if (itEvent->Param.CC.Controller == pLFO3->ExtController) {
                        pLFO3->update(itEvent->Param.CC.Value);
                    }
                }
                if (itEvent->Param.CC.Controller == 7) { // volume
                    VolumeSmoother.update(AbstractEngine::VolumeCurve[itEvent->Param.CC.Value]);
                } else if (itEvent->Param.CC.Controller == 10) { // panpot
                    MIDIPan = CalculatePan(itEvent->Param.CC.Value);
                }
            } else if (itEvent->Type == Event::type_pitchbend) { // if pitch bend event
                processPitchEvent(itEvent);
            } else if (itEvent->Type == Event::type_channel_pressure) {
                ProcessChannelPressureEvent(itEvent);
            } else if (itEvent->Type == Event::type_note_pressure) {
                ProcessPolyphonicKeyPressureEvent(itEvent);
            }

            ProcessCCEvent(itEvent);
            if (pSignalUnitRack != NULL) {
                pSignalUnitRack->ProcessCCEvent(itEvent);
            }
        }
    }

    void AbstractVoice::processPitchEvent(RTList<Event>::Iterator& itEvent) {
        Pitch.PitchBend = RTMath::CentsToFreqRatio(itEvent->Param.Pitch.Pitch * Pitch.PitchBendRange);
    }

    void AbstractVoice::processResonanceEvent(RTList<Event>::Iterator& itEvent) {
        // convert absolute controller value to differential
        const int ctrldelta = itEvent->Param.CC.Value - VCFResonanceCtrl.value;
        VCFResonanceCtrl.value = itEvent->Param.CC.Value;
        const float resonancedelta = (float) ctrldelta;
        fFinalResonance += resonancedelta;
        // needed for initialization of parameter
        VCFResonanceCtrl.fvalue = itEvent->Param.CC.Value;
    }

    /**
     * Process given list of MIDI note on, note off and sustain pedal events
     * for the given time.
     *
     * @param itEvent - iterator pointing to the next event to be processed
     * @param End     - youngest time stamp where processing should be stopped
     */
    void AbstractVoice::processTransitionEvents(RTList<Event>::Iterator& itEvent, uint End) {
        for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
            // some voice types ignore note off
            if (!(Type & (Voice::type_one_shot | Voice::type_release_trigger | Voice::type_controller_triggered))) {
                if (itEvent->Type == Event::type_release) {
                    EnterReleaseStage();
                } else if (itEvent->Type == Event::type_cancel_release) {
                    if (pSignalUnitRack == NULL) {
                        pEG1->update(EG::event_cancel_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                        pEG2->update(EG::event_cancel_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                    } else {
                        pSignalUnitRack->CancelRelease();
                    }
                }
            }
        }
    }

    /**
     * Process given list of events aimed at all voices in a key group.
     *
     * @param itEvent - iterator pointing to the next event to be processed
     * @param End     - youngest time stamp where processing should be stopped
     */
    void AbstractVoice::processGroupEvents(RTList<Event>::Iterator& itEvent, uint End) {
        for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
            ProcessGroupEvent(itEvent);
        }
    }

    /** @brief Update current portamento position.
     *
     * Will be called when portamento mode is enabled to get the final
     * portamento position of this active voice from where the next voice(s)
     * might continue to slide on.
     *
     * @param itNoteOffEvent - event which causes this voice to die soon
     */
    void AbstractVoice::UpdatePortamentoPos(Pool<Event>::Iterator& itNoteOffEvent) {
        if (pSignalUnitRack == NULL) {
            const float fFinalEG3Level = EG3.level(itNoteOffEvent->FragmentPos());
            pEngineChannel->PortamentoPos = (float) MIDIKey + RTMath::FreqRatioToCents(fFinalEG3Level) * 0.01f;
        } else {
            // TODO:
        }
    }

    /**
     * Kill the voice in the regular sense. Let the voice render audio until
     * the kill event actually occurred, then fade down the volume level
     * very quickly and finally let the voice die. Unlike a normal release
     * of a voice, a kill process cannot be cancelled and is therefore
     * usually used for voice stealing and key group conflicts.
     *
     * @param itKillEvent - event which caused the voice to be killed
     */
    void AbstractVoice::Kill(Pool<Event>::Iterator& itKillEvent) {
        #if CONFIG_DEVMODE
        if (!itKillEvent) dmsg(1,("AbstractVoice::Kill(): ERROR, !itKillEvent !!!\n"));
        if (itKillEvent && !itKillEvent.isValid()) dmsg(1,("AbstractVoice::Kill(): ERROR, itKillEvent invalid !!!\n"));
        #endif // CONFIG_DEVMODE

        if (itTriggerEvent && itKillEvent->FragmentPos() <= itTriggerEvent->FragmentPos()) return;
        this->itKillEvent = itKillEvent;
    }

    Voice::PitchInfo AbstractVoice::CalculatePitchInfo(int PitchBend) {
        PitchInfo pitch;
        double pitchbasecents = InstrInfo.FineTune + RgnInfo.FineTune + GetEngine()->ScaleTuning[MIDIKey % 12];

        // GSt behaviour: maximum transpose up is 40 semitones. If
        // MIDI key is more than 40 semitones above unity note,
        // the transpose is not done.
        if (!SmplInfo.Unpitched && (MIDIKey - (int) RgnInfo.UnityNote) < 40) pitchbasecents += (MIDIKey - (int) RgnInfo.UnityNote) * 100;

        pitch.PitchBase = RTMath::CentsToFreqRatioUnlimited(pitchbasecents) * (double(SmplInfo.SampleRate) / double(GetEngine()->SampleRate));
        pitch.PitchBendRange = 1.0 / 8192.0 * 100.0 * InstrInfo.PitchbendRange;
        pitch.PitchBend = RTMath::CentsToFreqRatio(PitchBend * pitch.PitchBendRange);

        return pitch;
    }

    void AbstractVoice::onScaleTuningChanged() {
        PitchInfo pitch = this->Pitch;
        double pitchbasecents = InstrInfo.FineTune + RgnInfo.FineTune + GetEngine()->ScaleTuning[MIDIKey % 12];

        // GSt behaviour: maximum transpose up is 40 semitones. If
        // MIDI key is more than 40 semitones above unity note,
        // the transpose is not done.
        if (!SmplInfo.Unpitched && (MIDIKey - (int) RgnInfo.UnityNote) < 40) pitchbasecents += (MIDIKey - (int) RgnInfo.UnityNote) * 100;

        pitch.PitchBase = RTMath::CentsToFreqRatioUnlimited(pitchbasecents) * (double(SmplInfo.SampleRate) / double(GetEngine()->SampleRate));
        this->Pitch = pitch;
    }

    double AbstractVoice::CalculateVolume(double velocityAttenuation) {
        // For 16 bit samples, we downscale by 32768 to convert from
        // int16 value range to DSP value range (which is
        // -1.0..1.0). For 24 bit, we downscale from int32.
        float volume = velocityAttenuation / (SmplInfo.BitDepth == 16 ? 32768.0f : 32768.0f * 65536.0f);

        volume *= GetSampleAttenuation() * pEngineChannel->GlobalVolume * GLOBAL_VOLUME;

        // the volume of release triggered samples depends on note length
        if (Type & Voice::type_release_trigger) {
            float noteLength = float(GetEngine()->FrameTime + Delay -
                                     GetNoteOnTime(MIDIKey)) / GetEngine()->SampleRate;

            volume *= GetReleaseTriggerAttenuation(noteLength);
        }

        return volume;
    }

    float AbstractVoice::GetReleaseTriggerAttenuation(float noteLength) {
        return 1 - RgnInfo.ReleaseTriggerDecay * noteLength;
    }

    void AbstractVoice::EnterReleaseStage() {
        if (pSignalUnitRack == NULL) {
            pEG1->update(EG::event_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
            pEG2->update(EG::event_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
        } else {
            pSignalUnitRack->EnterReleaseStage();
        }
    }

    bool AbstractVoice::EG1Finished() {
        if (pSignalUnitRack == NULL) {
            return pEG1->getSegmentType() == EG::segment_end;
        } else {
            return !pSignalUnitRack->GetEndpointUnit()->Active();
        }
    }

} // namespace LinuxSampler
