linuxsampler/trunk/src/engines/common/AbstractVoice.cpp
Revision 2298 - Fri Dec 9 17:04:24 2011 UTC - by iliev
File size: 37719 byte(s)
* use different EQ effect instance for every voice

/***************************************************************************
 *                                                                         *
 *   LinuxSampler - modular, streaming capable sampler                     *
 *                                                                         *
 *   Copyright (C) 2003,2004 by Benno Senoner and Christian Schoenebeck    *
 *   Copyright (C) 2005-2008 Christian Schoenebeck                         *
 *   Copyright (C) 2009-2011 Christian Schoenebeck and Grigor Iliev        *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the          *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the Free Software           *
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston,                 *
 *   MA 02111-1307 USA                                                     *
 ***************************************************************************/

#include "AbstractVoice.h"

namespace LinuxSampler {

    AbstractVoice::AbstractVoice(SignalUnitRack* pRack): pSignalUnitRack(pRack) {
        pEngineChannel = NULL;
        pLFO1 = new LFOUnsigned(1.0f);  // amplitude LFO (0..1 range)
        pLFO2 = new LFOUnsigned(1.0f);  // filter LFO (0..1 range)
        pLFO3 = new LFOSigned(1200.0f); // pitch LFO (-1200..+1200 range)
        PlaybackState = playback_state_end;
        SynthesisMode = 0; // set all mode bits to 0 first
        // select synthesis implementation (asm core is not supported ATM)
        #if 0 // CONFIG_ASM && ARCH_X86
        SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, Features::supportsMMX() && Features::supportsSSE());
        #else
        SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, false);
        #endif
        SYNTHESIS_MODE_SET_PROFILING(SynthesisMode, gig::Profiler::isEnabled());

        finalSynthesisParameters.filterLeft.Reset();
        finalSynthesisParameters.filterRight.Reset();

        pEq        = NULL;
        bEqSupport = false;
    }

    AbstractVoice::~AbstractVoice() {
        if (pLFO1) delete pLFO1;
        if (pLFO2) delete pLFO2;
        if (pLFO3) delete pLFO3;

        if (pEq != NULL) delete pEq;
    }

    void AbstractVoice::CreateEq() {
        if (!bEqSupport) return;
        if (pEq != NULL) delete pEq;
        pEq = new EqSupport;
        pEq->InitEffect(GetEngine()->pAudioOutputDevice);
    }

    /**
     * Resets voice variables. Should only be called if the rendering process
     * is suspended / not running.
     */
    void AbstractVoice::Reset() {
        finalSynthesisParameters.filterLeft.Reset();
        finalSynthesisParameters.filterRight.Reset();
        DiskStreamRef.pStream = NULL;
        DiskStreamRef.hStream = 0;
        DiskStreamRef.State   = Stream::state_unused;
        DiskStreamRef.OrderID = 0;
        PlaybackState = playback_state_end;
        itTriggerEvent = Pool<Event>::Iterator();
        itKillEvent    = Pool<Event>::Iterator();
    }

    /**
     * Initializes and triggers the voice; a disk stream will be launched if
     * needed.
     *
     * @param pEngineChannel - engine channel on which this voice was ordered
     * @param itNoteOnEvent  - event that caused triggering of this voice
     * @param PitchBend      - MIDI detune factor (-8192 ... +8191)
     * @param VoiceType      - type of this voice
     * @param iKeyGroup      - a value > 0 defines the key group of which this
     *                         voice is a member
     * @returns 0 on success, a value < 0 if the voice wasn't triggered
     *          (either due to an error or e.g. because no region is
     *          defined for the given key)
     */
    int AbstractVoice::Trigger (
        AbstractEngineChannel* pEngineChannel,
        Pool<Event>::Iterator& itNoteOnEvent,
        int                    PitchBend,
        type_t                 VoiceType,
        int                    iKeyGroup
    ) {
        this->pEngineChannel = pEngineChannel;
        Orphan = false;

        #if CONFIG_DEVMODE
        if (itNoteOnEvent->FragmentPos() > GetEngine()->MaxSamplesPerCycle) { // just a sanity check for debugging
            dmsg(1,("Voice::Trigger(): ERROR, TriggerDelay > Totalsamples\n"));
        }
        #endif // CONFIG_DEVMODE

        Type           = VoiceType;
        MIDIKey        = itNoteOnEvent->Param.Note.Key;
        MIDIVelocity   = itNoteOnEvent->Param.Note.Velocity;
        MIDIPan        = pEngineChannel->ControllerTable[10];
        if (MIDIPan == 0 && pEngineChannel->GlobalPanRight == 1) MIDIPan = 64; // workaround to detect that the MIDI pan has not been set yet
        PlaybackState  = playback_state_init; // mark voice as triggered, but no audio rendered yet
        Delay          = itNoteOnEvent->FragmentPos();
        itTriggerEvent = itNoteOnEvent;
        itKillEvent    = Pool<Event>::Iterator();
        MidiKeyBase* pKeyInfo = GetMidiKeyInfo(MIDIKey);

        pGroupEvents = iKeyGroup ? pEngineChannel->ActiveKeyGroups[iKeyGroup] : 0;

        SmplInfo  = GetSampleInfo();
        RgnInfo   = GetRegionInfo();
        InstrInfo = GetInstrumentInfo();

        AboutToTrigger();

        // calculate volume
        const double velocityAttenuation = GetVelocityAttenuation(itNoteOnEvent->Param.Note.Velocity);
        float volume = CalculateVolume(velocityAttenuation) * pKeyInfo->Volume;
        if (volume <= 0) return -1;

        // select channel mode (mono or stereo)
        SYNTHESIS_MODE_SET_CHANNELS(SynthesisMode, SmplInfo.ChannelCount == 2);
        // select bit depth (16 or 24)
        SYNTHESIS_MODE_SET_BITDEPTH24(SynthesisMode, SmplInfo.BitDepth == 24);

        // get starting crossfade volume level
        float crossfadeVolume = CalculateCrossfadeVolume(itNoteOnEvent->Param.Note.Velocity);

        VolumeLeft  = volume * pKeyInfo->PanLeft  * AbstractEngine::PanCurve[64 - RgnInfo.Pan];
        VolumeRight = volume * pKeyInfo->PanRight * AbstractEngine::PanCurve[64 + RgnInfo.Pan];

        float subfragmentRate = GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE;
        CrossfadeSmoother.trigger(crossfadeVolume, subfragmentRate);
        VolumeSmoother.trigger(pEngineChannel->MidiVolume, subfragmentRate);
        PanLeftSmoother.trigger(pEngineChannel->GlobalPanLeft, subfragmentRate);
        PanRightSmoother.trigger(pEngineChannel->GlobalPanRight, subfragmentRate);

        // check whether the sample needs disk streaming or is short enough to be played entirely from the RAM cache
        long cachedsamples = GetSampleCacheSize() / SmplInfo.FrameSize;
        DiskVoice          = cachedsamples < SmplInfo.TotalFrameCount;
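        // Illustrative arithmetic only (hypothetical figures, not values
        // taken from this code): for a 16 bit stereo sample FrameSize is
        // 4 bytes, so a 256 KB cache (GetSampleCacheSize() == 262144)
        // holds 262144 / 4 = 65536 sample frames. A sample with e.g.
        // 500000 total frames therefore does not fit into the cache
        // (65536 < 500000) and DiskVoice becomes true, i.e. the cached
        // head of the sample is played from RAM while the rest has to be
        // streamed from disk.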

        SetSampleStartOffset();

        if (DiskVoice) { // voice to be streamed from disk
            if (cachedsamples > (GetEngine()->MaxSamplesPerCycle << CONFIG_MAX_PITCH)) {
                MaxRAMPos = cachedsamples - (GetEngine()->MaxSamplesPerCycle << CONFIG_MAX_PITCH) / SmplInfo.ChannelCount; //TODO: this calculation is too pessimistic and may better be moved to the Render() method, so it calculates MaxRAMPos depending on the current demand of sample points to be rendered (e.g. in case of JACK)
            } else {
                // The cache is too small to fit a max sample buffer.
                // Setting MaxRAMPos to 0 will probably cause a click
                // in the audio, but it's better than not handling
                // this case at all, which would have caused the
                // unsigned MaxRAMPos to be set to a negative number.
                MaxRAMPos = 0;
            }

            // check if there's a loop defined which completely fits into the cached (RAM) part of the sample
            RAMLoop = (SmplInfo.HasLoops && (SmplInfo.LoopStart + SmplInfo.LoopLength) <= MaxRAMPos);

            if (OrderNewStream()) return -1;
            dmsg(4,("Disk voice launched (cached samples: %d, total Samples: %d, MaxRAMPos: %d, RAMLooping: %s)\n", cachedsamples, SmplInfo.TotalFrameCount, MaxRAMPos, (RAMLoop) ? "yes" : "no"));
        }
        else { // RAM only voice
            MaxRAMPos = cachedsamples;
            RAMLoop = (SmplInfo.HasLoops);
            dmsg(4,("RAM only voice launched (Looping: %s)\n", (RAMLoop) ? "yes" : "no"));
        }
        if (RAMLoop) {
            loop.uiTotalCycles = SmplInfo.LoopPlayCount;
            loop.uiCyclesLeft  = SmplInfo.LoopPlayCount;
            loop.uiStart       = SmplInfo.LoopStart;
            loop.uiEnd         = SmplInfo.LoopStart + SmplInfo.LoopLength;
            loop.uiSize        = SmplInfo.LoopLength;
        }

        Pitch = CalculatePitchInfo(PitchBend);

        // the lengths of the decay and release curves depend on the velocity
        const double velrelease = 1 / GetVelocityRelease(itNoteOnEvent->Param.Note.Velocity);

        if (pSignalUnitRack == NULL) { // setup EG 1 (VCA EG)
            // get current value of EG1 controller
            double eg1controllervalue = GetEG1ControllerValue(itNoteOnEvent->Param.Note.Velocity);

            // calculate influence of EG1 controller on EG1's parameters
            EGInfo egInfo = CalculateEG1ControllerInfluence(eg1controllervalue);

            TriggerEG1(egInfo, velrelease, velocityAttenuation, GetEngine()->SampleRate, itNoteOnEvent->Param.Note.Velocity);
        } else {
            pSignalUnitRack->Trigger();
        }

        #ifdef CONFIG_INTERPOLATE_VOLUME
        // setup initial volume in synthesis parameters
        #ifdef CONFIG_PROCESS_MUTED_CHANNELS
        if (pEngineChannel->GetMute()) {
            finalSynthesisParameters.fFinalVolumeLeft  = 0;
            finalSynthesisParameters.fFinalVolumeRight = 0;
        }
        else
        #else
        {
            float finalVolume;
            if (pSignalUnitRack == NULL) {
                finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * pEG1->getLevel();
            } else {
                finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * pSignalUnitRack->GetEndpointUnit()->GetVolume();
            }

            finalSynthesisParameters.fFinalVolumeLeft  = finalVolume * VolumeLeft  * pEngineChannel->GlobalPanLeft;
            finalSynthesisParameters.fFinalVolumeRight = finalVolume * VolumeRight * pEngineChannel->GlobalPanRight;
        }
        #endif
        #endif

        if (pSignalUnitRack == NULL) {
            // setup EG 2 (VCF Cutoff EG)
            {
                // get current value of EG2 controller
                double eg2controllervalue = GetEG2ControllerValue(itNoteOnEvent->Param.Note.Velocity);

                // calculate influence of EG2 controller on EG2's parameters
                EGInfo egInfo = CalculateEG2ControllerInfluence(eg2controllervalue);

                TriggerEG2(egInfo, velrelease, velocityAttenuation, GetEngine()->SampleRate, itNoteOnEvent->Param.Note.Velocity);
            }


            // setup EG 3 (VCO EG)
            {
                // if portamento mode is on, we dedicate EG3 purely to portamento; otherwise we do as told by the patch
                bool bPortamento = pEngineChannel->PortamentoMode && pEngineChannel->PortamentoPos >= 0.0f;
                float eg3depth = (bPortamento)
                                     ? RTMath::CentsToFreqRatio((pEngineChannel->PortamentoPos - (float) MIDIKey) * 100)
                                     : RTMath::CentsToFreqRatio(RgnInfo.EG3Depth);
                float eg3time = (bPortamento)
                                    ? pEngineChannel->PortamentoTime
                                    : RgnInfo.EG3Attack;
                EG3.trigger(eg3depth, eg3time, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                dmsg(5,("PortamentoPos=%f, depth=%f, time=%f\n", pEngineChannel->PortamentoPos, eg3depth, eg3time));
            }
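            // Rough numeric sketch of the EG3 setup above (hypothetical
            // values): if the previous note left PortamentoPos at 67.0 and
            // this voice plays MIDI key 60, then eg3depth =
            // CentsToFreqRatio((67.0 - 60) * 100) = CentsToFreqRatio(700)
            // ~ 1.498, i.e. the voice starts a fifth above its nominal
            // pitch and glides down to ratio 1.0 within PortamentoTime.
            // Without portamento, depth and attack time simply come from
            // the region (EG3Depth in cents, EG3Attack in seconds).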


            // setup LFO 1 (VCA LFO)
            InitLFO1();
            // setup LFO 2 (VCF Cutoff LFO)
            InitLFO2();
            // setup LFO 3 (VCO LFO)
            InitLFO3();
        }


        #if CONFIG_FORCE_FILTER
        const bool bUseFilter = true;
        #else // use filter only if instrument file told so
        const bool bUseFilter = RgnInfo.VCFEnabled;
        #endif // CONFIG_FORCE_FILTER
        SYNTHESIS_MODE_SET_FILTER(SynthesisMode, bUseFilter);
        if (bUseFilter) {
            #ifdef CONFIG_OVERRIDE_CUTOFF_CTRL
            VCFCutoffCtrl.controller = CONFIG_OVERRIDE_CUTOFF_CTRL;
            #else // use the one defined in the instrument file
            VCFCutoffCtrl.controller = GetVCFCutoffCtrl();
            #endif // CONFIG_OVERRIDE_CUTOFF_CTRL

            #ifdef CONFIG_OVERRIDE_RESONANCE_CTRL
            VCFResonanceCtrl.controller = CONFIG_OVERRIDE_RESONANCE_CTRL;
            #else // use the one defined in the instrument file
            VCFResonanceCtrl.controller = GetVCFResonanceCtrl();
            #endif // CONFIG_OVERRIDE_RESONANCE_CTRL

            #ifndef CONFIG_OVERRIDE_FILTER_TYPE
            finalSynthesisParameters.filterLeft.SetType(RgnInfo.VCFType);
            finalSynthesisParameters.filterRight.SetType(RgnInfo.VCFType);
            #else // override filter type
            finalSynthesisParameters.filterLeft.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
            finalSynthesisParameters.filterRight.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
            #endif // CONFIG_OVERRIDE_FILTER_TYPE

            VCFCutoffCtrl.value    = pEngineChannel->ControllerTable[VCFCutoffCtrl.controller];
            VCFResonanceCtrl.value = pEngineChannel->ControllerTable[VCFResonanceCtrl.controller];

            // calculate cutoff frequency
            CutoffBase = CalculateCutoffBase(itNoteOnEvent->Param.Note.Velocity);

            VCFCutoffCtrl.fvalue = CalculateFinalCutoff(CutoffBase);

            // calculate resonance
            float resonance = (float) (VCFResonanceCtrl.controller ? VCFResonanceCtrl.value : RgnInfo.VCFResonance);
            VCFResonanceCtrl.fvalue = resonance;
        } else {
            VCFCutoffCtrl.controller    = 0;
            VCFResonanceCtrl.controller = 0;
        }

        return 0; // success
    }
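    // Minimal usage sketch (not part of this file): a format engine is
    // expected to call Trigger() roughly like this when launching a new
    // voice for an incoming note-on event. The names itNewVoice, pChannel,
    // FreeVoice() and Voice::type_normal are placeholders for whatever the
    // concrete engine uses, not identifiers defined in this file.
    //
    //     if (itNewVoice->Trigger(pChannel, itNoteOnEvent, PitchBend,
    //                             Voice::type_normal, KeyGroup) < 0) {
    //         // the voice could not be launched (e.g. zero volume or no
    //         // disk stream available), so give it back to the voice pool
    //         FreeVoice(itNewVoice);
    //     }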

    void AbstractVoice::SetSampleStartOffset() {
        finalSynthesisParameters.dPos = RgnInfo.SampleStartOffset; // offset where we should start playback of the sample (0 - 2000 sample points)
        Pos = RgnInfo.SampleStartOffset;
    }

    /**
     * Synthesizes the current audio fragment for this voice.
     *
     * @param Samples - number of sample points to be rendered in this audio
     *                  fragment cycle
     * @param pSrc    - pointer to input sample data
     * @param Skip    - number of sample points to skip in output buffer
     */
    void AbstractVoice::Synthesize(uint Samples, sample_t* pSrc, uint Skip) {
        bool delay = false; // whether the voice playback should be delayed for this call

        if (pSignalUnitRack != NULL) {
            uint delaySteps = pSignalUnitRack->GetEndpointUnit()->DelayTrigger();
            if (delaySteps > 0) { // delay on the endpoint unit means delay of the voice playback
                if (delaySteps >= Samples) {
                    pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(Samples);
                    delay = true;
                } else {
                    pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(delaySteps);
                    Samples -= delaySteps;
                    Skip += delaySteps;
                }
            }
        }

        AbstractEngineChannel* pChannel = pEngineChannel;
        MidiKeyBase* pMidiKeyInfo = GetMidiKeyInfo(MIDIKey);

        const bool bVoiceRequiresDedicatedRouting =
            pEngineChannel->GetFxSendCount() > 0 &&
            (pMidiKeyInfo->ReverbSend || pMidiKeyInfo->ChorusSend);

        const bool bEq =
            pSignalUnitRack != NULL && pSignalUnitRack->HasEq() && pEq->HasSupport();

        if (bEq) {
            pEq->GetInChannelLeft()->Clear();
            pEq->GetInChannelRight()->Clear();
            finalSynthesisParameters.pOutLeft  = &pEq->GetInChannelLeft()->Buffer()[Skip];
            finalSynthesisParameters.pOutRight = &pEq->GetInChannelRight()->Buffer()[Skip];
            pSignalUnitRack->UpdateEqSettings(pEq);
        } else if (bVoiceRequiresDedicatedRouting) {
            finalSynthesisParameters.pOutLeft  = &GetEngine()->pDedicatedVoiceChannelLeft->Buffer()[Skip];
            finalSynthesisParameters.pOutRight = &GetEngine()->pDedicatedVoiceChannelRight->Buffer()[Skip];
        } else {
            finalSynthesisParameters.pOutLeft  = &pChannel->pChannelLeft->Buffer()[Skip];
            finalSynthesisParameters.pOutRight = &pChannel->pChannelRight->Buffer()[Skip];
        }
        finalSynthesisParameters.pSrc = pSrc;

        RTList<Event>::Iterator itCCEvent = pChannel->pEvents->first();
        RTList<Event>::Iterator itNoteEvent;
        GetFirstEventOnKey(MIDIKey, itNoteEvent);

        RTList<Event>::Iterator itGroupEvent;
        if (pGroupEvents) itGroupEvent = pGroupEvents->first();

        if (itTriggerEvent) { // skip events that happened before this voice was triggered
            while (itCCEvent && itCCEvent->FragmentPos() <= Skip) ++itCCEvent;
            while (itGroupEvent && itGroupEvent->FragmentPos() <= Skip) ++itGroupEvent;

            // we can't simply compare the time stamps here, because note events
            // might happen on the same time stamp, so we have to go by the
            // actual sequence in which the note events arrived instead (see bug #112)
            for (; itNoteEvent; ++itNoteEvent) {
                if (itTriggerEvent == itNoteEvent) {
                    ++itNoteEvent;
                    break;
                }
            }
        }

        uint killPos;
        if (itKillEvent) {
            int maxFadeOutPos = Samples - GetEngine()->GetMinFadeOutSamples();
            if (maxFadeOutPos < 0) {
                // There's not enough space in the buffer to do a fade out
                // from max volume (this can only happen for audio
                // drivers that use Samples < MaxSamplesPerCycle).
                // End the EG1 here, at pos 0, with a shorter max fade
                // out time.
                if (pSignalUnitRack == NULL) {
                    pEG1->enterFadeOutStage(Samples / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                } else {
                    // TODO:
                }
                itKillEvent = Pool<Event>::Iterator();
            } else {
                killPos = RTMath::Min(itKillEvent->FragmentPos(), maxFadeOutPos);
            }
        }

        uint i = Skip;
        while (i < Samples) {
            int iSubFragmentEnd = RTMath::Min(i + CONFIG_DEFAULT_SUBFRAGMENT_SIZE, Samples);
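            // For orientation (hypothetical numbers): with Samples = 256,
            // Skip = 0 and CONFIG_DEFAULT_SUBFRAGMENT_SIZE = 32 this loop
            // runs 8 times; envelopes, LFOs, smoothers and filter
            // coefficients are recalculated once per 32 sample subfragment,
            // while the synthesis function below still renders every single
            // sample frame.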

            // initialize all final synthesis parameters
            fFinalCutoff    = VCFCutoffCtrl.fvalue;
            fFinalResonance = VCFResonanceCtrl.fvalue;

            // process MIDI control change and pitchbend events for this subfragment
            processCCEvents(itCCEvent, iSubFragmentEnd);
            uint8_t pan = MIDIPan;
            if (pSignalUnitRack != NULL) pan = pSignalUnitRack->GetEndpointUnit()->CaluclatePan(pan);

            PanLeftSmoother.update(AbstractEngine::PanCurve[128 - pan]);
            PanRightSmoother.update(AbstractEngine::PanCurve[pan]);
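            // Note on the indexing above: pan is the MIDI pan position
            // (0..127, center 64), so the left channel reads
            // PanCurve[128 - pan] and the right channel PanCurve[pan].
            // For a centered pan both smoothers are fed PanCurve[64]; for
            // hard right (pan == 127) the left channel gets PanCurve[1]
            // and the right channel PanCurve[127].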

            finalSynthesisParameters.fFinalPitch = Pitch.PitchBase * Pitch.PitchBend;
            float fFinalVolume = VolumeSmoother.render() * CrossfadeSmoother.render();
            #ifdef CONFIG_PROCESS_MUTED_CHANNELS
            if (pChannel->GetMute()) fFinalVolume = 0;
            #endif

            // process transition events (note on, note off & sustain pedal)
            processTransitionEvents(itNoteEvent, iSubFragmentEnd);
            processGroupEvents(itGroupEvent, iSubFragmentEnd);

            if (pSignalUnitRack == NULL) {
                // if the voice was killed in this subfragment, or if the
                // filter EG is finished, switch EG1 to fade out stage
                if ((itKillEvent && killPos <= iSubFragmentEnd) ||
                    (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) &&
                     pEG2->getSegmentType() == EG::segment_end)) {
                    pEG1->enterFadeOutStage();
                    itKillEvent = Pool<Event>::Iterator();
                }

                // process envelope generators
                switch (pEG1->getSegmentType()) {
                    case EG::segment_lin:
                        fFinalVolume *= pEG1->processLin();
                        break;
                    case EG::segment_exp:
                        fFinalVolume *= pEG1->processExp();
                        break;
                    case EG::segment_end:
                        fFinalVolume *= pEG1->getLevel();
                        break; // noop
                    case EG::segment_pow:
                        fFinalVolume *= pEG1->processPow();
                        break;
                }
                switch (pEG2->getSegmentType()) {
                    case EG::segment_lin:
                        fFinalCutoff *= pEG2->processLin();
                        break;
                    case EG::segment_exp:
                        fFinalCutoff *= pEG2->processExp();
                        break;
                    case EG::segment_end:
                        fFinalCutoff *= pEG2->getLevel();
                        break; // noop
                    case EG::segment_pow:
                        fFinalCutoff *= pEG2->processPow();
                        break;
                }
                if (EG3.active()) finalSynthesisParameters.fFinalPitch *= EG3.render();

                // process low frequency oscillators
                if (bLFO1Enabled) fFinalVolume *= (1.0f - pLFO1->render());
                if (bLFO2Enabled) fFinalCutoff *= pLFO2->render();
                if (bLFO3Enabled) finalSynthesisParameters.fFinalPitch *= RTMath::CentsToFreqRatio(pLFO3->render());
            } else {
                // if the voice was killed in this subfragment, or if the
                // filter EG is finished, switch EG1 to fade out stage
                /*if ((itKillEvent && killPos <= iSubFragmentEnd) ||
                    (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) &&
                     pEG2->getSegmentType() == EG::segment_end)) {
                    pEG1->enterFadeOutStage();
                    itKillEvent = Pool<Event>::Iterator();
                }*/
                // TODO: ^^^

                fFinalVolume   *= pSignalUnitRack->GetEndpointUnit()->GetVolume();
                fFinalCutoff    = pSignalUnitRack->GetEndpointUnit()->CalculateFilterCutoff(fFinalCutoff);
                fFinalResonance = pSignalUnitRack->GetEndpointUnit()->CalculateResonance(fFinalResonance);

                finalSynthesisParameters.fFinalPitch =
                    pSignalUnitRack->GetEndpointUnit()->CalculatePitch(finalSynthesisParameters.fFinalPitch);

            }

            // limit the pitch so we don't read outside the buffer
            finalSynthesisParameters.fFinalPitch = RTMath::Min(finalSynthesisParameters.fFinalPitch, float(1 << CONFIG_MAX_PITCH));

            // if filter enabled then update filter coefficients
            if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode)) {
                finalSynthesisParameters.filterLeft.SetParameters(fFinalCutoff, fFinalResonance, GetEngine()->SampleRate);
                finalSynthesisParameters.filterRight.SetParameters(fFinalCutoff, fFinalResonance, GetEngine()->SampleRate);
            }

            // do we need resampling?
            const float __PLUS_ONE_CENT  = 1.000577789506554859250142541782224725466f;
            const float __MINUS_ONE_CENT = 0.9994225441413807496009516495583113737666f;
            const bool bResamplingRequired = !(finalSynthesisParameters.fFinalPitch <= __PLUS_ONE_CENT &&
                                               finalSynthesisParameters.fFinalPitch >= __MINUS_ONE_CENT);
            SYNTHESIS_MODE_SET_INTERPOLATE(SynthesisMode, bResamplingRequired);
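            // Background on the two constants above: one cent is 1/100 of
            // an equal tempered semitone, so they are simply 2^(+1/1200)
            // and 2^(-1/1200). Any final pitch ratio within that +/- one
            // cent window around 1.0 is treated as "no transposition" and
            // the cheaper non-interpolating synthesis path is used.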

            // prepare final synthesis parameters structure
            finalSynthesisParameters.uiToGo = iSubFragmentEnd - i;
            #ifdef CONFIG_INTERPOLATE_VOLUME
            finalSynthesisParameters.fFinalVolumeDeltaLeft =
                (fFinalVolume * VolumeLeft * PanLeftSmoother.render() -
                 finalSynthesisParameters.fFinalVolumeLeft) / finalSynthesisParameters.uiToGo;
            finalSynthesisParameters.fFinalVolumeDeltaRight =
                (fFinalVolume * VolumeRight * PanRightSmoother.render() -
                 finalSynthesisParameters.fFinalVolumeRight) / finalSynthesisParameters.uiToGo;
            #else
            finalSynthesisParameters.fFinalVolumeLeft =
                fFinalVolume * VolumeLeft * PanLeftSmoother.render();
            finalSynthesisParameters.fFinalVolumeRight =
                fFinalVolume * VolumeRight * PanRightSmoother.render();
            #endif
            // render audio for one subfragment
            if (!delay) RunSynthesisFunction(SynthesisMode, &finalSynthesisParameters, &loop);

            if (pSignalUnitRack == NULL) {
                // stop the rendering if volume EG is finished
                if (pEG1->getSegmentType() == EG::segment_end) break;
            } else {
                // stop the rendering if the endpoint unit is not active
                if (!pSignalUnitRack->GetEndpointUnit()->Active()) break;
            }

            const double newPos = Pos + (iSubFragmentEnd - i) * finalSynthesisParameters.fFinalPitch;

            if (pSignalUnitRack == NULL) {
                // increment envelopes' positions
                if (pEG1->active()) {

                    // if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage
                    if (SmplInfo.HasLoops && Pos <= SmplInfo.LoopStart && SmplInfo.LoopStart < newPos) {
                        pEG1->update(EG::event_hold_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                    }

                    pEG1->increment(1);
                    if (!pEG1->toStageEndLeft()) pEG1->update(EG::event_stage_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                }
                if (pEG2->active()) {
                    pEG2->increment(1);
                    if (!pEG2->toStageEndLeft()) pEG2->update(EG::event_stage_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                }
                EG3.increment(1);
                if (!EG3.toEndLeft()) EG3.update(); // neutralize envelope coefficient if end reached
            } else {
                // if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage
                /*if (SmplInfo.HasLoops && Pos <= SmplInfo.LoopStart && SmplInfo.LoopStart < newPos) {
                    pEG1->update(EG::event_hold_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                }*/
                // TODO: ^^^

                if (!delay) pSignalUnitRack->Increment();
            }

            Pos = newPos;
            i = iSubFragmentEnd;
        }

        if (delay) return;

        if (bVoiceRequiresDedicatedRouting) {
            if (bEq) {
                pEq->RenderAudio(Samples);
                pEq->GetOutChannelLeft()->CopyTo(GetEngine()->pDedicatedVoiceChannelLeft, Samples);
                pEq->GetOutChannelRight()->CopyTo(GetEngine()->pDedicatedVoiceChannelRight, Samples);
            }
            optional<float> effectSendLevels[2] = {
                pMidiKeyInfo->ReverbSend,
                pMidiKeyInfo->ChorusSend
            };
            GetEngine()->RouteDedicatedVoiceChannels(pEngineChannel, effectSendLevels, Samples);
        } else if (bEq) {
            pEq->RenderAudio(Samples);
            pEq->GetOutChannelLeft()->MixTo(pChannel->pChannelLeft, Samples);
            pEq->GetOutChannelRight()->MixTo(pChannel->pChannelRight, Samples);
        }
    }

    /**
     * Process given list of MIDI control change and pitch bend events for
     * the given time.
     *
     * @param itEvent - iterator pointing to the next event to be processed
     * @param End     - youngest time stamp where processing should be stopped
     */
    void AbstractVoice::processCCEvents(RTList<Event>::Iterator& itEvent, uint End) {
        for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
            if (itEvent->Type == Event::type_control_change && itEvent->Param.CC.Controller) { // if (valid) MIDI control change event
                if (itEvent->Param.CC.Controller == VCFCutoffCtrl.controller) {
                    ProcessCutoffEvent(itEvent);
                }
                if (itEvent->Param.CC.Controller == VCFResonanceCtrl.controller) {
                    processResonanceEvent(itEvent);
                }
                if (pSignalUnitRack == NULL) {
                    if (itEvent->Param.CC.Controller == pLFO1->ExtController) {
                        pLFO1->update(itEvent->Param.CC.Value);
                    }
                    if (itEvent->Param.CC.Controller == pLFO2->ExtController) {
                        pLFO2->update(itEvent->Param.CC.Value);
                    }
                    if (itEvent->Param.CC.Controller == pLFO3->ExtController) {
                        pLFO3->update(itEvent->Param.CC.Value);
                    }
                }
                if (itEvent->Param.CC.Controller == 7) { // volume
                    VolumeSmoother.update(AbstractEngine::VolumeCurve[itEvent->Param.CC.Value]);
                } else if (itEvent->Param.CC.Controller == 10) { // panpot
                    MIDIPan = itEvent->Param.CC.Value;
                }
            } else if (itEvent->Type == Event::type_pitchbend) { // if pitch bend event
                processPitchEvent(itEvent);
            }

            ProcessCCEvent(itEvent);
            if (pSignalUnitRack != NULL) {
                pSignalUnitRack->ProcessCCEvent(itEvent);
            }
        }
    }

    void AbstractVoice::processPitchEvent(RTList<Event>::Iterator& itEvent) {
        Pitch.PitchBend = RTMath::CentsToFreqRatio(itEvent->Param.Pitch.Pitch * Pitch.PitchBendRange);
    }
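    // Worked example (hypothetical values): with a pitch bend range of
    // 2 semitones (the common MIDI default), PitchBendRange is
    // 100 * 2 / 8192 ~ 0.0244 cents per bend unit, so a full upward bend
    // of +8191 yields CentsToFreqRatio(8191 * 0.0244) ~
    // CentsToFreqRatio(200) ~ 1.122, i.e. playback roughly a whole tone
    // higher than the unbent pitch.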

    void AbstractVoice::processResonanceEvent(RTList<Event>::Iterator& itEvent) {
        // convert absolute controller value to differential
        const int ctrldelta = itEvent->Param.CC.Value - VCFResonanceCtrl.value;
        VCFResonanceCtrl.value = itEvent->Param.CC.Value;
        const float resonancedelta = (float) ctrldelta;
        fFinalResonance += resonancedelta;
        // needed for initialization of parameter
        VCFResonanceCtrl.fvalue = itEvent->Param.CC.Value;
    }

    /**
     * Process given list of MIDI note on, note off and sustain pedal events
     * for the given time.
     *
     * @param itEvent - iterator pointing to the next event to be processed
     * @param End     - youngest time stamp where processing should be stopped
     */
    void AbstractVoice::processTransitionEvents(RTList<Event>::Iterator& itEvent, uint End) {
        for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
            // some voice types ignore note off
            if (!(Type & (Voice::type_one_shot | Voice::type_release_trigger | Voice::type_controller_triggered))) {
                if (itEvent->Type == Event::type_release) {
                    EnterReleaseStage();
                } else if (itEvent->Type == Event::type_cancel_release) {
                    if (pSignalUnitRack == NULL) {
                        pEG1->update(EG::event_cancel_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                        pEG2->update(EG::event_cancel_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
                    } else {
                        pSignalUnitRack->CancelRelease();
                    }
                }
            }
        }
    }

    /**
     * Process given list of events aimed at all voices in a key group.
     *
     * @param itEvent - iterator pointing to the next event to be processed
     * @param End     - youngest time stamp where processing should be stopped
     */
    void AbstractVoice::processGroupEvents(RTList<Event>::Iterator& itEvent, uint End) {
        for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
            ProcessGroupEvent(itEvent);
        }
    }

    /** @brief Update current portamento position.
     *
     * Will be called when portamento mode is enabled to get the final
     * portamento position of this active voice from where the next voice(s)
     * might continue to slide on.
     *
     * @param itNoteOffEvent - event which causes this voice to die soon
     */
    void AbstractVoice::UpdatePortamentoPos(Pool<Event>::Iterator& itNoteOffEvent) {
        if (pSignalUnitRack == NULL) {
            const float fFinalEG3Level = EG3.level(itNoteOffEvent->FragmentPos());
            pEngineChannel->PortamentoPos = (float) MIDIKey + RTMath::FreqRatioToCents(fFinalEG3Level) * 0.01f;
        } else {
            // TODO:
        }
    }

    /**
     * Kill the voice in the regular sense. Let the voice render audio until
     * the kill event actually occurred, then fade down the volume level
     * very quickly and finally let the voice die. Unlike a normal release
     * of a voice, a kill process cannot be cancelled and is therefore
     * usually used for voice stealing and key group conflicts.
     *
     * @param itKillEvent - event which caused the voice to be killed
     */
    void AbstractVoice::Kill(Pool<Event>::Iterator& itKillEvent) {
        #if CONFIG_DEVMODE
        if (!itKillEvent) dmsg(1,("AbstractVoice::Kill(): ERROR, !itKillEvent !!!\n"));
        if (itKillEvent && !itKillEvent.isValid()) dmsg(1,("AbstractVoice::Kill(): ERROR, itKillEvent invalid !!!\n"));
        #endif // CONFIG_DEVMODE

        if (itTriggerEvent && itKillEvent->FragmentPos() <= itTriggerEvent->FragmentPos()) return;
        this->itKillEvent = itKillEvent;
    }

    Voice::PitchInfo AbstractVoice::CalculatePitchInfo(int PitchBend) {
        PitchInfo pitch;
        double pitchbasecents = InstrInfo.FineTune + RgnInfo.FineTune + GetEngine()->ScaleTuning[MIDIKey % 12];

        // GSt behaviour: maximum transpose up is 40 semitones. If the
        // MIDI key is more than 40 semitones above the unity note,
        // the transpose is not done.
        if (!SmplInfo.Unpitched && (MIDIKey - (int) RgnInfo.UnityNote) < 40) pitchbasecents += (MIDIKey - (int) RgnInfo.UnityNote) * 100;

        pitch.PitchBase = RTMath::CentsToFreqRatioUnlimited(pitchbasecents) * (double(SmplInfo.SampleRate) / double(GetEngine()->SampleRate));
        pitch.PitchBendRange = 1.0 / 8192.0 * 100.0 * InstrInfo.PitchbendRange;
        pitch.PitchBend = RTMath::CentsToFreqRatio(PitchBend * pitch.PitchBendRange);

        return pitch;
    }
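    // Worked example (hypothetical values, ignoring fine tuning and scale
    // tuning): MIDIKey = 67 and UnityNote = 60 give pitchbasecents = +700,
    // and CentsToFreqRatioUnlimited(700) ~ 1.498. If the sample was
    // recorded at 44100 Hz but the engine runs at 48000 Hz, PitchBase
    // becomes 1.498 * 44100.0 / 48000.0 ~ 1.376, i.e. the sample is read
    // back faster than 1:1 so that it sounds a fifth above its unity note
    // at the engine's sample rate.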

    double AbstractVoice::CalculateVolume(double velocityAttenuation) {
        // For 16 bit samples, we downscale by 32768 to convert from
        // int16 value range to DSP value range (which is
        // -1.0..1.0). For 24 bit, we downscale from int32.
        float volume = velocityAttenuation / (SmplInfo.BitDepth == 16 ? 32768.0f : 32768.0f * 65536.0f);

        volume *= GetSampleAttenuation() * pEngineChannel->GlobalVolume * GLOBAL_VOLUME;

        // the volume of release triggered samples depends on note length
        if (Type & Voice::type_release_trigger) {
            float noteLength = float(GetEngine()->FrameTime + Delay -
                                     GetNoteOnTime(MIDIKey)) / GetEngine()->SampleRate;

            volume *= GetReleaseTriggerAttenuation(noteLength);
        }

        return volume;
    }
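    // Numeric sketch of the scaling above: for 16 bit material the divisor
    // is 32768, mapping the int16 range -32768..32767 onto roughly
    // -1.0..1.0; for 24 bit material (handled as int32 here) the divisor
    // is 32768 * 65536 = 2147483648, i.e. 2^31.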

    float AbstractVoice::GetReleaseTriggerAttenuation(float noteLength) {
        return 1 - RgnInfo.ReleaseTriggerDecay * noteLength;
    }
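    // Example (hypothetical values): with ReleaseTriggerDecay = 0.1 per
    // second, a note held for 2 seconds returns 1 - 0.1 * 2 = 0.8, so the
    // release sample plays at 80% of its normal level; for very long notes
    // the result can drop below zero, which Trigger() guards against with
    // its volume <= 0 check.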

    void AbstractVoice::EnterReleaseStage() {
        if (pSignalUnitRack == NULL) {
            pEG1->update(EG::event_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
            pEG2->update(EG::event_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
        } else {
            pSignalUnitRack->EnterReleaseStage();
        }
    }

    bool AbstractVoice::EG1Finished() {
        if (pSignalUnitRack == NULL) {
            return pEG1->getSegmentType() == EG::segment_end;
        } else {
            return !pSignalUnitRack->GetEndpointUnit()->Active();
        }
    }

} // namespace LinuxSampler
