
Contents of /linuxsampler/trunk/src/engines/common/AbstractVoice.cpp



Revision 2931
Sat Jul 9 14:38:33 2016 UTC by schoenebeck
File size: 40805 byte(s)
* Implemented built-in instrument script function "change_vol()".
* Implemented built-in instrument script function "change_tune()".
* Implemented built-in instrument script function "change_pan()".
* Bumped version (2.0.0.svn11).

1 /***************************************************************************
2 * *
3 * LinuxSampler - modular, streaming capable sampler *
4 * *
5 * Copyright (C) 2003,2004 by Benno Senoner and Christian Schoenebeck *
6 * Copyright (C) 2005-2008 Christian Schoenebeck *
7 * Copyright (C) 2009-2015 Christian Schoenebeck and Grigor Iliev *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License as published by *
11 * the Free Software Foundation; either version 2 of the License, or *
12 * (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details. *
18 * *
19 * You should have received a copy of the GNU General Public License *
20 * along with this program; if not, write to the Free Software *
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, *
22 * MA 02111-1307 USA *
23 ***************************************************************************/
24
25 #include "AbstractVoice.h"
26
27 namespace LinuxSampler {
28
29 AbstractVoice::AbstractVoice(SignalUnitRack* pRack): pSignalUnitRack(pRack) {
30 pEngineChannel = NULL;
31 pLFO1 = new LFOUnsigned(1.0f); // amplitude LFO (0..1 range)
32 pLFO2 = new LFOUnsigned(1.0f); // filter LFO (0..1 range)
33 pLFO3 = new LFOSigned(1200.0f); // pitch LFO (-1200..+1200 range)
34 PlaybackState = playback_state_end;
35 SynthesisMode = 0; // set all mode bits to 0 first
36 // select synthesis implementation (asm core is not supported ATM)
37 #if 0 // CONFIG_ASM && ARCH_X86
38 SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, Features::supportsMMX() && Features::supportsSSE());
39 #else
40 SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, false);
41 #endif
42 SYNTHESIS_MODE_SET_PROFILING(SynthesisMode, gig::Profiler::isEnabled());
43
44 finalSynthesisParameters.filterLeft.Reset();
45 finalSynthesisParameters.filterRight.Reset();
46
47 pEq = NULL;
48 bEqSupport = false;
49 }
50
51 AbstractVoice::~AbstractVoice() {
52 if (pLFO1) delete pLFO1;
53 if (pLFO2) delete pLFO2;
54 if (pLFO3) delete pLFO3;
55
56 if(pEq != NULL) delete pEq;
57 }
58
59 void AbstractVoice::CreateEq() {
60 if(!bEqSupport) return;
61 if(pEq != NULL) delete pEq;
62 pEq = new EqSupport;
63 pEq->InitEffect(GetEngine()->pAudioOutputDevice);
64 }
65
66 /**
67 * Resets voice variables. Should only be called while the rendering process is
68 * suspended / not running.
69 */
70 void AbstractVoice::Reset() {
71 finalSynthesisParameters.filterLeft.Reset();
72 finalSynthesisParameters.filterRight.Reset();
73 DiskStreamRef.pStream = NULL;
74 DiskStreamRef.hStream = 0;
75 DiskStreamRef.State = Stream::state_unused;
76 DiskStreamRef.OrderID = 0;
77 PlaybackState = playback_state_end;
78 itTriggerEvent = Pool<Event>::Iterator();
79 itKillEvent = Pool<Event>::Iterator();
80 }
81
82 /**
83 * Initializes and triggers the voice, a disk stream will be launched if
84 * needed.
85 *
86 * @param pEngineChannel - engine channel on which this voice was ordered
87 * @param itNoteOnEvent - event that caused triggering of this voice
88 * @param PitchBend - MIDI detune factor (-8192 ... +8191)
89 * @param pRegion - points to the region which provides sample wave(s) and articulation data
90 * @param VoiceType - type of this voice
91 * @param iKeyGroup - a value > 0 defines the key group this voice is a member of
92 * @returns 0 on success, a value < 0 if the voice wasn't triggered
93 * (either due to an error or e.g. because no region is
94 * defined for the given key)
95 */
96 int AbstractVoice::Trigger (
97 AbstractEngineChannel* pEngineChannel,
98 Pool<Event>::Iterator& itNoteOnEvent,
99 int PitchBend,
100 type_t VoiceType,
101 int iKeyGroup
102 ) {
103 this->pEngineChannel = pEngineChannel;
104 Orphan = false;
105
106 #if CONFIG_DEVMODE
107 if (itNoteOnEvent->FragmentPos() > GetEngine()->MaxSamplesPerCycle) { // just a sanity check for debugging
108 dmsg(1,("Voice::Trigger(): ERROR, TriggerDelay > Totalsamples\n"));
109 }
110 #endif // CONFIG_DEVMODE
111
112 Type = VoiceType;
113 pNote = pEngineChannel->pEngine->NoteByID( itNoteOnEvent->Param.Note.ID );
114 PlaybackState = playback_state_init; // mark voice as triggered, but no audio rendered yet
115 Delay = itNoteOnEvent->FragmentPos();
116 itTriggerEvent = itNoteOnEvent;
117 itKillEvent = Pool<Event>::Iterator();
118 MidiKeyBase* pKeyInfo = GetMidiKeyInfo(MIDIKey());
119
120 pGroupEvents = iKeyGroup ? pEngineChannel->ActiveKeyGroups[iKeyGroup] : 0;
121
122 SmplInfo = GetSampleInfo();
123 RgnInfo = GetRegionInfo();
124 InstrInfo = GetInstrumentInfo();
125
126 MIDIPan = CalculatePan(pEngineChannel->iLastPanRequest);
127
128 AboutToTrigger();
129
130 // calculate volume
131 const double velocityAttenuation = GetVelocityAttenuation(itNoteOnEvent->Param.Note.Velocity);
132 float volume = CalculateVolume(velocityAttenuation) * pKeyInfo->Volume;
133 if (volume <= 0) return -1;
134
135 // select channel mode (mono or stereo)
136 SYNTHESIS_MODE_SET_CHANNELS(SynthesisMode, SmplInfo.ChannelCount == 2);
137 // select bit depth (16 or 24)
138 SYNTHESIS_MODE_SET_BITDEPTH24(SynthesisMode, SmplInfo.BitDepth == 24);
139
140 // get starting crossfade volume level
141 float crossfadeVolume = CalculateCrossfadeVolume(itNoteOnEvent->Param.Note.Velocity);
142
143 VolumeLeft = volume * pKeyInfo->PanLeft;
144 VolumeRight = volume * pKeyInfo->PanRight;
145
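        // The smoothers below (crossfade, volume, note volume and pan) are advanced
        // once per subfragment of CONFIG_DEFAULT_SUBFRAGMENT_SIZE sample points in
        // Synthesize(), so subfragmentRate is their update rate in Hz.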
146 float subfragmentRate = GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE;
147 CrossfadeSmoother.trigger(crossfadeVolume, subfragmentRate);
148 VolumeSmoother.trigger(pEngineChannel->MidiVolume, subfragmentRate);
149 NoteVolumeSmoother.trigger(pNote ? pNote->Override.Volume : 1.f, subfragmentRate);
150
151 // Check if the sample needs disk streaming or is too short for that
152 long cachedsamples = GetSampleCacheSize() / SmplInfo.FrameSize;
153 DiskVoice = cachedsamples < SmplInfo.TotalFrameCount;
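        // i.e. cachedsamples is the number of sample frames of this sample held in the
        // RAM cache; if that covers less than the whole sample, the remainder has to be
        // streamed from disk while the cached head of the sample is being played back.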
154
155 SetSampleStartOffset();
156
157 if (DiskVoice) { // voice to be streamed from disk
158 if (cachedsamples > (GetEngine()->MaxSamplesPerCycle << CONFIG_MAX_PITCH)) {
159 MaxRAMPos = cachedsamples - (GetEngine()->MaxSamplesPerCycle << CONFIG_MAX_PITCH) / SmplInfo.ChannelCount; //TODO: this calculation is too pessimistic and might better be moved to the Render() method, so that it calculates MaxRAMPos depending on the current demand of sample points to be rendered (e.g. in case of JACK)
160 } else {
161 // The cache is too small to fit a max sample buffer.
162 // Setting MaxRAMPos to 0 will probably cause a click
163 // in the audio, but it's better than not handling
164 // this case at all, which would have caused the
165 // unsigned MaxRAMPos to be set to a negative number.
166 MaxRAMPos = 0;
167 }
168
169 // check if there's a loop defined which completely fits into the cached (RAM) part of the sample
170 RAMLoop = (SmplInfo.HasLoops && (SmplInfo.LoopStart + SmplInfo.LoopLength) <= MaxRAMPos);
171
172 if (OrderNewStream()) return -1;
173 dmsg(4,("Disk voice launched (cached samples: %ld, total Samples: %d, MaxRAMPos: %lu, RAMLooping: %s)\n", cachedsamples, SmplInfo.TotalFrameCount, MaxRAMPos, (RAMLoop) ? "yes" : "no"));
174 }
175 else { // RAM only voice
176 MaxRAMPos = cachedsamples;
177 RAMLoop = (SmplInfo.HasLoops);
178 dmsg(4,("RAM only voice launched (Looping: %s)\n", (RAMLoop) ? "yes" : "no"));
179 }
180 if (RAMLoop) {
181 loop.uiTotalCycles = SmplInfo.LoopPlayCount;
182 loop.uiCyclesLeft = SmplInfo.LoopPlayCount;
183 loop.uiStart = SmplInfo.LoopStart;
184 loop.uiEnd = SmplInfo.LoopStart + SmplInfo.LoopLength;
185 loop.uiSize = SmplInfo.LoopLength;
186 }
187
188 Pitch = CalculatePitchInfo(PitchBend);
189 NotePitch = (pNote) ? pNote->Override.Pitch : 1.0f;
190
191 // the lengths of the decay and release curves depend on the velocity
192 const double velrelease = 1 / GetVelocityRelease(itNoteOnEvent->Param.Note.Velocity);
193
194 if (pSignalUnitRack == NULL) { // setup EG 1 (VCA EG)
195 // get current value of EG1 controller
196 double eg1controllervalue = GetEG1ControllerValue(itNoteOnEvent->Param.Note.Velocity);
197
198 // calculate influence of EG1 controller on EG1's parameters
199 EGInfo egInfo = CalculateEG1ControllerInfluence(eg1controllervalue);
200
201 TriggerEG1(egInfo, velrelease, velocityAttenuation, GetEngine()->SampleRate, itNoteOnEvent->Param.Note.Velocity);
202 } else {
203 pSignalUnitRack->Trigger();
204 }
205
206 const uint8_t pan = (pSignalUnitRack) ? pSignalUnitRack->GetEndpointUnit()->CalculatePan(MIDIPan) : MIDIPan;
207 NotePanLeft = (pNote) ? AbstractEngine::PanCurveValueNorm(pNote->Override.Pan, 0 /*left*/ ) : 1.f;
208 NotePanRight = (pNote) ? AbstractEngine::PanCurveValueNorm(pNote->Override.Pan, 1 /*right*/) : 1.f;
209 PanLeftSmoother.trigger(
210 AbstractEngine::PanCurve[128 - pan] * NotePanLeft,
211 subfragmentRate
212 );
213 PanRightSmoother.trigger(
214 AbstractEngine::PanCurve[pan] * NotePanRight,
215 subfragmentRate
216 );
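        // Note: AbstractEngine::PanCurve is indexed with [128 - pan] for the left and
        // [pan] for the right channel, so a centered pan value of 64 yields equal gain
        // on both sides; NotePanLeft/NotePanRight additionally apply the per-note pan
        // override (e.g. set via the instrument script function change_pan()).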
217
218 #ifdef CONFIG_INTERPOLATE_VOLUME
219 // setup initial volume in synthesis parameters
220 #ifdef CONFIG_PROCESS_MUTED_CHANNELS
221 if (pEngineChannel->GetMute()) {
222 finalSynthesisParameters.fFinalVolumeLeft = 0;
223 finalSynthesisParameters.fFinalVolumeRight = 0;
224 }
225 else
226 #else
227 {
228 float finalVolume;
229 if (pSignalUnitRack == NULL) {
230 finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * pEG1->getLevel();
231 } else {
232 finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * pSignalUnitRack->GetEndpointUnit()->GetVolume();
233 }
234
235 finalSynthesisParameters.fFinalVolumeLeft = finalVolume * VolumeLeft * PanLeftSmoother.render();
236 finalSynthesisParameters.fFinalVolumeRight = finalVolume * VolumeRight * PanRightSmoother.render();
237 }
238 #endif
239 #endif
240
241 if (pSignalUnitRack == NULL) {
242 // setup EG 2 (VCF Cutoff EG)
243 {
244 // get current value of EG2 controller
245 double eg2controllervalue = GetEG2ControllerValue(itNoteOnEvent->Param.Note.Velocity);
246
247 // calculate influence of EG2 controller on EG2's parameters
248 EGInfo egInfo = CalculateEG2ControllerInfluence(eg2controllervalue);
249
250 TriggerEG2(egInfo, velrelease, velocityAttenuation, GetEngine()->SampleRate, itNoteOnEvent->Param.Note.Velocity);
251 }
252
253
254 // setup EG 3 (VCO EG)
255 {
256 // if portamento mode is on, we dedicate EG3 purely to portamento; otherwise we do as the patch tells us
257 bool bPortamento = pEngineChannel->PortamentoMode && pEngineChannel->PortamentoPos >= 0.0f;
258 float eg3depth = (bPortamento)
259 ? RTMath::CentsToFreqRatio((pEngineChannel->PortamentoPos - (float) MIDIKey()) * 100)
260 : RTMath::CentsToFreqRatio(RgnInfo.EG3Depth);
261 float eg3time = (bPortamento)
262 ? pEngineChannel->PortamentoTime
263 : RgnInfo.EG3Attack;
264 EG3.trigger(eg3depth, eg3time, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
265 dmsg(5,("PortamentoPos=%f, depth=%f, time=%f\n", pEngineChannel->PortamentoPos, eg3depth, eg3time));
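                    // Worked example (assuming EG3 glides from eg3depth toward 1.0): sliding
                    // from previous key 60 up to new key 64 gives
                    // eg3depth = CentsToFreqRatio((60 - 64) * 100) = 2^(-400/1200) ~= 0.794,
                    // i.e. the voice starts 4 semitones below its nominal pitch and reaches
                    // it within eg3time (PortamentoTime).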
266 }
267
268
269 // setup LFO 1 (VCA LFO)
270 InitLFO1();
271 // setup LFO 2 (VCF Cutoff LFO)
272 InitLFO2();
273 // setup LFO 3 (VCO LFO)
274 InitLFO3();
275 }
276
277
278 #if CONFIG_FORCE_FILTER
279 const bool bUseFilter = true;
280 #else // use filter only if instrument file told so
281 const bool bUseFilter = RgnInfo.VCFEnabled;
282 #endif // CONFIG_FORCE_FILTER
283 SYNTHESIS_MODE_SET_FILTER(SynthesisMode, bUseFilter);
284 if (bUseFilter) {
285 #ifdef CONFIG_OVERRIDE_CUTOFF_CTRL
286 VCFCutoffCtrl.controller = CONFIG_OVERRIDE_CUTOFF_CTRL;
287 #else // use the one defined in the instrument file
288 VCFCutoffCtrl.controller = GetVCFCutoffCtrl();
289 #endif // CONFIG_OVERRIDE_CUTOFF_CTRL
290
291 #ifdef CONFIG_OVERRIDE_RESONANCE_CTRL
292 VCFResonanceCtrl.controller = CONFIG_OVERRIDE_RESONANCE_CTRL;
293 #else // use the one defined in the instrument file
294 VCFResonanceCtrl.controller = GetVCFResonanceCtrl();
295 #endif // CONFIG_OVERRIDE_RESONANCE_CTRL
296
297 #ifndef CONFIG_OVERRIDE_FILTER_TYPE
298 finalSynthesisParameters.filterLeft.SetType(RgnInfo.VCFType);
299 finalSynthesisParameters.filterRight.SetType(RgnInfo.VCFType);
300 #else // override filter type
301 finalSynthesisParameters.filterLeft.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
302 finalSynthesisParameters.filterRight.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
303 #endif // CONFIG_OVERRIDE_FILTER_TYPE
304
305 VCFCutoffCtrl.value = pEngineChannel->ControllerTable[VCFCutoffCtrl.controller];
306 VCFResonanceCtrl.value = pEngineChannel->ControllerTable[VCFResonanceCtrl.controller];
307
308 // calculate cutoff frequency
309 CutoffBase = CalculateCutoffBase(itNoteOnEvent->Param.Note.Velocity);
310
311 VCFCutoffCtrl.fvalue = CalculateFinalCutoff(CutoffBase);
312
313 // calculate resonance
314 float resonance = (float) (VCFResonanceCtrl.controller ? VCFResonanceCtrl.value : RgnInfo.VCFResonance);
315 VCFResonanceCtrl.fvalue = resonance;
316 } else {
317 VCFCutoffCtrl.controller = 0;
318 VCFResonanceCtrl.controller = 0;
319 }
320
321 const bool bEq =
322 pSignalUnitRack != NULL && pSignalUnitRack->HasEq() && pEq->HasSupport();
323
324 if (bEq) {
325 pEq->GetInChannelLeft()->Clear();
326 pEq->GetInChannelRight()->Clear();
327 pEq->RenderAudio(GetEngine()->pAudioOutputDevice->MaxSamplesPerCycle());
328 }
329
330 return 0; // success
331 }
332
333 void AbstractVoice::SetSampleStartOffset() {
334 finalSynthesisParameters.dPos = RgnInfo.SampleStartOffset; // offset where we should start playback of sample (0 - 2000 sample points)
335 Pos = RgnInfo.SampleStartOffset;
336 }
337
338 /**
339 * Synthesizes the current audio fragment for this voice.
340 *
341 * @param Samples - number of sample points to be rendered in this audio
342 * fragment cycle
343 * @param pSrc - pointer to input sample data
344 * @param Skip - number of sample points to skip in output buffer
345 */
346 void AbstractVoice::Synthesize(uint Samples, sample_t* pSrc, uint Skip) {
347 bool delay = false; // Whether the voice playback should be delayed for this call
348
349 if (pSignalUnitRack != NULL) {
350 uint delaySteps = pSignalUnitRack->GetEndpointUnit()->DelayTrigger();
351 if (delaySteps > 0) { // delay on the endpoint unit means delay of the voice playback
352 if (delaySteps >= Samples) {
353 pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(Samples);
354 delay = true;
355 } else {
356 pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(delaySteps);
357 Samples -= delaySteps;
358 Skip += delaySteps;
359 }
360 }
361 }
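        // In other words: a trigger delay requested by the endpoint unit either skips
        // this whole fragment (delay == true) or shifts the start of rendering within
        // this fragment by delaySteps sample points.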
362
363 AbstractEngineChannel* pChannel = pEngineChannel;
364 MidiKeyBase* pMidiKeyInfo = GetMidiKeyInfo(MIDIKey());
365
366 const bool bVoiceRequiresDedicatedRouting =
367 pEngineChannel->GetFxSendCount() > 0 &&
368 (pMidiKeyInfo->ReverbSend || pMidiKeyInfo->ChorusSend);
369
370 const bool bEq =
371 pSignalUnitRack != NULL && pSignalUnitRack->HasEq() && pEq->HasSupport();
372
373 if (bEq) {
374 pEq->GetInChannelLeft()->Clear();
375 pEq->GetInChannelRight()->Clear();
376 finalSynthesisParameters.pOutLeft = &pEq->GetInChannelLeft()->Buffer()[Skip];
377 finalSynthesisParameters.pOutRight = &pEq->GetInChannelRight()->Buffer()[Skip];
378 pSignalUnitRack->UpdateEqSettings(pEq);
379 } else if (bVoiceRequiresDedicatedRouting) {
380 finalSynthesisParameters.pOutLeft = &GetEngine()->pDedicatedVoiceChannelLeft->Buffer()[Skip];
381 finalSynthesisParameters.pOutRight = &GetEngine()->pDedicatedVoiceChannelRight->Buffer()[Skip];
382 } else {
383 finalSynthesisParameters.pOutLeft = &pChannel->pChannelLeft->Buffer()[Skip];
384 finalSynthesisParameters.pOutRight = &pChannel->pChannelRight->Buffer()[Skip];
385 }
386 finalSynthesisParameters.pSrc = pSrc;
387
388 RTList<Event>::Iterator itCCEvent = pChannel->pEvents->first();
389 RTList<Event>::Iterator itNoteEvent;
390 GetFirstEventOnKey(HostKey(), itNoteEvent);
391
392 RTList<Event>::Iterator itGroupEvent;
393 if (pGroupEvents && !Orphan) itGroupEvent = pGroupEvents->first();
394
395 if (itTriggerEvent) { // skip events that happened before this voice was triggered
396 while (itCCEvent && itCCEvent->FragmentPos() <= Skip) ++itCCEvent;
397 while (itGroupEvent && itGroupEvent->FragmentPos() <= Skip) ++itGroupEvent;
398
399 // we can't simply compare the time stamps here, because note events
400 // might happen on the same time stamp, so we have to rely on the
401 // actual order in which the note events arrived instead (see bug #112)
402 for (; itNoteEvent; ++itNoteEvent) {
403 if (itTriggerEvent == itNoteEvent) {
404 ++itNoteEvent;
405 break;
406 }
407 }
408 }
409
410 uint killPos;
411 if (itKillEvent) {
412 int maxFadeOutPos = Samples - GetEngine()->GetMinFadeOutSamples();
413 if (maxFadeOutPos < 0) {
414 // There's not enough space in buffer to do a fade out
415 // from max volume (this can only happen for audio
416 // drivers that use Samples < MaxSamplesPerCycle).
417 // End the EG1 here, at pos 0, with a shorter max fade
418 // out time.
419 if (pSignalUnitRack == NULL) {
420 pEG1->enterFadeOutStage(Samples / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
421 } else {
422 pSignalUnitRack->EnterFadeOutStage(Samples / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
423 }
424 itKillEvent = Pool<Event>::Iterator();
425 } else {
426 killPos = RTMath::Min(itKillEvent->FragmentPos(), maxFadeOutPos);
427 }
428 }
429
430 uint i = Skip;
431 while (i < Samples) {
432 int iSubFragmentEnd = RTMath::Min(i + CONFIG_DEFAULT_SUBFRAGMENT_SIZE, Samples);
433
434 // initialize all final synthesis parameters
435 fFinalCutoff = VCFCutoffCtrl.fvalue;
436 fFinalResonance = VCFResonanceCtrl.fvalue;
437
438 // process MIDI control change, aftertouch and pitchbend events for this subfragment
439 processCCEvents(itCCEvent, iSubFragmentEnd);
440 uint8_t pan = MIDIPan;
441 if (pSignalUnitRack != NULL) pan = pSignalUnitRack->GetEndpointUnit()->CalculatePan(MIDIPan);
442
443 PanLeftSmoother.update(AbstractEngine::PanCurve[128 - pan] * NotePanLeft);
444 PanRightSmoother.update(AbstractEngine::PanCurve[pan] * NotePanRight);
445
446 finalSynthesisParameters.fFinalPitch = Pitch.PitchBase * Pitch.PitchBend * NotePitch;
447
448 float fFinalVolume = VolumeSmoother.render() * CrossfadeSmoother.render() * NoteVolumeSmoother.render();
449 #ifdef CONFIG_PROCESS_MUTED_CHANNELS
450 if (pChannel->GetMute()) fFinalVolume = 0;
451 #endif
452
453 // process transition events (note on, note off & sustain pedal)
454 processTransitionEvents(itNoteEvent, iSubFragmentEnd);
455 processGroupEvents(itGroupEvent, iSubFragmentEnd);
456
457 if (pSignalUnitRack == NULL) {
458 // if the voice was killed in this subfragment, or if the
459 // filter EG is finished, switch EG1 to fade out stage
460 if ((itKillEvent && killPos <= iSubFragmentEnd) ||
461 (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) &&
462 pEG2->getSegmentType() == EG::segment_end)) {
463 pEG1->enterFadeOutStage();
464 itKillEvent = Pool<Event>::Iterator();
465 }
466
467 // process envelope generators
468 switch (pEG1->getSegmentType()) {
469 case EG::segment_lin:
470 fFinalVolume *= pEG1->processLin();
471 break;
472 case EG::segment_exp:
473 fFinalVolume *= pEG1->processExp();
474 break;
475 case EG::segment_end:
476 fFinalVolume *= pEG1->getLevel();
477 break; // noop
478 case EG::segment_pow:
479 fFinalVolume *= pEG1->processPow();
480 break;
481 }
482 switch (pEG2->getSegmentType()) {
483 case EG::segment_lin:
484 fFinalCutoff *= pEG2->processLin();
485 break;
486 case EG::segment_exp:
487 fFinalCutoff *= pEG2->processExp();
488 break;
489 case EG::segment_end:
490 fFinalCutoff *= pEG2->getLevel();
491 break; // noop
492 case EG::segment_pow:
493 fFinalCutoff *= pEG2->processPow();
494 break;
495 }
496 if (EG3.active()) finalSynthesisParameters.fFinalPitch *= EG3.render();
497
498 // process low frequency oscillators
499 if (bLFO1Enabled) fFinalVolume *= (1.0f - pLFO1->render());
500 if (bLFO2Enabled) fFinalCutoff *= (1.0f - pLFO2->render());
501 if (bLFO3Enabled) finalSynthesisParameters.fFinalPitch *= RTMath::CentsToFreqRatio(pLFO3->render());
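                // (pLFO1 and pLFO2 are unsigned LFOs in the range 0..1 used as attenuation
                // factors here, while pLFO3 is a signed LFO in the range -1200..+1200 cents
                // which is converted to a frequency ratio; see the constructor)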
502 } else {
503 // if the voice was killed in this subfragment, enter fade out stage
504 if (itKillEvent && killPos <= iSubFragmentEnd) {
505 pSignalUnitRack->EnterFadeOutStage();
506 itKillEvent = Pool<Event>::Iterator();
507 }
508
509 // if the filter EG is finished, switch EG1 to fade out stage
510 /*if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) &&
511 pEG2->getSegmentType() == EG::segment_end) {
512 pEG1->enterFadeOutStage();
513 itKillEvent = Pool<Event>::Iterator();
514 }*/
515 // TODO: ^^^
516
517 fFinalVolume *= pSignalUnitRack->GetEndpointUnit()->GetVolume();
518 fFinalCutoff = pSignalUnitRack->GetEndpointUnit()->CalculateFilterCutoff(fFinalCutoff);
519 fFinalResonance = pSignalUnitRack->GetEndpointUnit()->CalculateResonance(fFinalResonance);
520
521 finalSynthesisParameters.fFinalPitch =
522 pSignalUnitRack->GetEndpointUnit()->CalculatePitch(finalSynthesisParameters.fFinalPitch);
523
524 }
525
526 // limit the pitch so we don't read outside the buffer
527 finalSynthesisParameters.fFinalPitch = RTMath::Min(finalSynthesisParameters.fFinalPitch, float(1 << CONFIG_MAX_PITCH));
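                // e.g. if CONFIG_MAX_PITCH were 4, the resampling factor would be capped at
                // 2^4 = 16, i.e. at most four octaves of upward transposition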
528
529 // if filter enabled then update filter coefficients
530 if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode)) {
531 finalSynthesisParameters.filterLeft.SetParameters(fFinalCutoff, fFinalResonance, GetEngine()->SampleRate);
532 finalSynthesisParameters.filterRight.SetParameters(fFinalCutoff, fFinalResonance, GetEngine()->SampleRate);
533 }
534
535 // do we need resampling?
536 const float __PLUS_ONE_CENT = 1.000577789506554859250142541782224725466f;
537 const float __MINUS_ONE_CENT = 0.9994225441413807496009516495583113737666f;
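                // (these constants are 2^(1/1200) and 2^(-1/1200), i.e. one cent up / down:
                // interpolation is only skipped while the final pitch stays within one cent
                // of the sample's original playback rate)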
538 const bool bResamplingRequired = !(finalSynthesisParameters.fFinalPitch <= __PLUS_ONE_CENT &&
539 finalSynthesisParameters.fFinalPitch >= __MINUS_ONE_CENT);
540 SYNTHESIS_MODE_SET_INTERPOLATE(SynthesisMode, bResamplingRequired);
541
542 // prepare final synthesis parameters structure
543 finalSynthesisParameters.uiToGo = iSubFragmentEnd - i;
544 #ifdef CONFIG_INTERPOLATE_VOLUME
545 finalSynthesisParameters.fFinalVolumeDeltaLeft =
546 (fFinalVolume * VolumeLeft * PanLeftSmoother.render() -
547 finalSynthesisParameters.fFinalVolumeLeft) / finalSynthesisParameters.uiToGo;
548 finalSynthesisParameters.fFinalVolumeDeltaRight =
549 (fFinalVolume * VolumeRight * PanRightSmoother.render() -
550 finalSynthesisParameters.fFinalVolumeRight) / finalSynthesisParameters.uiToGo;
551 #else
552 finalSynthesisParameters.fFinalVolumeLeft =
553 fFinalVolume * VolumeLeft * PanLeftSmoother.render();
554 finalSynthesisParameters.fFinalVolumeRight =
555 fFinalVolume * VolumeRight * PanRightSmoother.render();
556 #endif
557 // render audio for one subfragment
558 if (!delay) RunSynthesisFunction(SynthesisMode, &finalSynthesisParameters, &loop);
559
560 if (pSignalUnitRack == NULL) {
561 // stop the rendering if volume EG is finished
562 if (pEG1->getSegmentType() == EG::segment_end) break;
563 } else {
564 // stop the rendering if the endpoint unit is not active
565 if (!pSignalUnitRack->GetEndpointUnit()->Active()) break;
566 }
567
568 const double newPos = Pos + (iSubFragmentEnd - i) * finalSynthesisParameters.fFinalPitch;
569
570 if (pSignalUnitRack == NULL) {
571 // increment envelopes' positions
572 if (pEG1->active()) {
573
574 // if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage
575 if (SmplInfo.HasLoops && Pos <= SmplInfo.LoopStart && SmplInfo.LoopStart < newPos) {
576 pEG1->update(EG::event_hold_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
577 }
578
579 pEG1->increment(1);
580 if (!pEG1->toStageEndLeft()) pEG1->update(EG::event_stage_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
581 }
582 if (pEG2->active()) {
583 pEG2->increment(1);
584 if (!pEG2->toStageEndLeft()) pEG2->update(EG::event_stage_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
585 }
586 EG3.increment(1);
587 if (!EG3.toEndLeft()) EG3.update(); // neutralize envelope coefficient if end reached
588 } else {
589 // if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage
590 /*if (SmplInfo.HasLoops && Pos <= SmplInfo.LoopStart && SmplInfo.LoopStart < newPos) {
591 pEG1->update(EG::event_hold_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
592 }*/
593 // TODO: ^^^
594
595 if (!delay) pSignalUnitRack->Increment();
596 }
597
598 Pos = newPos;
599 i = iSubFragmentEnd;
600 }
601
602 if (delay) return;
603
604 if (bVoiceRequiresDedicatedRouting) {
605 if (bEq) {
606 pEq->RenderAudio(Samples);
607 pEq->GetOutChannelLeft()->CopyTo(GetEngine()->pDedicatedVoiceChannelLeft, Samples);
608 pEq->GetOutChannelRight()->CopyTo(GetEngine()->pDedicatedVoiceChannelRight, Samples);
609 }
610 optional<float> effectSendLevels[2] = {
611 pMidiKeyInfo->ReverbSend,
612 pMidiKeyInfo->ChorusSend
613 };
614 GetEngine()->RouteDedicatedVoiceChannels(pEngineChannel, effectSendLevels, Samples);
615 } else if (bEq) {
616 pEq->RenderAudio(Samples);
617 pEq->GetOutChannelLeft()->MixTo(pChannel->pChannelLeft, Samples);
618 pEq->GetOutChannelRight()->MixTo(pChannel->pChannelRight, Samples);
619 }
620 }
621
622 /**
623 * Process given list of MIDI control change, aftertouch and pitch bend
624 * events for the given time.
625 *
626 * @param itEvent - iterator pointing to the next event to be processed
627 * @param End - latest time stamp up to which events should be processed
628 */
629 void AbstractVoice::processCCEvents(RTList<Event>::Iterator& itEvent, uint End) {
630 for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
631 if (itEvent->Type == Event::type_control_change && itEvent->Param.CC.Controller) { // if (valid) MIDI control change event
632 if (itEvent->Param.CC.Controller == VCFCutoffCtrl.controller) {
633 ProcessCutoffEvent(itEvent);
634 }
635 if (itEvent->Param.CC.Controller == VCFResonanceCtrl.controller) {
636 processResonanceEvent(itEvent);
637 }
638 if (pSignalUnitRack == NULL) {
639 if (itEvent->Param.CC.Controller == pLFO1->ExtController) {
640 pLFO1->update(itEvent->Param.CC.Value);
641 }
642 if (itEvent->Param.CC.Controller == pLFO2->ExtController) {
643 pLFO2->update(itEvent->Param.CC.Value);
644 }
645 if (itEvent->Param.CC.Controller == pLFO3->ExtController) {
646 pLFO3->update(itEvent->Param.CC.Value);
647 }
648 }
649 if (itEvent->Param.CC.Controller == 7) { // volume
650 VolumeSmoother.update(AbstractEngine::VolumeCurve[itEvent->Param.CC.Value]);
651 } else if (itEvent->Param.CC.Controller == 10) { // panpot
652 MIDIPan = CalculatePan(itEvent->Param.CC.Value);
653 }
654 } else if (itEvent->Type == Event::type_pitchbend) { // if pitch bend event
655 processPitchEvent(itEvent);
656 } else if (itEvent->Type == Event::type_channel_pressure) {
657 ProcessChannelPressureEvent(itEvent);
658 } else if (itEvent->Type == Event::type_note_pressure) {
659 ProcessPolyphonicKeyPressureEvent(itEvent);
660 }
661
662 ProcessCCEvent(itEvent);
663 if (pSignalUnitRack != NULL) {
664 pSignalUnitRack->ProcessCCEvent(itEvent);
665 }
666 }
667 }
668
669 void AbstractVoice::processPitchEvent(RTList<Event>::Iterator& itEvent) {
670 Pitch.PitchBend = RTMath::CentsToFreqRatio(itEvent->Param.Pitch.Pitch * Pitch.PitchBendRange);
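        // Example: assuming InstrInfo.PitchbendRange is given in semitones, a range of 2
        // makes PitchBendRange = 200.0/8192 cents per raw pitch bend unit, so a full bend
        // of +8191 yields ~200 cents, i.e. CentsToFreqRatio(200) = 2^(200/1200) ~= 1.122.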
671 }
672
673 void AbstractVoice::processResonanceEvent(RTList<Event>::Iterator& itEvent) {
674 // convert absolute controller value to differential
675 const int ctrldelta = itEvent->Param.CC.Value - VCFResonanceCtrl.value;
676 VCFResonanceCtrl.value = itEvent->Param.CC.Value;
677 const float resonancedelta = (float) ctrldelta;
678 fFinalResonance += resonancedelta;
679 // needed for initialization of parameter
680 VCFResonanceCtrl.fvalue = itEvent->Param.CC.Value;
681 }
682
683 /**
684 * Process given list of MIDI note on, note off, sustain pedal events and
685 * note synthesis parameter events for the given time.
686 *
687 * @param itEvent - iterator pointing to the next event to be processed
688 * @param End - latest time stamp up to which events should be processed
689 */
690 void AbstractVoice::processTransitionEvents(RTList<Event>::Iterator& itEvent, uint End) {
691 for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
692 // some voice types ignore note off
693 if (!(Type & (Voice::type_one_shot | Voice::type_release_trigger | Voice::type_controller_triggered))) {
694 if (itEvent->Type == Event::type_release) {
695 EnterReleaseStage();
696 } else if (itEvent->Type == Event::type_cancel_release) {
697 if (pSignalUnitRack == NULL) {
698 pEG1->update(EG::event_cancel_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
699 pEG2->update(EG::event_cancel_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
700 } else {
701 pSignalUnitRack->CancelRelease();
702 }
703 }
704 }
705 // process synthesis parameter events (caused by built-in real-time instrument script functions)
706 if (itEvent->Type == Event::type_note_synth_param && pNote &&
707 pEngineChannel->pEngine->NoteByID( itEvent->Param.NoteSynthParam.NoteID ) == pNote)
708 {
709 switch (itEvent->Param.NoteSynthParam.Type) {
710 case Event::synth_param_volume:
711 NoteVolumeSmoother.update(itEvent->Param.NoteSynthParam.AbsValue);
712 break;
713 case Event::synth_param_pitch:
714 NotePitch = itEvent->Param.NoteSynthParam.AbsValue;
715 break;
716 case Event::synth_param_pan:
717 NotePanLeft = AbstractEngine::PanCurveValueNorm(itEvent->Param.NoteSynthParam.AbsValue, 0 /*left*/);
718 NotePanRight = AbstractEngine::PanCurveValueNorm(itEvent->Param.NoteSynthParam.AbsValue, 1 /*right*/);
719 break;
720 }
721 }
722 }
723 }
724
725 /**
726 * Process given list of events aimed at all voices in a key group.
727 *
728 * @param itEvent - iterator pointing to the next event to be processed
729 * @param End - latest time stamp up to which events should be processed
730 */
731 void AbstractVoice::processGroupEvents(RTList<Event>::Iterator& itEvent, uint End) {
732 for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
733 ProcessGroupEvent(itEvent);
734 }
735 }
736
737 /** @brief Update current portamento position.
738 *
739 * Will be called when portamento mode is enabled, to obtain the final
740 * portamento position of this active voice, from which the next voice(s)
741 * may continue the slide.
742 *
743 * @param itNoteOffEvent - event which causes this voice to die soon
744 */
745 void AbstractVoice::UpdatePortamentoPos(Pool<Event>::Iterator& itNoteOffEvent) {
746 if (pSignalUnitRack == NULL) {
747 const float fFinalEG3Level = EG3.level(itNoteOffEvent->FragmentPos());
748 pEngineChannel->PortamentoPos = (float) MIDIKey() + RTMath::FreqRatioToCents(fFinalEG3Level) * 0.01f;
749 } else {
750 // TODO:
751 }
752 }
753
754 /**
755 * Kill the voice in the regular sense: let the voice render audio until
756 * the kill event actually occurred, then fade down the volume level
757 * very quickly and finally let the voice die. Unlike a normal release
758 * of a voice, a kill process cannot be cancelled and is therefore
759 * usually used for voice stealing and key group conflicts.
760 *
761 * @param itKillEvent - event which caused the voice to be killed
762 */
763 void AbstractVoice::Kill(Pool<Event>::Iterator& itKillEvent) {
764 #if CONFIG_DEVMODE
765 if (!itKillEvent) dmsg(1,("AbstractVoice::Kill(): ERROR, !itKillEvent !!!\n"));
766 if (itKillEvent && !itKillEvent.isValid()) dmsg(1,("AbstractVoice::Kill(): ERROR, itKillEvent invalid !!!\n"));
767 #endif // CONFIG_DEVMODE
768
769 if (itTriggerEvent && itKillEvent->FragmentPos() <= itTriggerEvent->FragmentPos()) return;
770 this->itKillEvent = itKillEvent;
771 }
772
773 Voice::PitchInfo AbstractVoice::CalculatePitchInfo(int PitchBend) {
774 PitchInfo pitch;
775 double pitchbasecents = InstrInfo.FineTune + RgnInfo.FineTune + GetEngine()->ScaleTuning[MIDIKey() % 12];
776
777 // GSt behaviour: maximum transpose up is 40 semitones. If
778 // MIDI key is more than 40 semitones above unity note,
779 // the transpose is not done.
780 if (!SmplInfo.Unpitched && (MIDIKey() - (int) RgnInfo.UnityNote) < 40) pitchbasecents += (MIDIKey() - (int) RgnInfo.UnityNote) * 100;
781
782 pitch.PitchBase = RTMath::CentsToFreqRatioUnlimited(pitchbasecents) * (double(SmplInfo.SampleRate) / double(GetEngine()->SampleRate));
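        // (the SmplInfo.SampleRate / engine sample rate factor compensates for samples
        // that were recorded at a rate different from the audio device's rate)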
783 pitch.PitchBendRange = 1.0 / 8192.0 * 100.0 * InstrInfo.PitchbendRange;
784 pitch.PitchBend = RTMath::CentsToFreqRatio(PitchBend * pitch.PitchBendRange);
785
786 return pitch;
787 }
788
789 void AbstractVoice::onScaleTuningChanged() {
790 PitchInfo pitch = this->Pitch;
791 double pitchbasecents = InstrInfo.FineTune + RgnInfo.FineTune + GetEngine()->ScaleTuning[MIDIKey() % 12];
792
793 // GSt behaviour: maximum transpose up is 40 semitones. If
794 // MIDI key is more than 40 semitones above unity note,
795 // the transpose is not done.
796 if (!SmplInfo.Unpitched && (MIDIKey() - (int) RgnInfo.UnityNote) < 40) pitchbasecents += (MIDIKey() - (int) RgnInfo.UnityNote) * 100;
797
798 pitch.PitchBase = RTMath::CentsToFreqRatioUnlimited(pitchbasecents) * (double(SmplInfo.SampleRate) / double(GetEngine()->SampleRate));
799 this->Pitch = pitch;
800 }
801
802 double AbstractVoice::CalculateVolume(double velocityAttenuation) {
803 // For 16 bit samples, we downscale by 32768 to convert from
804 // int16 value range to DSP value range (which is
805 // -1.0..1.0). For 24 bit, we downscale from int32.
806 float volume = velocityAttenuation / (SmplInfo.BitDepth == 16 ? 32768.0f : 32768.0f * 65536.0f);
807
808 volume *= GetSampleAttenuation() * pEngineChannel->GlobalVolume * GLOBAL_VOLUME;
809
810 // the volume of release triggered samples depends on note length
811 if (Type & Voice::type_release_trigger) {
812 float noteLength = float(GetEngine()->FrameTime + Delay -
813 GetNoteOnTime(MIDIKey()) ) / GetEngine()->SampleRate;
814
815 volume *= GetReleaseTriggerAttenuation(noteLength);
816 }
817
818 return volume;
819 }
820
821 float AbstractVoice::GetReleaseTriggerAttenuation(float noteLength) {
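        // linear attenuation depending on note length; for very long notes this can drop
        // to <= 0, in which case CalculateVolume() becomes non-positive and Trigger()
        // rejects the voice (volume <= 0 check)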
822 return 1 - RgnInfo.ReleaseTriggerDecay * noteLength;
823 }
824
825 void AbstractVoice::EnterReleaseStage() {
826 if (pSignalUnitRack == NULL) {
827 pEG1->update(EG::event_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
828 pEG2->update(EG::event_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
829 } else {
830 pSignalUnitRack->EnterReleaseStage();
831 }
832 }
833
834 bool AbstractVoice::EG1Finished() {
835 if (pSignalUnitRack == NULL) {
836 return pEG1->getSegmentType() == EG::segment_end;
837 } else {
838 return !pSignalUnitRack->GetEndpointUnit()->Active();
839 }
840 }
841
842 } // namespace LinuxSampler
