/[svn]/linuxsampler/trunk/src/engines/common/AbstractVoice.cpp

Contents of /linuxsampler/trunk/src/engines/common/AbstractVoice.cpp



Revision 2297
Fri Dec 9 15:04:55 2011 UTC by iliev
File size: 37553 bytes
* implemented opcodes delay, delay_onccN, delay_random,
  delay_samples, delay_samples_onccN

1 /***************************************************************************
2 * *
3 * LinuxSampler - modular, streaming capable sampler *
4 * *
5 * Copyright (C) 2003,2004 by Benno Senoner and Christian Schoenebeck *
6 * Copyright (C) 2005-2008 Christian Schoenebeck *
7 * Copyright (C) 2009-2011 Christian Schoenebeck and Grigor Iliev *
8 * *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License as published by *
11 * the Free Software Foundation; either version 2 of the License, or *
12 * (at your option) any later version. *
13 * *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details. *
18 * *
19 * You should have received a copy of the GNU General Public License *
20 * along with this program; if not, write to the Free Software *
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, *
22 * MA 02111-1307 USA *
23 ***************************************************************************/
24
25 #include "AbstractVoice.h"
26
27 namespace LinuxSampler {
28
29 AbstractVoice::AbstractVoice(SignalUnitRack* pRack): pSignalUnitRack(pRack) {
30 pEngineChannel = NULL;
31 pLFO1 = new LFOUnsigned(1.0f); // amplitude LFO (0..1 range)
32 pLFO2 = new LFOUnsigned(1.0f); // filter LFO (0..1 range)
33 pLFO3 = new LFOSigned(1200.0f); // pitch LFO (-1200..+1200 range)
34 PlaybackState = playback_state_end;
35 SynthesisMode = 0; // set all mode bits to 0 first
36 // select synthesis implementation (asm core is not supported ATM)
37 #if 0 // CONFIG_ASM && ARCH_X86
38 SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, Features::supportsMMX() && Features::supportsSSE());
39 #else
40 SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, false);
41 #endif
42 SYNTHESIS_MODE_SET_PROFILING(SynthesisMode, gig::Profiler::isEnabled());
43
44 finalSynthesisParameters.filterLeft.Reset();
45 finalSynthesisParameters.filterRight.Reset();
46 }
47
48 AbstractVoice::~AbstractVoice() {
49 if (pLFO1) delete pLFO1;
50 if (pLFO2) delete pLFO2;
51 if (pLFO3) delete pLFO3;
52 }
53
54 /**
55 * Resets voice variables. Should only be called while the rendering process is
56 * suspended / not running.
57 */
58 void AbstractVoice::Reset() {
59 finalSynthesisParameters.filterLeft.Reset();
60 finalSynthesisParameters.filterRight.Reset();
61 DiskStreamRef.pStream = NULL;
62 DiskStreamRef.hStream = 0;
63 DiskStreamRef.State = Stream::state_unused;
64 DiskStreamRef.OrderID = 0;
65 PlaybackState = playback_state_end;
66 itTriggerEvent = Pool<Event>::Iterator();
67 itKillEvent = Pool<Event>::Iterator();
68 }
69
70 /**
71 * Initializes and triggers the voice, a disk stream will be launched if
72 * needed.
73 *
74 * @param pEngineChannel - engine channel on which this voice was ordered
75 * @param itNoteOnEvent - event that caused triggering of this voice
76 * @param PitchBend - MIDI detune factor (-8192 ... +8191)
77 * @param pRegion - points to the region which provides sample wave(s) and articulation data
78 * @param VoiceType - type of this voice
79 * @param iKeyGroup - a value > 0 defines the key group of which this voice is a member
80 * @returns 0 on success, a value < 0 if the voice wasn't triggered
81 * (either due to an error or e.g. because no region is
82 * defined for the given key)
83 */
84 int AbstractVoice::Trigger (
85 AbstractEngineChannel* pEngineChannel,
86 Pool<Event>::Iterator& itNoteOnEvent,
87 int PitchBend,
88 type_t VoiceType,
89 int iKeyGroup
90 ) {
91 this->pEngineChannel = pEngineChannel;
92 Orphan = false;
93
94 #if CONFIG_DEVMODE
95 if (itNoteOnEvent->FragmentPos() > GetEngine()->MaxSamplesPerCycle) { // just a sanity check for debugging
96 dmsg(1,("Voice::Trigger(): ERROR, TriggerDelay > Totalsamples\n"));
97 }
98 #endif // CONFIG_DEVMODE
99
100 Type = VoiceType;
101 MIDIKey = itNoteOnEvent->Param.Note.Key;
102 MIDIVelocity = itNoteOnEvent->Param.Note.Velocity;
103 MIDIPan = pEngineChannel->ControllerTable[10];
104 if (MIDIPan == 0 && pEngineChannel->GlobalPanRight == 1) MIDIPan = 64; // workaround used to determine whether the MIDI pan has not been set
105 PlaybackState = playback_state_init; // mark voice as triggered, but no audio rendered yet
106 Delay = itNoteOnEvent->FragmentPos();
107 itTriggerEvent = itNoteOnEvent;
108 itKillEvent = Pool<Event>::Iterator();
109 MidiKeyBase* pKeyInfo = GetMidiKeyInfo(MIDIKey);
110
111 pGroupEvents = iKeyGroup ? pEngineChannel->ActiveKeyGroups[iKeyGroup] : 0;
112
113 SmplInfo = GetSampleInfo();
114 RgnInfo = GetRegionInfo();
115 InstrInfo = GetInstrumentInfo();
116
117 AboutToTrigger();
118
119 // calculate volume
120 const double velocityAttenuation = GetVelocityAttenuation(itNoteOnEvent->Param.Note.Velocity);
121 float volume = CalculateVolume(velocityAttenuation) * pKeyInfo->Volume;
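// a total volume of zero (or less) means the voice would be inaudible, so don't trigger it at all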
122 if (volume <= 0) return -1;
123
124 // select channel mode (mono or stereo)
125 SYNTHESIS_MODE_SET_CHANNELS(SynthesisMode, SmplInfo.ChannelCount == 2);
126 // select bit depth (16 or 24)
127 SYNTHESIS_MODE_SET_BITDEPTH24(SynthesisMode, SmplInfo.BitDepth == 24);
128
129 // get starting crossfade volume level
130 float crossfadeVolume = CalculateCrossfadeVolume(itNoteOnEvent->Param.Note.Velocity);
131
132 VolumeLeft = volume * pKeyInfo->PanLeft * AbstractEngine::PanCurve[64 - RgnInfo.Pan];
133 VolumeRight = volume * pKeyInfo->PanRight * AbstractEngine::PanCurve[64 + RgnInfo.Pan];
134
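// the crossfade, volume and pan smoothers are advanced once per subfragment, so they are
// triggered with the control rate: sample rate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE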
135 float subfragmentRate = GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE;
136 CrossfadeSmoother.trigger(crossfadeVolume, subfragmentRate);
137 VolumeSmoother.trigger(pEngineChannel->MidiVolume, subfragmentRate);
138 PanLeftSmoother.trigger(pEngineChannel->GlobalPanLeft, subfragmentRate);
139 PanRightSmoother.trigger(pEngineChannel->GlobalPanRight, subfragmentRate);
140
141 // Check if the sample needs disk streaming or is too short for that
142 long cachedsamples = GetSampleCacheSize() / SmplInfo.FrameSize;
143 DiskVoice = cachedsamples < SmplInfo.TotalFrameCount;
144
145 SetSampleStartOffset();
146
147 if (DiskVoice) { // voice to be streamed from disk
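// determine how far playback may proceed within the cached (RAM) part of the sample before
// the disk stream has to take over, keeping headroom for one render cycle at maximum pitch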
148 if (cachedsamples > (GetEngine()->MaxSamplesPerCycle << CONFIG_MAX_PITCH)) {
149 MaxRAMPos = cachedsamples - (GetEngine()->MaxSamplesPerCycle << CONFIG_MAX_PITCH) / SmplInfo.ChannelCount; //TODO: this calculation is too pessimistic and might better be moved to the Render() method, so it calculates MaxRAMPos dependent on the current demand of sample points to be rendered (e.g. in case of JACK)
150 } else {
151 // The cache is too small to fit a max sample buffer.
152 // Setting MaxRAMPos to 0 will probably cause a click
153 // in the audio, but it's better than not handling
154 // this case at all, which would have caused the
155 // unsigned MaxRAMPos to be set to a negative number.
156 MaxRAMPos = 0;
157 }
158
159 // check if there's a loop defined which completely fits into the cached (RAM) part of the sample
160 RAMLoop = (SmplInfo.HasLoops && (SmplInfo.LoopStart + SmplInfo.LoopLength) <= MaxRAMPos);
161
162 if (OrderNewStream()) return -1;
163 dmsg(4,("Disk voice launched (cached samples: %d, total Samples: %d, MaxRAMPos: %d, RAMLooping: %s)\n", cachedsamples, SmplInfo.TotalFrameCount, MaxRAMPos, (RAMLoop) ? "yes" : "no"));
164 }
165 else { // RAM only voice
166 MaxRAMPos = cachedsamples;
167 RAMLoop = (SmplInfo.HasLoops);
168 dmsg(4,("RAM only voice launched (Looping: %s)\n", (RAMLoop) ? "yes" : "no"));
169 }
170 if (RAMLoop) {
171 loop.uiTotalCycles = SmplInfo.LoopPlayCount;
172 loop.uiCyclesLeft = SmplInfo.LoopPlayCount;
173 loop.uiStart = SmplInfo.LoopStart;
174 loop.uiEnd = SmplInfo.LoopStart + SmplInfo.LoopLength;
175 loop.uiSize = SmplInfo.LoopLength;
176 }
177
178 Pitch = CalculatePitchInfo(PitchBend);
179
180 // the lengths of the decay and release curves depend on the velocity
181 const double velrelease = 1 / GetVelocityRelease(itNoteOnEvent->Param.Note.Velocity);
182
183 if (pSignalUnitRack == NULL) { // setup EG 1 (VCA EG)
184 // get current value of EG1 controller
185 double eg1controllervalue = GetEG1ControllerValue(itNoteOnEvent->Param.Note.Velocity);
186
187 // calculate influence of EG1 controller on EG1's parameters
188 EGInfo egInfo = CalculateEG1ControllerInfluence(eg1controllervalue);
189
190 TriggerEG1(egInfo, velrelease, velocityAttenuation, GetEngine()->SampleRate, itNoteOnEvent->Param.Note.Velocity);
191 } else {
192 pSignalUnitRack->Trigger();
193 }
194
195 #ifdef CONFIG_INTERPOLATE_VOLUME
196 // setup initial volume in synthesis parameters
197 #ifdef CONFIG_PROCESS_MUTED_CHANNELS
198 if (pEngineChannel->GetMute()) {
199 finalSynthesisParameters.fFinalVolumeLeft = 0;
200 finalSynthesisParameters.fFinalVolumeRight = 0;
201 }
202 else
203 #else
204 {
205 float finalVolume;
206 if (pSignalUnitRack == NULL) {
207 finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * pEG1->getLevel();
208 } else {
209 finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * pSignalUnitRack->GetEndpointUnit()->GetVolume();
210 }
211
212 finalSynthesisParameters.fFinalVolumeLeft = finalVolume * VolumeLeft * pEngineChannel->GlobalPanLeft;
213 finalSynthesisParameters.fFinalVolumeRight = finalVolume * VolumeRight * pEngineChannel->GlobalPanRight;
214 }
215 #endif
216 #endif
217
218 if (pSignalUnitRack == NULL) {
219 // setup EG 2 (VCF Cutoff EG)
220 {
221 // get current value of EG2 controller
222 double eg2controllervalue = GetEG2ControllerValue(itNoteOnEvent->Param.Note.Velocity);
223
224 // calculate influence of EG2 controller on EG2's parameters
225 EGInfo egInfo = CalculateEG2ControllerInfluence(eg2controllervalue);
226
227 TriggerEG2(egInfo, velrelease, velocityAttenuation, GetEngine()->SampleRate, itNoteOnEvent->Param.Note.Velocity);
228 }
229
230
231 // setup EG 3 (VCO EG)
232 {
233 // if portamento mode is on, we dedicate EG3 purely to portamento; otherwise we do as told by the patch
234 bool bPortamento = pEngineChannel->PortamentoMode && pEngineChannel->PortamentoPos >= 0.0f;
235 float eg3depth = (bPortamento)
236 ? RTMath::CentsToFreqRatio((pEngineChannel->PortamentoPos - (float) MIDIKey) * 100)
237 : RTMath::CentsToFreqRatio(RgnInfo.EG3Depth);
238 float eg3time = (bPortamento)
239 ? pEngineChannel->PortamentoTime
240 : RgnInfo.EG3Attack;
241 EG3.trigger(eg3depth, eg3time, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
242 dmsg(5,("PortamentoPos=%f, depth=%f, time=%f\n", pEngineChannel->PortamentoPos, eg3depth, eg3time));
243 }
244
245
246 // setup LFO 1 (VCA LFO)
247 InitLFO1();
248 // setup LFO 2 (VCF Cutoff LFO)
249 InitLFO2();
250 // setup LFO 3 (VCO LFO)
251 InitLFO3();
252 }
253
254
255 #if CONFIG_FORCE_FILTER
256 const bool bUseFilter = true;
257 #else // use filter only if instrument file told so
258 const bool bUseFilter = RgnInfo.VCFEnabled;
259 #endif // CONFIG_FORCE_FILTER
260 SYNTHESIS_MODE_SET_FILTER(SynthesisMode, bUseFilter);
261 if (bUseFilter) {
262 #ifdef CONFIG_OVERRIDE_CUTOFF_CTRL
263 VCFCutoffCtrl.controller = CONFIG_OVERRIDE_CUTOFF_CTRL;
264 #else // use the one defined in the instrument file
265 VCFCutoffCtrl.controller = GetVCFCutoffCtrl();
266 #endif // CONFIG_OVERRIDE_CUTOFF_CTRL
267
268 #ifdef CONFIG_OVERRIDE_RESONANCE_CTRL
269 VCFResonanceCtrl.controller = CONFIG_OVERRIDE_RESONANCE_CTRL;
270 #else // use the one defined in the instrument file
271 VCFResonanceCtrl.controller = GetVCFResonanceCtrl();
272 #endif // CONFIG_OVERRIDE_RESONANCE_CTRL
273
274 #ifndef CONFIG_OVERRIDE_FILTER_TYPE
275 finalSynthesisParameters.filterLeft.SetType(RgnInfo.VCFType);
276 finalSynthesisParameters.filterRight.SetType(RgnInfo.VCFType);
277 #else // override filter type
278 finalSynthesisParameters.filterLeft.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
279 finalSynthesisParameters.filterRight.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
280 #endif // CONFIG_OVERRIDE_FILTER_TYPE
281
282 VCFCutoffCtrl.value = pEngineChannel->ControllerTable[VCFCutoffCtrl.controller];
283 VCFResonanceCtrl.value = pEngineChannel->ControllerTable[VCFResonanceCtrl.controller];
284
285 // calculate cutoff frequency
286 CutoffBase = CalculateCutoffBase(itNoteOnEvent->Param.Note.Velocity);
287
288 VCFCutoffCtrl.fvalue = CalculateFinalCutoff(CutoffBase);
289
290 // calculate resonance
291 float resonance = (float) (VCFResonanceCtrl.controller ? VCFResonanceCtrl.value : RgnInfo.VCFResonance);
292 VCFResonanceCtrl.fvalue = resonance;
293 } else {
294 VCFCutoffCtrl.controller = 0;
295 VCFResonanceCtrl.controller = 0;
296 }
297
298 return 0; // success
299 }
300
301 void AbstractVoice::SetSampleStartOffset() {
302 finalSynthesisParameters.dPos = RgnInfo.SampleStartOffset; // offset where we should start playback of sample (0 - 2000 sample points)
303 Pos = RgnInfo.SampleStartOffset;
304 }
305
306 /**
307 * Synthesizes the current audio fragment for this voice.
308 *
309 * @param Samples - number of sample points to be rendered in this audio
310 * fragment cycle
311 * @param pSrc - pointer to input sample data
312 * @param Skip - number of sample points to skip in output buffer
313 */
314 void AbstractVoice::Synthesize(uint Samples, sample_t* pSrc, uint Skip) {
315 bool delay = false; // Whether the voice playback should be delayed for this call
316
317 if (pSignalUnitRack != NULL) {
318 uint delaySteps = pSignalUnitRack->GetEndpointUnit()->DelayTrigger();
319 if (delaySteps > 0) { // delay on the endpoint unit means delay of the voice playback
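// if the remaining delay covers this entire fragment, consume as much of it as possible and
// skip rendering for this call; otherwise consume the rest of the delay by shifting the
// render start (Skip) within this fragment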
320 if (delaySteps >= Samples) {
321 pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(Samples);
322 delay = true;
323 } else {
324 pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(delaySteps);
325 Samples -= delaySteps;
326 Skip += delaySteps;
327 }
328 }
329 }
330
331 AbstractEngineChannel* pChannel = pEngineChannel;
332 MidiKeyBase* pMidiKeyInfo = GetMidiKeyInfo(MIDIKey);
333
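// voices with key specific FX send levels cannot simply be mixed into the engine channel's
// output buffers; they are rendered into the dedicated voice channels and routed separately
// at the end of this method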
334 const bool bVoiceRequiresDedicatedRouting =
335 pEngineChannel->GetFxSendCount() > 0 &&
336 (pMidiKeyInfo->ReverbSend || pMidiKeyInfo->ChorusSend);
337
338 const bool bEq =
339 pSignalUnitRack != NULL && pSignalUnitRack->HasEq() && GetEngine()->pEq->HasSupport();
340
341 if (bEq) {
342 GetEngine()->pEq->GetInChannelLeft()->Clear();
343 GetEngine()->pEq->GetInChannelRight()->Clear();
344 finalSynthesisParameters.pOutLeft = &GetEngine()->pEq->GetInChannelLeft()->Buffer()[Skip];
345 finalSynthesisParameters.pOutRight = &GetEngine()->pEq->GetInChannelRight()->Buffer()[Skip];
346 pSignalUnitRack->UpdateEqSettings(GetEngine()->pEq);
347 } else if (bVoiceRequiresDedicatedRouting) {
348 finalSynthesisParameters.pOutLeft = &GetEngine()->pDedicatedVoiceChannelLeft->Buffer()[Skip];
349 finalSynthesisParameters.pOutRight = &GetEngine()->pDedicatedVoiceChannelRight->Buffer()[Skip];
350 } else {
351 finalSynthesisParameters.pOutLeft = &pChannel->pChannelLeft->Buffer()[Skip];
352 finalSynthesisParameters.pOutRight = &pChannel->pChannelRight->Buffer()[Skip];
353 }
354 finalSynthesisParameters.pSrc = pSrc;
355
356 RTList<Event>::Iterator itCCEvent = pChannel->pEvents->first();
357 RTList<Event>::Iterator itNoteEvent;
358 GetFirstEventOnKey(MIDIKey, itNoteEvent);
359
360 RTList<Event>::Iterator itGroupEvent;
361 if (pGroupEvents) itGroupEvent = pGroupEvents->first();
362
363 if (itTriggerEvent) { // skip events that happened before this voice was triggered
364 while (itCCEvent && itCCEvent->FragmentPos() <= Skip) ++itCCEvent;
365 while (itGroupEvent && itGroupEvent->FragmentPos() <= Skip) ++itGroupEvent;
366
367 // we can't simply compare the timestamps here, because note events
368 // might share the same time stamp, so we have to rely on the
369 // actual order in which the note events arrived instead (see bug #112)
370 for (; itNoteEvent; ++itNoteEvent) {
371 if (itTriggerEvent == itNoteEvent) {
372 ++itNoteEvent;
373 break;
374 }
375 }
376 }
377
378 uint killPos;
379 if (itKillEvent) {
380 int maxFadeOutPos = Samples - GetEngine()->GetMinFadeOutSamples();
381 if (maxFadeOutPos < 0) {
382 // There's not enough space in buffer to do a fade out
383 // from max volume (this can only happen for audio
384 // drivers that use Samples < MaxSamplesPerCycle).
385 // End the EG1 here, at pos 0, with a shorter max fade
386 // out time.
387 if (pSignalUnitRack == NULL) {
388 pEG1->enterFadeOutStage(Samples / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
389 } else {
390 // TODO:
391 }
392 itKillEvent = Pool<Event>::Iterator();
393 } else {
394 killPos = RTMath::Min(itKillEvent->FragmentPos(), maxFadeOutPos);
395 }
396 }
397
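// render the fragment in subfragments of (at most) CONFIG_DEFAULT_SUBFRAGMENT_SIZE sample
// points: control signals (EGs, LFOs, smoothers, CC events) are evaluated once per
// subfragment, while the audio itself is rendered at full rate by RunSynthesisFunction()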
398 uint i = Skip;
399 while (i < Samples) {
400 int iSubFragmentEnd = RTMath::Min(i + CONFIG_DEFAULT_SUBFRAGMENT_SIZE, Samples);
401
402 // initialize all final synthesis parameters
403 fFinalCutoff = VCFCutoffCtrl.fvalue;
404 fFinalResonance = VCFResonanceCtrl.fvalue;
405
406 // process MIDI control change and pitchbend events for this subfragment
407 processCCEvents(itCCEvent, iSubFragmentEnd);
408 uint8_t pan = MIDIPan;
409 if (pSignalUnitRack != NULL) pan = pSignalUnitRack->GetEndpointUnit()->CaluclatePan(pan);
410
411 PanLeftSmoother.update(AbstractEngine::PanCurve[128 - pan]);
412 PanRightSmoother.update(AbstractEngine::PanCurve[pan]);
413
414 finalSynthesisParameters.fFinalPitch = Pitch.PitchBase * Pitch.PitchBend;
415 float fFinalVolume = VolumeSmoother.render() * CrossfadeSmoother.render();
416 #ifdef CONFIG_PROCESS_MUTED_CHANNELS
417 if (pChannel->GetMute()) fFinalVolume = 0;
418 #endif
419
420 // process transition events (note on, note off & sustain pedal)
421 processTransitionEvents(itNoteEvent, iSubFragmentEnd);
422 processGroupEvents(itGroupEvent, iSubFragmentEnd);
423
424 if (pSignalUnitRack == NULL) {
425 // if the voice was killed in this subfragment, or if the
426 // filter EG is finished, switch EG1 to fade out stage
427 if ((itKillEvent && killPos <= iSubFragmentEnd) ||
428 (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) &&
429 pEG2->getSegmentType() == EG::segment_end)) {
430 pEG1->enterFadeOutStage();
431 itKillEvent = Pool<Event>::Iterator();
432 }
433
434 // process envelope generators
435 switch (pEG1->getSegmentType()) {
436 case EG::segment_lin:
437 fFinalVolume *= pEG1->processLin();
438 break;
439 case EG::segment_exp:
440 fFinalVolume *= pEG1->processExp();
441 break;
442 case EG::segment_end:
443 fFinalVolume *= pEG1->getLevel();
444 break; // noop
445 case EG::segment_pow:
446 fFinalVolume *= pEG1->processPow();
447 break;
448 }
449 switch (pEG2->getSegmentType()) {
450 case EG::segment_lin:
451 fFinalCutoff *= pEG2->processLin();
452 break;
453 case EG::segment_exp:
454 fFinalCutoff *= pEG2->processExp();
455 break;
456 case EG::segment_end:
457 fFinalCutoff *= pEG2->getLevel();
458 break; // noop
459 case EG::segment_pow:
460 fFinalCutoff *= pEG2->processPow();
461 break;
462 }
463 if (EG3.active()) finalSynthesisParameters.fFinalPitch *= EG3.render();
464
465 // process low frequency oscillators
466 if (bLFO1Enabled) fFinalVolume *= (1.0f - pLFO1->render());
467 if (bLFO2Enabled) fFinalCutoff *= pLFO2->render();
468 if (bLFO3Enabled) finalSynthesisParameters.fFinalPitch *= RTMath::CentsToFreqRatio(pLFO3->render());
469 } else {
470 // if the voice was killed in this subfragment, or if the
471 // filter EG is finished, switch EG1 to fade out stage
472 /*if ((itKillEvent && killPos <= iSubFragmentEnd) ||
473 (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) &&
474 pEG2->getSegmentType() == EG::segment_end)) {
475 pEG1->enterFadeOutStage();
476 itKillEvent = Pool<Event>::Iterator();
477 }*/
478 // TODO: ^^^
479
480 fFinalVolume *= pSignalUnitRack->GetEndpointUnit()->GetVolume();
481 fFinalCutoff = pSignalUnitRack->GetEndpointUnit()->CalculateFilterCutoff(fFinalCutoff);
482 fFinalResonance = pSignalUnitRack->GetEndpointUnit()->CalculateResonance(fFinalResonance);
483
484 finalSynthesisParameters.fFinalPitch =
485 pSignalUnitRack->GetEndpointUnit()->CalculatePitch(finalSynthesisParameters.fFinalPitch);
486
487 }
488
489 // limit the pitch so we don't read outside the buffer
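// (the cap of 1 << CONFIG_MAX_PITCH corresponds to CONFIG_MAX_PITCH octaves of upward
// transposition, which is what the disk stream and cache headroom are laid out for)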
490 finalSynthesisParameters.fFinalPitch = RTMath::Min(finalSynthesisParameters.fFinalPitch, float(1 << CONFIG_MAX_PITCH));
491
492 // if filter enabled then update filter coefficients
493 if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode)) {
494 finalSynthesisParameters.filterLeft.SetParameters(fFinalCutoff, fFinalResonance, GetEngine()->SampleRate);
495 finalSynthesisParameters.filterRight.SetParameters(fFinalCutoff, fFinalResonance, GetEngine()->SampleRate);
496 }
497
498 // do we need resampling?
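// (interpolation can be skipped when the final pitch deviates from unity by less than one
// cent; the constants below are 2^(+1/1200) and 2^(-1/1200) respectively)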
499 const float __PLUS_ONE_CENT = 1.000577789506554859250142541782224725466f;
500 const float __MINUS_ONE_CENT = 0.9994225441413807496009516495583113737666f;
501 const bool bResamplingRequired = !(finalSynthesisParameters.fFinalPitch <= __PLUS_ONE_CENT &&
502 finalSynthesisParameters.fFinalPitch >= __MINUS_ONE_CENT);
503 SYNTHESIS_MODE_SET_INTERPOLATE(SynthesisMode, bResamplingRequired);
504
505 // prepare final synthesis parameters structure
506 finalSynthesisParameters.uiToGo = iSubFragmentEnd - i;
507 #ifdef CONFIG_INTERPOLATE_VOLUME
508 finalSynthesisParameters.fFinalVolumeDeltaLeft =
509 (fFinalVolume * VolumeLeft * PanLeftSmoother.render() -
510 finalSynthesisParameters.fFinalVolumeLeft) / finalSynthesisParameters.uiToGo;
511 finalSynthesisParameters.fFinalVolumeDeltaRight =
512 (fFinalVolume * VolumeRight * PanRightSmoother.render() -
513 finalSynthesisParameters.fFinalVolumeRight) / finalSynthesisParameters.uiToGo;
514 #else
515 finalSynthesisParameters.fFinalVolumeLeft =
516 fFinalVolume * VolumeLeft * PanLeftSmoother.render();
517 finalSynthesisParameters.fFinalVolumeRight =
518 fFinalVolume * VolumeRight * PanRightSmoother.render();
519 #endif
520 // render audio for one subfragment
521 if (!delay) RunSynthesisFunction(SynthesisMode, &finalSynthesisParameters, &loop);
522
523 if (pSignalUnitRack == NULL) {
524 // stop the rendering if volume EG is finished
525 if (pEG1->getSegmentType() == EG::segment_end) break;
526 } else {
527 // stop the rendering if the endpoint unit is not active
528 if (!pSignalUnitRack->GetEndpointUnit()->Active()) break;
529 }
530
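// advance the playback position by the number of sample points rendered in this
// subfragment, scaled by the current pitch (resampling) factor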
531 const double newPos = Pos + (iSubFragmentEnd - i) * finalSynthesisParameters.fFinalPitch;
532
533 if (pSignalUnitRack == NULL) {
534 // increment envelopes' positions
535 if (pEG1->active()) {
536
537 // if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage
538 if (SmplInfo.HasLoops && Pos <= SmplInfo.LoopStart && SmplInfo.LoopStart < newPos) {
539 pEG1->update(EG::event_hold_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
540 }
541
542 pEG1->increment(1);
543 if (!pEG1->toStageEndLeft()) pEG1->update(EG::event_stage_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
544 }
545 if (pEG2->active()) {
546 pEG2->increment(1);
547 if (!pEG2->toStageEndLeft()) pEG2->update(EG::event_stage_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
548 }
549 EG3.increment(1);
550 if (!EG3.toEndLeft()) EG3.update(); // neutralize envelope coefficient if end reached
551 } else {
552 // if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage
553 /*if (SmplInfo.HasLoops && Pos <= SmplInfo.LoopStart && SmplInfo.LoopStart < newPos) {
554 pEG1->update(EG::event_hold_end, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
555 }*/
556 // TODO: ^^^
557
558 if (!delay) pSignalUnitRack->Increment();
559 }
560
561 Pos = newPos;
562 i = iSubFragmentEnd;
563 }
564
565 if (delay) return;
566
567 if (bVoiceRequiresDedicatedRouting) {
568 if (bEq) {
569 GetEngine()->pEq->RenderAudio(Samples);
570 GetEngine()->pEq->GetOutChannelLeft()->CopyTo(GetEngine()->pDedicatedVoiceChannelLeft, Samples);
571 GetEngine()->pEq->GetOutChannelRight()->CopyTo(GetEngine()->pDedicatedVoiceChannelRight, Samples);
572 }
573 optional<float> effectSendLevels[2] = {
574 pMidiKeyInfo->ReverbSend,
575 pMidiKeyInfo->ChorusSend
576 };
577 GetEngine()->RouteDedicatedVoiceChannels(pEngineChannel, effectSendLevels, Samples);
578 } else if (bEq) {
579 GetEngine()->pEq->RenderAudio(Samples);
580 GetEngine()->pEq->GetOutChannelLeft()->CopyTo(pChannel->pChannelLeft, Samples);
581 GetEngine()->pEq->GetOutChannelRight()->CopyTo(pChannel->pChannelRight, Samples);
582 }
583 }
584
585 /**
586 * Process given list of MIDI control change and pitch bend events for
587 * the given time.
588 *
589 * @param itEvent - iterator pointing to the next event to be processed
590 * @param End - youngest time stamp where processing should be stopped
591 */
592 void AbstractVoice::processCCEvents(RTList<Event>::Iterator& itEvent, uint End) {
593 for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
594 if (itEvent->Type == Event::type_control_change && itEvent->Param.CC.Controller) { // if (valid) MIDI control change event
595 if (itEvent->Param.CC.Controller == VCFCutoffCtrl.controller) {
596 ProcessCutoffEvent(itEvent);
597 }
598 if (itEvent->Param.CC.Controller == VCFResonanceCtrl.controller) {
599 processResonanceEvent(itEvent);
600 }
601 if (pSignalUnitRack == NULL) {
602 if (itEvent->Param.CC.Controller == pLFO1->ExtController) {
603 pLFO1->update(itEvent->Param.CC.Value);
604 }
605 if (itEvent->Param.CC.Controller == pLFO2->ExtController) {
606 pLFO2->update(itEvent->Param.CC.Value);
607 }
608 if (itEvent->Param.CC.Controller == pLFO3->ExtController) {
609 pLFO3->update(itEvent->Param.CC.Value);
610 }
611 }
612 if (itEvent->Param.CC.Controller == 7) { // volume
613 VolumeSmoother.update(AbstractEngine::VolumeCurve[itEvent->Param.CC.Value]);
614 } else if (itEvent->Param.CC.Controller == 10) { // panpot
615 MIDIPan = itEvent->Param.CC.Value;
616 }
617 } else if (itEvent->Type == Event::type_pitchbend) { // if pitch bend event
618 processPitchEvent(itEvent);
619 }
620
621 ProcessCCEvent(itEvent);
622 if (pSignalUnitRack != NULL) {
623 pSignalUnitRack->ProcessCCEvent(itEvent);
624 }
625 }
626 }
627
628 void AbstractVoice::processPitchEvent(RTList<Event>::Iterator& itEvent) {
629 Pitch.PitchBend = RTMath::CentsToFreqRatio(itEvent->Param.Pitch.Pitch * Pitch.PitchBendRange);
630 }
631
632 void AbstractVoice::processResonanceEvent(RTList<Event>::Iterator& itEvent) {
633 // convert absolute controller value to differential
634 const int ctrldelta = itEvent->Param.CC.Value - VCFResonanceCtrl.value;
635 VCFResonanceCtrl.value = itEvent->Param.CC.Value;
636 const float resonancedelta = (float) ctrldelta;
637 fFinalResonance += resonancedelta;
638 // needed for initialization of parameter
639 VCFResonanceCtrl.fvalue = itEvent->Param.CC.Value;
640 }
641
642 /**
643 * Process given list of MIDI note on, note off and sustain pedal events
644 * for the given time.
645 *
646 * @param itEvent - iterator pointing to the next event to be processed
647 * @param End - youngest time stamp where processing should be stopped
648 */
649 void AbstractVoice::processTransitionEvents(RTList<Event>::Iterator& itEvent, uint End) {
650 for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
651 // some voice types ignore note off
652 if (!(Type & (Voice::type_one_shot | Voice::type_release_trigger | Voice::type_controller_triggered))) {
653 if (itEvent->Type == Event::type_release) {
654 EnterReleaseStage();
655 } else if (itEvent->Type == Event::type_cancel_release) {
656 if (pSignalUnitRack == NULL) {
657 pEG1->update(EG::event_cancel_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
658 pEG2->update(EG::event_cancel_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
659 } else {
660 pSignalUnitRack->CancelRelease();
661 }
662 }
663 }
664 }
665 }
666
667 /**
668 * Process given list of events aimed at all voices in a key group.
669 *
670 * @param itEvent - iterator pointing to the next event to be processed
671 * @param End - youngest time stamp where processing should be stopped
672 */
673 void AbstractVoice::processGroupEvents(RTList<Event>::Iterator& itEvent, uint End) {
674 for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
675 ProcessGroupEvent(itEvent);
676 }
677 }
678
679 /** @brief Update current portamento position.
680 *
681 * Will be called when portamento mode is enabled to get the final
682 * portamento position of this active voice, from which the next voice(s)
683 * might continue to slide.
684 *
685 * @param itNoteOffEvent - event which causes this voice to die soon
686 */
687 void AbstractVoice::UpdatePortamentoPos(Pool<Event>::Iterator& itNoteOffEvent) {
688 if (pSignalUnitRack == NULL) {
689 const float fFinalEG3Level = EG3.level(itNoteOffEvent->FragmentPos());
690 pEngineChannel->PortamentoPos = (float) MIDIKey + RTMath::FreqRatioToCents(fFinalEG3Level) * 0.01f;
691 } else {
692 // TODO:
693 }
694 }
695
696 /**
697 * Kill the voice in the regular sense. Let the voice render audio until
698 * the kill event actually occurred, then fade down the volume level
699 * very quickly and finally let the voice die. Unlike a normal release
700 * of a voice, a kill process cannot be cancelled and is therefore
701 * usually used for voice stealing and key group conflicts.
702 *
703 * @param itKillEvent - event which caused the voice to be killed
704 */
705 void AbstractVoice::Kill(Pool<Event>::Iterator& itKillEvent) {
706 #if CONFIG_DEVMODE
707 if (!itKillEvent) dmsg(1,("AbstractVoice::Kill(): ERROR, !itKillEvent !!!\n"));
708 if (itKillEvent && !itKillEvent.isValid()) dmsg(1,("AbstractVoice::Kill(): ERROR, itKillEvent invalid !!!\n"));
709 #endif // CONFIG_DEVMODE
710
711 if (itTriggerEvent && itKillEvent->FragmentPos() <= itTriggerEvent->FragmentPos()) return;
712 this->itKillEvent = itKillEvent;
713 }
714
715 Voice::PitchInfo AbstractVoice::CalculatePitchInfo(int PitchBend) {
716 PitchInfo pitch;
717 double pitchbasecents = InstrInfo.FineTune + RgnInfo.FineTune + GetEngine()->ScaleTuning[MIDIKey % 12];
718
719 // GSt behaviour: maximum transpose up is 40 semitones. If the
720 // MIDI key is more than 40 semitones above the unity note,
721 // the transpose is not applied.
722 if (!SmplInfo.Unpitched && (MIDIKey - (int) RgnInfo.UnityNote) < 40) pitchbasecents += (MIDIKey - (int) RgnInfo.UnityNote) * 100;
723
724 pitch.PitchBase = RTMath::CentsToFreqRatioUnlimited(pitchbasecents) * (double(SmplInfo.SampleRate) / double(GetEngine()->SampleRate));
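// convert the pitch bend range to cents per pitch wheel step (the wheel covers ±8192 steps;
// the factor of 100 assumes PitchbendRange is given in semitones)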
725 pitch.PitchBendRange = 1.0 / 8192.0 * 100.0 * InstrInfo.PitchbendRange;
726 pitch.PitchBend = RTMath::CentsToFreqRatio(PitchBend * pitch.PitchBendRange);
727
728 return pitch;
729 }
730
731 double AbstractVoice::CalculateVolume(double velocityAttenuation) {
732 // For 16 bit samples, we downscale by 32768 to convert from
733 // int16 value range to DSP value range (which is
734 // -1.0..1.0). For 24 bit, we downscale from int32.
735 float volume = velocityAttenuation / (SmplInfo.BitDepth == 16 ? 32768.0f : 32768.0f * 65536.0f);
736
737 volume *= GetSampleAttenuation() * pEngineChannel->GlobalVolume * GLOBAL_VOLUME;
738
739 // the volume of release triggered samples depends on note length
740 if (Type & Voice::type_release_trigger) {
741 float noteLength = float(GetEngine()->FrameTime + Delay -
742 GetNoteOnTime(MIDIKey) ) / GetEngine()->SampleRate;
743
744 volume *= GetReleaseTriggerAttenuation(noteLength);
745 }
746
747 return volume;
748 }
749
750 float AbstractVoice::GetReleaseTriggerAttenuation(float noteLength) {
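// linear attenuation: the longer the note was held, the more the release triggered sample is attenuated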
751 return 1 - RgnInfo.ReleaseTriggerDecay * noteLength;
752 }
753
754 void AbstractVoice::EnterReleaseStage() {
755 if (pSignalUnitRack == NULL) {
756 pEG1->update(EG::event_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
757 pEG2->update(EG::event_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
758 } else {
759 pSignalUnitRack->EnterReleaseStage();
760 }
761 }
762
763 bool AbstractVoice::EG1Finished() {
764 if (pSignalUnitRack == NULL) {
765 return pEG1->getSegmentType() == EG::segment_end;
766 } else {
767 return !pSignalUnitRack->GetEndpointUnit()->Active();
768 }
769 }
770
771 } // namespace LinuxSampler
