@@ ... @@
 *                                                                         *
 *   Copyright (C) 2003,2004 by Benno Senoner and Christian Schoenebeck   *
 *   Copyright (C) 2005-2008 Christian Schoenebeck                        *
-*   Copyright (C) 2009-2011 Christian Schoenebeck and Grigor Iliev       *
+*   Copyright (C) 2009-2012 Christian Schoenebeck and Grigor Iliev       *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify *
 *   it under the terms of the GNU General Public License as published by *
@@ ... @@
 
     finalSynthesisParameters.filterLeft.Reset();
     finalSynthesisParameters.filterRight.Reset();
 
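+    // start without a per-voice EQ; one is created on demand by CreateEq()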
+    pEq = NULL;
+    bEqSupport = false;
 }
 
 AbstractVoice::~AbstractVoice() {
     if (pLFO1) delete pLFO1;
     if (pLFO2) delete pLFO2;
     if (pLFO3) delete pLFO3;
 
+    if(pEq != NULL) delete pEq;
+}
 
+void AbstractVoice::CreateEq() {
+    if(!bEqSupport) return;
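+    // drop any EQ left over from an earlier CreateEq() call before creating a new one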
+    if(pEq != NULL) delete pEq;
+    pEq = new EqSupport;
+    pEq->InitEffect(GetEngine()->pAudioOutputDevice);
 }
 
 /**
@@ ... @@
     Type            = VoiceType;
     MIDIKey         = itNoteOnEvent->Param.Note.Key;
     MIDIVelocity    = itNoteOnEvent->Param.Note.Velocity;
-    MIDIPan         = pEngineChannel->ControllerTable[10];
-    if (MIDIPan == 0 && pEngineChannel->GlobalPanRight == 1) MIDIPan = 64; // workaround used to determine whether the MIDI pan has not been set
     PlaybackState   = playback_state_init; // mark voice as triggered, but no audio rendered yet
     Delay           = itNoteOnEvent->FragmentPos();
     itTriggerEvent  = itNoteOnEvent;
@@ ... @@
     RgnInfo   = GetRegionInfo();
     InstrInfo = GetInstrumentInfo();
 
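+    // initial pan comes from the channel's most recent pan request (CC 10), mapped through CalculatePan()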
+    MIDIPan = CalculatePan(pEngineChannel->iLastPanRequest);
 
     AboutToTrigger();
 
     // calculate volume
@@ ... @@
     // get starting crossfade volume level
     float crossfadeVolume = CalculateCrossfadeVolume(itNoteOnEvent->Param.Note.Velocity);
 
-    VolumeLeft  = volume * pKeyInfo->PanLeft  * AbstractEngine::PanCurve[64 - RgnInfo.Pan];
-    VolumeRight = volume * pKeyInfo->PanRight * AbstractEngine::PanCurve[64 + RgnInfo.Pan];
+    VolumeLeft  = volume * pKeyInfo->PanLeft;
+    VolumeRight = volume * pKeyInfo->PanRight;
 
     float subfragmentRate = GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE;
     CrossfadeSmoother.trigger(crossfadeVolume, subfragmentRate);
     VolumeSmoother.trigger(pEngineChannel->MidiVolume, subfragmentRate);
-    PanLeftSmoother.trigger(pEngineChannel->GlobalPanLeft, subfragmentRate);
-    PanRightSmoother.trigger(pEngineChannel->GlobalPanRight, subfragmentRate);
 
     // Check if the sample needs disk streaming or is too short for that
     long cachedsamples = GetSampleCacheSize() / SmplInfo.FrameSize;
@@ ... @@
         pSignalUnitRack->Trigger();
     }
 
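+    // prime the pan smoothers with the voice's initial pan; the rack's endpoint unit may adjust it first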
+    uint8_t pan = MIDIPan;
+    if (pSignalUnitRack) pan = pSignalUnitRack->GetEndpointUnit()->CalculatePan(MIDIPan);
+    PanLeftSmoother.trigger(AbstractEngine::PanCurve[128 - pan], subfragmentRate);
+    PanRightSmoother.trigger(AbstractEngine::PanCurve[pan], subfragmentRate);
 
 #ifdef CONFIG_INTERPOLATE_VOLUME
     // setup initial volume in synthesis parameters
 #ifdef CONFIG_PROCESS_MUTED_CHANNELS
@@ ... @@
             finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * pSignalUnitRack->GetEndpointUnit()->GetVolume();
         }
 
-        finalSynthesisParameters.fFinalVolumeLeft  = finalVolume * VolumeLeft  * pEngineChannel->GlobalPanLeft;
-        finalSynthesisParameters.fFinalVolumeRight = finalVolume * VolumeRight * pEngineChannel->GlobalPanRight;
+        finalSynthesisParameters.fFinalVolumeLeft  = finalVolume * VolumeLeft  * PanLeftSmoother.render();
+        finalSynthesisParameters.fFinalVolumeRight = finalVolume * VolumeRight * PanRightSmoother.render();
     }
 #endif
 #endif
@@ ... @@
         VCFCutoffCtrl.controller = 0;
         VCFResonanceCtrl.controller = 0;
     }
 
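+    // reset the per-voice EQ: clear its input buffers and run it over one full audio cycle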
+    const bool bEq =
+        pSignalUnitRack != NULL && pSignalUnitRack->HasEq() && pEq->HasSupport();
 
+    if (bEq) {
+        pEq->GetInChannelLeft()->Clear();
+        pEq->GetInChannelRight()->Clear();
+        pEq->RenderAudio(GetEngine()->pAudioOutputDevice->MaxSamplesPerCycle());
+    }
 
     return 0; // success
 }
@@ ... @@
  * @param Skip - number of sample points to skip in output buffer
  */
 void AbstractVoice::Synthesize(uint Samples, sample_t* pSrc, uint Skip) {
+    bool delay = false; // Whether the voice playback should be delayed for this call
+
+    if (pSignalUnitRack != NULL) {
+        uint delaySteps = pSignalUnitRack->GetEndpointUnit()->DelayTrigger();
+        if (delaySteps > 0) { // delay on the endpoint unit means delay of the voice playback
+            if (delaySteps >= Samples) {
+                pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(Samples);
+                delay = true;
+            } else {
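+                // the delay ends within this fragment: consume it and render only the remainder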
+                pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(delaySteps);
+                Samples -= delaySteps;
+                Skip += delaySteps;
+            }
+        }
+    }
 
     AbstractEngineChannel* pChannel = pEngineChannel;
     MidiKeyBase* pMidiKeyInfo = GetMidiKeyInfo(MIDIKey);
 
     const bool bVoiceRequiresDedicatedRouting =
         pEngineChannel->GetFxSendCount() > 0 &&
         (pMidiKeyInfo->ReverbSend || pMidiKeyInfo->ChorusSend);
 
+    const bool bEq =
+        pSignalUnitRack != NULL && pSignalUnitRack->HasEq() && pEq->HasSupport();
 
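+    // when the per-voice EQ is active, route the synthesis output into the EQ's input buffers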
-    if (bVoiceRequiresDedicatedRouting) {
+    if (bEq) {
+        pEq->GetInChannelLeft()->Clear();
+        pEq->GetInChannelRight()->Clear();
+        finalSynthesisParameters.pOutLeft  = &pEq->GetInChannelLeft()->Buffer()[Skip];
+        finalSynthesisParameters.pOutRight = &pEq->GetInChannelRight()->Buffer()[Skip];
+        pSignalUnitRack->UpdateEqSettings(pEq);
+    } else if (bVoiceRequiresDedicatedRouting) {
         finalSynthesisParameters.pOutLeft  = &GetEngine()->pDedicatedVoiceChannelLeft->Buffer()[Skip];
         finalSynthesisParameters.pOutRight = &GetEngine()->pDedicatedVoiceChannelRight->Buffer()[Skip];
     } else {
@@ ... @@
     GetFirstEventOnKey(MIDIKey, itNoteEvent);
 
     RTList<Event>::Iterator itGroupEvent;
-    if (pGroupEvents) itGroupEvent = pGroupEvents->first();
+    if (pGroupEvents && !Orphan) itGroupEvent = pGroupEvents->first();
 
     if (itTriggerEvent) { // skip events that happened before this voice was triggered
         while (itCCEvent && itCCEvent->FragmentPos() <= Skip) ++itCCEvent;
@@ ... @@
             if (pSignalUnitRack == NULL) {
                 pEG1->enterFadeOutStage(Samples / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
             } else {
-                // TODO:
+                pSignalUnitRack->EnterFadeOutStage(Samples / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
             }
             itKillEvent = Pool<Event>::Iterator();
         } else {
@@ ... @@
         fFinalCutoff    = VCFCutoffCtrl.fvalue;
         fFinalResonance = VCFResonanceCtrl.fvalue;
 
-        // process MIDI control change and pitchbend events for this subfragment
+        // process MIDI control change, aftertouch and pitchbend events for this subfragment
         processCCEvents(itCCEvent, iSubFragmentEnd);
         uint8_t pan = MIDIPan;
-        if (pSignalUnitRack != NULL) pan = pSignalUnitRack->GetEndpointUnit()->CaluclatePan(pan);
+        if (pSignalUnitRack != NULL) pan = pSignalUnitRack->GetEndpointUnit()->CalculatePan(MIDIPan);
 
         PanLeftSmoother.update(AbstractEngine::PanCurve[128 - pan]);
         PanRightSmoother.update(AbstractEngine::PanCurve[pan]);
@@ ... @@
         // process transition events (note on, note off & sustain pedal)
         processTransitionEvents(itNoteEvent, iSubFragmentEnd);
         processGroupEvents(itGroupEvent, iSubFragmentEnd);
 
         if (pSignalUnitRack == NULL) {
             // if the voice was killed in this subfragment, or if the
             // filter EG is finished, switch EG1 to fade out stage
@@ ... @@
             if (bLFO2Enabled) fFinalCutoff *= pLFO2->render();
             if (bLFO3Enabled) finalSynthesisParameters.fFinalPitch *= RTMath::CentsToFreqRatio(pLFO3->render());
         } else {
-            // if the voice was killed in this subfragment, or if the
-            // filter EG is finished, switch EG1 to fade out stage
-            /*if ((itKillEvent && killPos <= iSubFragmentEnd) ||
-                (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) &&
-                 pEG2->getSegmentType() == EG::segment_end)) {
+            // if the voice was killed in this subfragment, enter fade out stage
+            if (itKillEvent && killPos <= iSubFragmentEnd) {
+                pSignalUnitRack->EnterFadeOutStage();
+                itKillEvent = Pool<Event>::Iterator();
+            }
+
+            // if the filter EG is finished, switch EG1 to fade out stage
+            /*if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) &&
+                pEG2->getSegmentType() == EG::segment_end) {
                 pEG1->enterFadeOutStage();
                 itKillEvent = Pool<Event>::Iterator();
             }*/
@@ ... @@
                 fFinalVolume * VolumeRight * PanRightSmoother.render();
 #endif
         // render audio for one subfragment
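+        // a voice that is still delayed produces no audio for this subfragment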
-        RunSynthesisFunction(SynthesisMode, &finalSynthesisParameters, &loop);
+        if (!delay) RunSynthesisFunction(SynthesisMode, &finalSynthesisParameters, &loop);
 
         if (pSignalUnitRack == NULL) {
             // stop the rendering if volume EG is finished
@@ ... @@
             }*/
             // TODO: ^^^
 
-            pSignalUnitRack->Increment();
+            if (!delay) pSignalUnitRack->Increment();
         }
 
         Pos = newPos;
         i = iSubFragmentEnd;
     }
 
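+    // a fully delayed voice rendered nothing, so skip the EQ and routing stages below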
+    if (delay) return;
 
     if (bVoiceRequiresDedicatedRouting) {
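+        // run the per-voice EQ and copy its output to the dedicated voice channels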
+        if (bEq) {
+            pEq->RenderAudio(Samples);
+            pEq->GetOutChannelLeft()->CopyTo(GetEngine()->pDedicatedVoiceChannelLeft, Samples);
+            pEq->GetOutChannelRight()->CopyTo(GetEngine()->pDedicatedVoiceChannelRight, Samples);
+        }
         optional<float> effectSendLevels[2] = {
             pMidiKeyInfo->ReverbSend,
             pMidiKeyInfo->ChorusSend
         };
         GetEngine()->RouteDedicatedVoiceChannels(pEngineChannel, effectSendLevels, Samples);
+    } else if (bEq) {
+        pEq->RenderAudio(Samples);
+        pEq->GetOutChannelLeft()->MixTo(pChannel->pChannelLeft, Samples);
+        pEq->GetOutChannelRight()->MixTo(pChannel->pChannelRight, Samples);
     }
 }
 
 /**
- * Process given list of MIDI control change and pitch bend events for
- * the given time.
+ * Process given list of MIDI control change, aftertouch and pitch bend
+ * events for the given time.
  *
  * @param itEvent - iterator pointing to the next event to be processed
  * @param End     - youngest time stamp where processing should be stopped
@@ ... @@
             if (itEvent->Param.CC.Controller == 7) { // volume
                 VolumeSmoother.update(AbstractEngine::VolumeCurve[itEvent->Param.CC.Value]);
             } else if (itEvent->Param.CC.Controller == 10) { // panpot
-                MIDIPan = itEvent->Param.CC.Value;
+                MIDIPan = CalculatePan(itEvent->Param.CC.Value);
             }
         } else if (itEvent->Type == Event::type_pitchbend) { // if pitch bend event
             processPitchEvent(itEvent);
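+        // aftertouch: channel pressure and polyphonic key pressure events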
+        } else if (itEvent->Type == Event::type_channel_pressure) {
+            ProcessChannelPressureEvent(itEvent);
+        } else if (itEvent->Type == Event::type_note_pressure) {
+            ProcessPolyphonicKeyPressureEvent(itEvent);
         }
 
         ProcessCCEvent(itEvent);
@@ ... @@
 
     return pitch;
 }
 
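+// Recomputes this voice's base pitch after the engine's scale tuning has changed.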
+void AbstractVoice::onScaleTuningChanged() {
+    PitchInfo pitch = this->Pitch;
+    double pitchbasecents = InstrInfo.FineTune + RgnInfo.FineTune + GetEngine()->ScaleTuning[MIDIKey % 12];
+
+    // GSt behaviour: maximum transpose up is 40 semitones. If
+    // MIDI key is more than 40 semitones above unity note,
+    // the transpose is not done.
+    if (!SmplInfo.Unpitched && (MIDIKey - (int) RgnInfo.UnityNote) < 40) pitchbasecents += (MIDIKey - (int) RgnInfo.UnityNote) * 100;
+
+    pitch.PitchBase = RTMath::CentsToFreqRatioUnlimited(pitchbasecents) * (double(SmplInfo.SampleRate) / double(GetEngine()->SampleRate));
+    this->Pitch = pitch;
+}
 
 double AbstractVoice::CalculateVolume(double velocityAttenuation) {
     // For 16 bit samples, we downscale by 32768 to convert from