--- linuxsampler/trunk/src/engines/common/AbstractVoice.cpp 2011/07/11 17:52:01 2205 +++ linuxsampler/trunk/src/engines/common/AbstractVoice.cpp 2017/05/29 22:19:19 3251 @@ -4,7 +4,8 @@ * * * Copyright (C) 2003,2004 by Benno Senoner and Christian Schoenebeck * * Copyright (C) 2005-2008 Christian Schoenebeck * - * Copyright (C) 2009-2011 Christian Schoenebeck and Grigor Iliev * + * Copyright (C) 2009-2012 Christian Schoenebeck and Grigor Iliev * + * Copyright (C) 2013-2017 Christian Schoenebeck and Andreas Persson * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * @@ -26,7 +27,7 @@ namespace LinuxSampler { - AbstractVoice::AbstractVoice() { + AbstractVoice::AbstractVoice(SignalUnitRack* pRack): pSignalUnitRack(pRack) { pEngineChannel = NULL; pLFO1 = new LFOUnsigned(1.0f); // amplitude LFO (0..1 range) pLFO2 = new LFOUnsigned(1.0f); // filter LFO (0..1 range) @@ -43,12 +44,24 @@ finalSynthesisParameters.filterLeft.Reset(); finalSynthesisParameters.filterRight.Reset(); + + pEq = NULL; + bEqSupport = false; } AbstractVoice::~AbstractVoice() { if (pLFO1) delete pLFO1; if (pLFO2) delete pLFO2; if (pLFO3) delete pLFO3; + + if(pEq != NULL) delete pEq; + } + + void AbstractVoice::CreateEq() { + if(!bEqSupport) return; + if(pEq != NULL) delete pEq; + pEq = new EqSupport; + pEq->InitEffect(GetEngine()->pAudioOutputDevice); } /** @@ -98,12 +111,12 @@ #endif // CONFIG_DEVMODE Type = VoiceType; - MIDIKey = itNoteOnEvent->Param.Note.Key; + pNote = pEngineChannel->pEngine->NoteByID( itNoteOnEvent->Param.Note.ID ); PlaybackState = playback_state_init; // mark voice as triggered, but no audio rendered yet Delay = itNoteOnEvent->FragmentPos(); itTriggerEvent = itNoteOnEvent; itKillEvent = Pool::Iterator(); - MidiKeyBase* pKeyInfo = GetMidiKeyInfo(MIDIKey); + MidiKeyBase* pKeyInfo = GetMidiKeyInfo(MIDIKey()); pGroupEvents = iKeyGroup ? pEngineChannel->ActiveKeyGroups[iKeyGroup] : 0; @@ -111,10 +124,12 @@ RgnInfo = GetRegionInfo(); InstrInfo = GetInstrumentInfo(); + MIDIPan = CalculatePan(pEngineChannel->iLastPanRequest); + AboutToTrigger(); // calculate volume - const double velocityAttenuation = GetVelocityAttenuation(itNoteOnEvent->Param.Note.Velocity); + const double velocityAttenuation = GetVelocityAttenuation(MIDIVelocity()); float volume = CalculateVolume(velocityAttenuation) * pKeyInfo->Volume; if (volume <= 0) return -1; @@ -124,24 +139,28 @@ SYNTHESIS_MODE_SET_BITDEPTH24(SynthesisMode, SmplInfo.BitDepth == 24); // get starting crossfade volume level - float crossfadeVolume = CalculateCrossfadeVolume(itNoteOnEvent->Param.Note.Velocity); + float crossfadeVolume = CalculateCrossfadeVolume(MIDIVelocity()); - VolumeLeft = volume * pKeyInfo->PanLeft * AbstractEngine::PanCurve[64 - RgnInfo.Pan]; - VolumeRight = volume * pKeyInfo->PanRight * AbstractEngine::PanCurve[64 + RgnInfo.Pan]; + VolumeLeft = volume * pKeyInfo->PanLeft; + VolumeRight = volume * pKeyInfo->PanRight; - float subfragmentRate = GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE; + // this rate is used for rather mellow volume fades + const float subfragmentRate = GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE; + // this rate is used for very fast volume fades + const float quickRampRate = RTMath::Min(subfragmentRate, GetEngine()->SampleRate * 0.001f /* approx. 
13ms */); CrossfadeSmoother.trigger(crossfadeVolume, subfragmentRate); - VolumeSmoother.trigger(pEngineChannel->MidiVolume, subfragmentRate); - PanLeftSmoother.trigger(pEngineChannel->GlobalPanLeft, subfragmentRate); - PanRightSmoother.trigger(pEngineChannel->GlobalPanRight, subfragmentRate); - finalSynthesisParameters.dPos = RgnInfo.SampleStartOffset; // offset where we should start playback of sample (0 - 2000 sample points) - Pos = RgnInfo.SampleStartOffset; + VolumeSmoother.trigger(pEngineChannel->MidiVolume, subfragmentRate); + NoteVolume.setCurveOnly(pNote ? pNote->Override.VolumeCurve : DEFAULT_FADE_CURVE); + NoteVolume.setCurrentValue(pNote ? pNote->Override.Volume : 1.f); + NoteVolume.setDefaultDuration(pNote ? pNote->Override.VolumeTime : DEFAULT_NOTE_VOLUME_TIME_S); // Check if the sample needs disk streaming or is too short for that long cachedsamples = GetSampleCacheSize() / SmplInfo.FrameSize; DiskVoice = cachedsamples < SmplInfo.TotalFrameCount; + SetSampleStartOffset(); + if (DiskVoice) { // voice to be streamed from disk if (cachedsamples > (GetEngine()->MaxSamplesPerCycle << CONFIG_MAX_PITCH)) { MaxRAMPos = cachedsamples - (GetEngine()->MaxSamplesPerCycle << CONFIG_MAX_PITCH) / SmplInfo.ChannelCount; //TODO: this calculation is too pessimistic and may better be moved to Render() method, so it calculates MaxRAMPos dependent to the current demand of sample points to be rendered (e.g. in case of JACK) @@ -158,7 +177,7 @@ RAMLoop = (SmplInfo.HasLoops && (SmplInfo.LoopStart + SmplInfo.LoopLength) <= MaxRAMPos); if (OrderNewStream()) return -1; - dmsg(4,("Disk voice launched (cached samples: %d, total Samples: %d, MaxRAMPos: %d, RAMLooping: %s)\n", cachedsamples, SmplInfo.TotalFrameCount, MaxRAMPos, (RAMLoop) ? "yes" : "no")); + dmsg(4,("Disk voice launched (cached samples: %ld, total Samples: %d, MaxRAMPos: %lu, RAMLooping: %s)\n", cachedsamples, SmplInfo.TotalFrameCount, MaxRAMPos, (RAMLoop) ? "yes" : "no")); } else { // RAM only voice MaxRAMPos = cachedsamples; @@ -174,22 +193,45 @@ } Pitch = CalculatePitchInfo(PitchBend); + NotePitch.setCurveOnly(pNote ? pNote->Override.PitchCurve : DEFAULT_FADE_CURVE); + NotePitch.setCurrentValue(pNote ? pNote->Override.Pitch : 1.0f); + NotePitch.setDefaultDuration(pNote ? pNote->Override.PitchTime : DEFAULT_NOTE_PITCH_TIME_S); + NoteCutoff = (pNote) ? pNote->Override.Cutoff : 1.0f; + NoteResonance = (pNote) ? 
pNote->Override.Resonance : 1.0f; // the length of the decay and release curves are dependent on the velocity - const double velrelease = 1 / GetVelocityRelease(itNoteOnEvent->Param.Note.Velocity); + const double velrelease = 1 / GetVelocityRelease(MIDIVelocity()); - if (GetSignalUnitRack() == NULL) { // setup EG 1 (VCA EG) + if (pSignalUnitRack == NULL) { // setup EG 1 (VCA EG) // get current value of EG1 controller - double eg1controllervalue = GetEG1ControllerValue(itNoteOnEvent->Param.Note.Velocity); + double eg1controllervalue = GetEG1ControllerValue(MIDIVelocity()); // calculate influence of EG1 controller on EG1's parameters EGInfo egInfo = CalculateEG1ControllerInfluence(eg1controllervalue); - TriggerEG1(egInfo, velrelease, velocityAttenuation, GetEngine()->SampleRate, itNoteOnEvent->Param.Note.Velocity); + if (pNote) { + egInfo.Attack *= pNote->Override.Attack; + egInfo.Decay *= pNote->Override.Decay; + egInfo.Release *= pNote->Override.Release; + } + + TriggerEG1(egInfo, velrelease, velocityAttenuation, GetEngine()->SampleRate, MIDIVelocity()); } else { - GetSignalUnitRack()->Trigger(); + pSignalUnitRack->Trigger(); } + const uint8_t pan = (pSignalUnitRack) ? pSignalUnitRack->GetEndpointUnit()->CalculatePan(MIDIPan) : MIDIPan; + NotePanLeft = (pNote) ? AbstractEngine::PanCurveValueNorm(pNote->Override.Pan, 0 /*left*/ ) : 1.f; + NotePanRight = (pNote) ? AbstractEngine::PanCurveValueNorm(pNote->Override.Pan, 1 /*right*/) : 1.f; + PanLeftSmoother.trigger( + AbstractEngine::PanCurve[128 - pan] * NotePanLeft, + quickRampRate //NOTE: maybe we should have 2 separate pan smoothers, one for MIDI CC10 (with slow rate) and one for instrument script change_pan() calls (with fast rate) + ); + PanRightSmoother.trigger( + AbstractEngine::PanCurve[pan] * NotePanRight, + quickRampRate //NOTE: maybe we should have 2 separate pan smoothers, one for MIDI CC10 (with slow rate) and one for instrument script change_pan() calls (with fast rate) + ); + #ifdef CONFIG_INTERPOLATE_VOLUME // setup initial volume in synthesis parameters #ifdef CONFIG_PROCESS_MUTED_CHANNELS @@ -201,28 +243,28 @@ #else { float finalVolume; - if (GetSignalUnitRack() == NULL) { + if (pSignalUnitRack == NULL) { finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * pEG1->getLevel(); } else { - finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * GetSignalUnitRack()->GetEndpointUnit()->GetVolume(); + finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * pSignalUnitRack->GetEndpointUnit()->GetVolume(); } - finalSynthesisParameters.fFinalVolumeLeft = finalVolume * VolumeLeft * pEngineChannel->GlobalPanLeft; - finalSynthesisParameters.fFinalVolumeRight = finalVolume * VolumeRight * pEngineChannel->GlobalPanRight; + finalSynthesisParameters.fFinalVolumeLeft = finalVolume * VolumeLeft * PanLeftSmoother.render(); + finalSynthesisParameters.fFinalVolumeRight = finalVolume * VolumeRight * PanRightSmoother.render(); } #endif #endif - if (GetSignalUnitRack() == NULL) { + if (pSignalUnitRack == NULL) { // setup EG 2 (VCF Cutoff EG) { // get current value of EG2 controller - double eg2controllervalue = GetEG2ControllerValue(itNoteOnEvent->Param.Note.Velocity); + double eg2controllervalue = GetEG2ControllerValue(MIDIVelocity()); // calculate influence of EG2 controller on EG2's parameters EGInfo egInfo = CalculateEG2ControllerInfluence(eg2controllervalue); - TriggerEG2(egInfo, velrelease, velocityAttenuation, GetEngine()->SampleRate, itNoteOnEvent->Param.Note.Velocity); + TriggerEG2(egInfo, velrelease, velocityAttenuation, 
GetEngine()->SampleRate, MIDIVelocity()); } @@ -231,7 +273,7 @@ // if portamento mode is on, we dedicate EG3 purely for portamento, otherwise if portamento is off we do as told by the patch bool bPortamento = pEngineChannel->PortamentoMode && pEngineChannel->PortamentoPos >= 0.0f; float eg3depth = (bPortamento) - ? RTMath::CentsToFreqRatio((pEngineChannel->PortamentoPos - (float) MIDIKey) * 100) + ? RTMath::CentsToFreqRatio((pEngineChannel->PortamentoPos - (float) MIDIKey()) * 100) : RTMath::CentsToFreqRatio(RgnInfo.EG3Depth); float eg3time = (bPortamento) ? pEngineChannel->PortamentoTime @@ -281,7 +323,7 @@ VCFResonanceCtrl.value = pEngineChannel->ControllerTable[VCFResonanceCtrl.controller]; // calculate cutoff frequency - CutoffBase = CalculateCutoffBase(itNoteOnEvent->Param.Note.Velocity); + CutoffBase = CalculateCutoffBase(MIDIVelocity()); VCFCutoffCtrl.fvalue = CalculateFinalCutoff(CutoffBase); @@ -292,9 +334,34 @@ VCFCutoffCtrl.controller = 0; VCFResonanceCtrl.controller = 0; } + + const bool bEq = + pSignalUnitRack != NULL && pSignalUnitRack->HasEq() && pEq->HasSupport(); + + if (bEq) { + pEq->GetInChannelLeft()->Clear(); + pEq->GetInChannelRight()->Clear(); + pEq->RenderAudio(GetEngine()->pAudioOutputDevice->MaxSamplesPerCycle()); + } return 0; // success } + + void AbstractVoice::SetSampleStartOffset() { + double pos = RgnInfo.SampleStartOffset; // offset where we should start playback of sample + + // if another sample playback start position was requested by instrument + // script (built-in script function play_note()) + if (pNote && pNote->Override.SampleOffset >= 0) { + double overridePos = + double(SmplInfo.SampleRate) * double(pNote->Override.SampleOffset) / 1000000.0; + if (overridePos < SmplInfo.TotalFrameCount) + pos = overridePos; + } + + finalSynthesisParameters.dPos = pos; + Pos = pos; + } /** * Synthesizes the current audio fragment for this voice. 
@@ -305,14 +372,39 @@ * @param Skip - number of sample points to skip in output buffer */ void AbstractVoice::Synthesize(uint Samples, sample_t* pSrc, uint Skip) { + bool delay = false; // Whether the voice playback should be delayed for this call + + if (pSignalUnitRack != NULL) { + uint delaySteps = pSignalUnitRack->GetEndpointUnit()->DelayTrigger(); + if (delaySteps > 0) { // delay on the endpoint unit means delay of the voice playback + if (delaySteps >= Samples) { + pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(Samples); + delay = true; + } else { + pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(delaySteps); + Samples -= delaySteps; + Skip += delaySteps; + } + } + } + AbstractEngineChannel* pChannel = pEngineChannel; - MidiKeyBase* pMidiKeyInfo = GetMidiKeyInfo(MIDIKey); + MidiKeyBase* pMidiKeyInfo = GetMidiKeyInfo(MIDIKey()); const bool bVoiceRequiresDedicatedRouting = pEngineChannel->GetFxSendCount() > 0 && (pMidiKeyInfo->ReverbSend || pMidiKeyInfo->ChorusSend); + + const bool bEq = + pSignalUnitRack != NULL && pSignalUnitRack->HasEq() && pEq->HasSupport(); - if (bVoiceRequiresDedicatedRouting) { + if (bEq) { + pEq->GetInChannelLeft()->Clear(); + pEq->GetInChannelRight()->Clear(); + finalSynthesisParameters.pOutLeft = &pEq->GetInChannelLeft()->Buffer()[Skip]; + finalSynthesisParameters.pOutRight = &pEq->GetInChannelRight()->Buffer()[Skip]; + pSignalUnitRack->UpdateEqSettings(pEq); + } else if (bVoiceRequiresDedicatedRouting) { finalSynthesisParameters.pOutLeft = &GetEngine()->pDedicatedVoiceChannelLeft->Buffer()[Skip]; finalSynthesisParameters.pOutRight = &GetEngine()->pDedicatedVoiceChannelRight->Buffer()[Skip]; } else { @@ -323,10 +415,10 @@ RTList::Iterator itCCEvent = pChannel->pEvents->first(); RTList::Iterator itNoteEvent; - GetFirstEventOnKey(MIDIKey, itNoteEvent); + GetFirstEventOnKey(HostKey(), itNoteEvent); RTList::Iterator itGroupEvent; - if (pGroupEvents) itGroupEvent = pGroupEvents->first(); + if (pGroupEvents && !Orphan) itGroupEvent = pGroupEvents->first(); if (itTriggerEvent) { // skip events that happened before this voice was triggered while (itCCEvent && itCCEvent->FragmentPos() <= Skip) ++itCCEvent; @@ -343,7 +435,7 @@ } } - uint killPos; + uint killPos = 0; if (itKillEvent) { int maxFadeOutPos = Samples - GetEngine()->GetMinFadeOutSamples(); if (maxFadeOutPos < 0) { @@ -352,10 +444,10 @@ // drivers that use Samples < MaxSamplesPerCycle). // End the EG1 here, at pos 0, with a shorter max fade // out time. 
- if (GetSignalUnitRack() == NULL) { + if (pSignalUnitRack == NULL) { pEG1->enterFadeOutStage(Samples / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); } else { - // TODO: + pSignalUnitRack->EnterFadeOutStage(Samples / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); } itKillEvent = Pool::Iterator(); } else { @@ -371,11 +463,17 @@ fFinalCutoff = VCFCutoffCtrl.fvalue; fFinalResonance = VCFResonanceCtrl.fvalue; - // process MIDI control change and pitchbend events for this subfragment + // process MIDI control change, aftertouch and pitchbend events for this subfragment processCCEvents(itCCEvent, iSubFragmentEnd); + uint8_t pan = MIDIPan; + if (pSignalUnitRack != NULL) pan = pSignalUnitRack->GetEndpointUnit()->CalculatePan(MIDIPan); + + PanLeftSmoother.update(AbstractEngine::PanCurve[128 - pan] * NotePanLeft); + PanRightSmoother.update(AbstractEngine::PanCurve[pan] * NotePanRight); + + finalSynthesisParameters.fFinalPitch = Pitch.PitchBase * Pitch.PitchBend * NotePitch.render(); - finalSynthesisParameters.fFinalPitch = Pitch.PitchBase * Pitch.PitchBend; - float fFinalVolume = VolumeSmoother.render() * CrossfadeSmoother.render(); + float fFinalVolume = VolumeSmoother.render() * CrossfadeSmoother.render() * NoteVolume.render(); #ifdef CONFIG_PROCESS_MUTED_CHANNELS if (pChannel->GetMute()) fFinalVolume = 0; #endif @@ -383,8 +481,8 @@ // process transition events (note on, note off & sustain pedal) processTransitionEvents(itNoteEvent, iSubFragmentEnd); processGroupEvents(itGroupEvent, iSubFragmentEnd); - - if (GetSignalUnitRack() == NULL) { + + if (pSignalUnitRack == NULL) { // if the voice was killed in this subfragment, or if the // filter EG is finished, switch EG1 to fade out stage if ((itKillEvent && killPos <= iSubFragmentEnd) || @@ -427,28 +525,35 @@ // process low frequency oscillators if (bLFO1Enabled) fFinalVolume *= (1.0f - pLFO1->render()); - if (bLFO2Enabled) fFinalCutoff *= pLFO2->render(); + if (bLFO2Enabled) fFinalCutoff *= (1.0f - pLFO2->render()); if (bLFO3Enabled) finalSynthesisParameters.fFinalPitch *= RTMath::CentsToFreqRatio(pLFO3->render()); } else { - // if the voice was killed in this subfragment, or if the - // filter EG is finished, switch EG1 to fade out stage - /*if ((itKillEvent && killPos <= iSubFragmentEnd) || - (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) && - pEG2->getSegmentType() == EG::segment_end)) { + // if the voice was killed in this subfragment, enter fade out stage + if (itKillEvent && killPos <= iSubFragmentEnd) { + pSignalUnitRack->EnterFadeOutStage(); + itKillEvent = Pool::Iterator(); + } + + // if the filter EG is finished, switch EG1 to fade out stage + /*if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) && + pEG2->getSegmentType() == EG::segment_end) { pEG1->enterFadeOutStage(); itKillEvent = Pool::Iterator(); }*/ // TODO: ^^^ - fFinalVolume *= GetSignalUnitRack()->GetEndpointUnit()->GetVolume(); - fFinalCutoff = GetSignalUnitRack()->GetEndpointUnit()->CalculateFilterCutoff(fFinalCutoff); - fFinalResonance = GetSignalUnitRack()->GetEndpointUnit()->CalculateResonance(fFinalResonance); + fFinalVolume *= pSignalUnitRack->GetEndpointUnit()->GetVolume(); + fFinalCutoff = pSignalUnitRack->GetEndpointUnit()->CalculateFilterCutoff(fFinalCutoff); + fFinalResonance = pSignalUnitRack->GetEndpointUnit()->CalculateResonance(fFinalResonance); finalSynthesisParameters.fFinalPitch = - GetSignalUnitRack()->GetEndpointUnit()->CalculatePitch(finalSynthesisParameters.fFinalPitch); + pSignalUnitRack->GetEndpointUnit()->CalculatePitch(finalSynthesisParameters.fFinalPitch); } - + + fFinalCutoff *= 
NoteCutoff; + fFinalResonance *= NoteResonance; + // limit the pitch so we don't read outside the buffer finalSynthesisParameters.fFinalPitch = RTMath::Min(finalSynthesisParameters.fFinalPitch, float(1 << CONFIG_MAX_PITCH)); @@ -481,19 +586,19 @@ fFinalVolume * VolumeRight * PanRightSmoother.render(); #endif // render audio for one subfragment - RunSynthesisFunction(SynthesisMode, &finalSynthesisParameters, &loop); + if (!delay) RunSynthesisFunction(SynthesisMode, &finalSynthesisParameters, &loop); - if (GetSignalUnitRack() == NULL) { + if (pSignalUnitRack == NULL) { // stop the rendering if volume EG is finished if (pEG1->getSegmentType() == EG::segment_end) break; } else { // stop the rendering if the endpoint unit is not active - if (!GetSignalUnitRack()->GetEndpointUnit()->Active()) break; + if (!pSignalUnitRack->GetEndpointUnit()->Active()) break; } const double newPos = Pos + (iSubFragmentEnd - i) * finalSynthesisParameters.fFinalPitch; - if (GetSignalUnitRack() == NULL) { + if (pSignalUnitRack == NULL) { // increment envelopes' positions if (pEG1->active()) { @@ -518,62 +623,81 @@ }*/ // TODO: ^^^ - GetSignalUnitRack()->Increment(); + if (!delay) pSignalUnitRack->Increment(); } Pos = newPos; i = iSubFragmentEnd; } + + if (delay) return; if (bVoiceRequiresDedicatedRouting) { + if (bEq) { + pEq->RenderAudio(Samples); + pEq->GetOutChannelLeft()->CopyTo(GetEngine()->pDedicatedVoiceChannelLeft, Samples); + pEq->GetOutChannelRight()->CopyTo(GetEngine()->pDedicatedVoiceChannelRight, Samples); + } optional effectSendLevels[2] = { pMidiKeyInfo->ReverbSend, pMidiKeyInfo->ChorusSend }; GetEngine()->RouteDedicatedVoiceChannels(pEngineChannel, effectSendLevels, Samples); + } else if (bEq) { + pEq->RenderAudio(Samples); + pEq->GetOutChannelLeft()->MixTo(pChannel->pChannelLeft, Samples); + pEq->GetOutChannelRight()->MixTo(pChannel->pChannelRight, Samples); } } /** - * Process given list of MIDI control change and pitch bend events for - * the given time. + * Process given list of MIDI control change, aftertouch and pitch bend + * events for the given time. 
* * @param itEvent - iterator pointing to the next event to be processed * @param End - youngest time stamp where processing should be stopped */ void AbstractVoice::processCCEvents(RTList::Iterator& itEvent, uint End) { for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) { - if (itEvent->Type == Event::type_control_change && itEvent->Param.CC.Controller) { // if (valid) MIDI control change event + if ((itEvent->Type == Event::type_control_change || itEvent->Type == Event::type_channel_pressure) + && itEvent->Param.CC.Controller) // if (valid) MIDI control change event + { if (itEvent->Param.CC.Controller == VCFCutoffCtrl.controller) { ProcessCutoffEvent(itEvent); } if (itEvent->Param.CC.Controller == VCFResonanceCtrl.controller) { processResonanceEvent(itEvent); } - if (GetSignalUnitRack() == NULL) { + if (itEvent->Param.CC.Controller == CTRL_TABLE_IDX_AFTERTOUCH || + itEvent->Type == Event::type_channel_pressure) + { + ProcessChannelPressureEvent(itEvent); + } + if (pSignalUnitRack == NULL) { if (itEvent->Param.CC.Controller == pLFO1->ExtController) { - pLFO1->update(itEvent->Param.CC.Value); + pLFO1->updateByMIDICtrlValue(itEvent->Param.CC.Value); } if (itEvent->Param.CC.Controller == pLFO2->ExtController) { - pLFO2->update(itEvent->Param.CC.Value); + pLFO2->updateByMIDICtrlValue(itEvent->Param.CC.Value); } if (itEvent->Param.CC.Controller == pLFO3->ExtController) { - pLFO3->update(itEvent->Param.CC.Value); + pLFO3->updateByMIDICtrlValue(itEvent->Param.CC.Value); } } if (itEvent->Param.CC.Controller == 7) { // volume VolumeSmoother.update(AbstractEngine::VolumeCurve[itEvent->Param.CC.Value]); } else if (itEvent->Param.CC.Controller == 10) { // panpot - PanLeftSmoother.update(AbstractEngine::PanCurve[128 - itEvent->Param.CC.Value]); - PanRightSmoother.update(AbstractEngine::PanCurve[itEvent->Param.CC.Value]); + MIDIPan = CalculatePan(itEvent->Param.CC.Value); } } else if (itEvent->Type == Event::type_pitchbend) { // if pitch bend event processPitchEvent(itEvent); + } else if (itEvent->Type == Event::type_note_pressure) { + ProcessPolyphonicKeyPressureEvent(itEvent); } ProcessCCEvent(itEvent); - if (GetSignalUnitRack() != NULL) { - GetSignalUnitRack()->ProcessCCEvent(itEvent); + if (pSignalUnitRack != NULL) { + pSignalUnitRack->ProcessCCEvent(itEvent); } } } @@ -593,8 +717,8 @@ } /** - * Process given list of MIDI note on, note off and sustain pedal events - * for the given time. + * Process given list of MIDI note on, note off, sustain pedal events and + * note synthesis parameter events for the given time. 
* * @param itEvent - iterator pointing to the next event to be processed * @param End - youngest time stamp where processing should be stopped @@ -603,17 +727,81 @@ for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) { // some voice types ignore note off if (!(Type & (Voice::type_one_shot | Voice::type_release_trigger | Voice::type_controller_triggered))) { - if (itEvent->Type == Event::type_release) { + if (itEvent->Type == Event::type_release_key) { EnterReleaseStage(); - } else if (itEvent->Type == Event::type_cancel_release) { - if (GetSignalUnitRack() == NULL) { + } else if (itEvent->Type == Event::type_cancel_release_key) { + if (pSignalUnitRack == NULL) { pEG1->update(EG::event_cancel_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); pEG2->update(EG::event_cancel_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); } else { - GetSignalUnitRack()->CancelRelease(); + pSignalUnitRack->CancelRelease(); } } } + // process stop-note events (caused by built-in instrument script function note_off()) + if (itEvent->Type == Event::type_release_note && pNote && + pEngineChannel->pEngine->NoteByID( itEvent->Param.Note.ID ) == pNote) + { + EnterReleaseStage(); + } + // process kill-note events (caused by built-in instrument script function fade_out()) + if (itEvent->Type == Event::type_kill_note && pNote && + pEngineChannel->pEngine->NoteByID( itEvent->Param.Note.ID ) == pNote) + { + Kill(itEvent); + } + // process synthesis parameter events (caused by built-in realt-time instrument script functions) + if (itEvent->Type == Event::type_note_synth_param && pNote && + pEngineChannel->pEngine->NoteByID( itEvent->Param.NoteSynthParam.NoteID ) == pNote) + { + switch (itEvent->Param.NoteSynthParam.Type) { + case Event::synth_param_volume: + NoteVolume.fadeTo(itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); + break; + case Event::synth_param_volume_time: + NoteVolume.setDefaultDuration(itEvent->Param.NoteSynthParam.AbsValue); + break; + case Event::synth_param_volume_curve: + NoteVolume.setCurve((fade_curve_t)itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); + break; + case Event::synth_param_pitch: + NotePitch.fadeTo(itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); + break; + case Event::synth_param_pitch_time: + NotePitch.setDefaultDuration(itEvent->Param.NoteSynthParam.AbsValue); + break; + case Event::synth_param_pitch_curve: + NotePitch.setCurve((fade_curve_t)itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); + break; + case Event::synth_param_pan: + NotePanLeft = AbstractEngine::PanCurveValueNorm(itEvent->Param.NoteSynthParam.AbsValue, 0 /*left*/); + NotePanRight = AbstractEngine::PanCurveValueNorm(itEvent->Param.NoteSynthParam.AbsValue, 1 /*right*/); + break; + case Event::synth_param_cutoff: + NoteCutoff = itEvent->Param.NoteSynthParam.AbsValue; + break; + case Event::synth_param_resonance: + NoteResonance = itEvent->Param.NoteSynthParam.AbsValue; + break; + case Event::synth_param_amp_lfo_depth: + pLFO1->setScriptDepthFactor(itEvent->Param.NoteSynthParam.AbsValue); + break; + case Event::synth_param_amp_lfo_freq: + pLFO1->setScriptFrequencyFactor(itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); + break; + case Event::synth_param_pitch_lfo_depth: + 
pLFO3->setScriptDepthFactor(itEvent->Param.NoteSynthParam.AbsValue); + break; + case Event::synth_param_pitch_lfo_freq: + pLFO3->setScriptFrequencyFactor(itEvent->Param.NoteSynthParam.AbsValue, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); + break; + + case Event::synth_param_attack: + case Event::synth_param_decay: + case Event::synth_param_release: + break; // noop + } + } } } @@ -638,9 +826,9 @@ * @param itNoteOffEvent - event which causes this voice to die soon */ void AbstractVoice::UpdatePortamentoPos(Pool::Iterator& itNoteOffEvent) { - if (GetSignalUnitRack() == NULL) { + if (pSignalUnitRack == NULL) { const float fFinalEG3Level = EG3.level(itNoteOffEvent->FragmentPos()); - pEngineChannel->PortamentoPos = (float) MIDIKey + RTMath::FreqRatioToCents(fFinalEG3Level) * 0.01f; + pEngineChannel->PortamentoPos = (float) MIDIKey() + RTMath::FreqRatioToCents(fFinalEG3Level) * 0.01f; } else { // TODO: } @@ -667,12 +855,12 @@ Voice::PitchInfo AbstractVoice::CalculatePitchInfo(int PitchBend) { PitchInfo pitch; - double pitchbasecents = InstrInfo.FineTune + RgnInfo.FineTune + GetEngine()->ScaleTuning[MIDIKey % 12]; + double pitchbasecents = InstrInfo.FineTune + RgnInfo.FineTune + GetEngine()->ScaleTuning[MIDIKey() % 12]; // GSt behaviour: maximum transpose up is 40 semitones. If // MIDI key is more than 40 semitones above unity note, // the transpose is not done. - if (!SmplInfo.Unpitched && (MIDIKey - (int) RgnInfo.UnityNote) < 40) pitchbasecents += (MIDIKey - (int) RgnInfo.UnityNote) * 100; + if (!SmplInfo.Unpitched && (MIDIKey() - (int) RgnInfo.UnityNote) < 40) pitchbasecents += (MIDIKey() - (int) RgnInfo.UnityNote) * 100; pitch.PitchBase = RTMath::CentsToFreqRatioUnlimited(pitchbasecents) * (double(SmplInfo.SampleRate) / double(GetEngine()->SampleRate)); pitch.PitchBendRange = 1.0 / 8192.0 * 100.0 * InstrInfo.PitchbendRange; @@ -680,6 +868,19 @@ return pitch; } + + void AbstractVoice::onScaleTuningChanged() { + PitchInfo pitch = this->Pitch; + double pitchbasecents = InstrInfo.FineTune + RgnInfo.FineTune + GetEngine()->ScaleTuning[MIDIKey() % 12]; + + // GSt behaviour: maximum transpose up is 40 semitones. If + // MIDI key is more than 40 semitones above unity note, + // the transpose is not done. 
+ if (!SmplInfo.Unpitched && (MIDIKey() - (int) RgnInfo.UnityNote) < 40) pitchbasecents += (MIDIKey() - (int) RgnInfo.UnityNote) * 100; + + pitch.PitchBase = RTMath::CentsToFreqRatioUnlimited(pitchbasecents) * (double(SmplInfo.SampleRate) / double(GetEngine()->SampleRate)); + this->Pitch = pitch; + } double AbstractVoice::CalculateVolume(double velocityAttenuation) { // For 16 bit samples, we downscale by 32768 to convert from @@ -692,7 +893,7 @@ // the volume of release triggered samples depends on note length if (Type & Voice::type_release_trigger) { float noteLength = float(GetEngine()->FrameTime + Delay - - GetNoteOnTime(MIDIKey) ) / GetEngine()->SampleRate; + GetNoteOnTime(MIDIKey()) ) / GetEngine()->SampleRate; volume *= GetReleaseTriggerAttenuation(noteLength); } @@ -705,19 +906,19 @@ } void AbstractVoice::EnterReleaseStage() { - if (GetSignalUnitRack() == NULL) { + if (pSignalUnitRack == NULL) { pEG1->update(EG::event_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); pEG2->update(EG::event_release, GetEngine()->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); } else { - GetSignalUnitRack()->EnterReleaseStage(); + pSignalUnitRack->EnterReleaseStage(); } } bool AbstractVoice::EG1Finished() { - if (GetSignalUnitRack() == NULL) { + if (pSignalUnitRack == NULL) { return pEG1->getSegmentType() == EG::segment_end; } else { - return !GetSignalUnitRack()->GetEndpointUnit()->Active(); + return !pSignalUnitRack->GetEndpointUnit()->Active(); } }
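
[editor's note] The new AbstractVoice::SetSampleStartOffset() hunk in this patch converts a per-note sample offset requested by an instrument script (via the built-in play_note() function, given in microseconds) into a frame position, and only honours it when that position still lies inside the sample. Below is a minimal standalone sketch of that conversion; the names SampleInfo, RegionInfo, NoteOverride and startPosFrames are hypothetical stand-ins for the engine's SmplInfo, RgnInfo and pNote->Override members and are not part of the patch itself.

#include <cstdio>

// Hypothetical stand-ins for the engine's SmplInfo / RgnInfo / pNote->Override fields.
struct SampleInfo   { double SampleRate; long TotalFrameCount; };
struct RegionInfo   { double SampleStartOffset; };  // start offset in frames, from the patch/region
struct NoteOverride { double SampleOffsetUs; };     // requested offset in microseconds, from play_note()

// Mirrors the logic of the SetSampleStartOffset() hunk above: prefer the
// script-requested offset, but only if it falls within the sample's length.
static double startPosFrames(const SampleInfo& smpl, const RegionInfo& rgn,
                             const NoteOverride* ovr /* may be null */) {
    double pos = rgn.SampleStartOffset;
    if (ovr && ovr->SampleOffsetUs >= 0) {
        const double overridePos = smpl.SampleRate * ovr->SampleOffsetUs / 1000000.0;
        if (overridePos < smpl.TotalFrameCount) pos = overridePos;
    }
    return pos;
}

int main() {
    SampleInfo   smpl = { 44100.0, 220500 };  // a 5 s sample at 44.1 kHz
    RegionInfo   rgn  = { 0.0 };
    NoteOverride ovr  = { 250000.0 };         // script asked for a 250 ms start offset
    std::printf("start at frame %.0f\n", startPosFrames(smpl, rgn, &ovr));
    return 0;
}

Run as-is this prints "start at frame 11025", i.e. a 250 ms request at 44.1 kHz; an offset beyond TotalFrameCount would be ignored and the region's own SampleStartOffset kept, just as in the patched method.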