--- linuxsampler/trunk/src/engines/gig/Voice.cpp 2005/09/03 11:14:30 769
+++ linuxsampler/trunk/src/engines/gig/Voice.cpp 2009/05/03 12:15:40 1895
@@ -3,7 +3,7 @@
  *   LinuxSampler - modular, streaming capable sampler                     *
  *                                                                         *
  *   Copyright (C) 2003, 2004 by Benno Senoner and Christian Schoenebeck   *
- *   Copyright (C) 2005 Christian Schoenebeck                              *
+ *   Copyright (C) 2005 - 2009 Christian Schoenebeck                       *
  *                                                                         *
  *   This program is free software; you can redistribute it and/or modify  *
  *   it under the terms of the GNU General Public License as published by  *
@@ -23,17 +23,12 @@
 
 #include "../../common/Features.h"
 #include "Synthesizer.h"
+#include "Profiler.h"
 
 #include "Voice.h"
 
 namespace LinuxSampler { namespace gig {
 
-    const float Voice::FILTER_CUTOFF_COEFF(CalculateFilterCutoffCoeff());
-
-    float Voice::CalculateFilterCutoffCoeff() {
-        return log(CONFIG_FILTER_CUTOFF_MAX / CONFIG_FILTER_CUTOFF_MIN);
-    }
-
     Voice::Voice() {
         pEngine = NULL;
        pDiskThread = NULL;
@@ -43,16 +38,16 @@
         pLFO3 = new LFOSigned(1200.0f); // pitch EG (-1200..+1200 range)
         KeyGroup = 0;
         SynthesisMode = 0; // set all mode bits to 0 first
-        // select synthesis implementation (currently either pure C++ or MMX+SSE(1))
-        #if CONFIG_ASM && ARCH_X86
+        // select synthesis implementation (asm core is not supported ATM)
+        #if 0 // CONFIG_ASM && ARCH_X86
         SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, Features::supportsMMX() && Features::supportsSSE());
         #else
         SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, false);
         #endif
-        SYNTHESIS_MODE_SET_PROFILING(SynthesisMode, true);
+        SYNTHESIS_MODE_SET_PROFILING(SynthesisMode, Profiler::isEnabled());
 
-        FilterLeft.Reset();
-        FilterRight.Reset();
+        finalSynthesisParameters.filterLeft.Reset();
+        finalSynthesisParameters.filterRight.Reset();
     }
 
     Voice::~Voice() {
@@ -84,6 +79,7 @@
     int Voice::Trigger(EngineChannel* pEngineChannel, Pool<Event>::Iterator& itNoteOnEvent, int PitchBend, ::gig::DimensionRegion* pDimRgn, type_t VoiceType, int iKeyGroup) {
         this->pEngineChannel = pEngineChannel;
         this->pDimRgn        = pDimRgn;
+        Orphan = false;
 
         #if CONFIG_DEVMODE
         if (itNoteOnEvent->FragmentPos() > pEngine->MaxSamplesPerCycle) { // just a sanity check for debugging
@@ -103,9 +99,12 @@
 
         // calculate volume
         const double velocityAttenuation = pDimRgn->GetVelocityAttenuation(itNoteOnEvent->Param.Note.Velocity);
-        Volume = velocityAttenuation / 32768.0f; // we downscale by 32768 to convert from int16 value range to DSP value range (which is -1.0..1.0)
+        // For 16 bit samples, we downscale by 32768 to convert from
+        // int16 value range to DSP value range (which is
+        // -1.0..1.0). For 24 bit, we downscale from int32.
+        float volume = velocityAttenuation / (pSample->BitDepth == 16 ? 32768.0f : 32768.0f * 65536.0f);
 
-        Volume *= pDimRgn->SampleAttenuation;
+        volume *= pDimRgn->SampleAttenuation * pEngineChannel->GlobalVolume * GLOBAL_VOLUME;
 
         // the volume of release triggered samples depends on note length
         if (Type == type_release_trigger) {
@@ -113,48 +112,65 @@
                                         pEngineChannel->pMIDIKeyInfo[MIDIKey].NoteOnTime) / pEngine->SampleRate;
             float attenuation = 1 - 0.01053 * (256 >> pDimRgn->ReleaseTriggerDecay) * noteLength;
             if (attenuation <= 0) return -1;
-            Volume *= attenuation;
+            volume *= attenuation;
         }
 
         // select channel mode (mono or stereo)
         SYNTHESIS_MODE_SET_CHANNELS(SynthesisMode, pSample->Channels == 2);
+        // select bit depth (16 or 24)
+        SYNTHESIS_MODE_SET_BITDEPTH24(SynthesisMode, pSample->BitDepth == 24);
 
         // get starting crossfade volume level
+        float crossfadeVolume;
         switch (pDimRgn->AttenuationController.type) {
             case ::gig::attenuation_ctrl_t::type_channelaftertouch:
-                CrossfadeVolume = 1.0f; //TODO: aftertouch not supported yet
+                crossfadeVolume = Engine::CrossfadeCurve[CrossfadeAttenuation(pEngineChannel->ControllerTable[128])];
                 break;
             case ::gig::attenuation_ctrl_t::type_velocity:
-                CrossfadeVolume = CrossfadeAttenuation(itNoteOnEvent->Param.Note.Velocity);
+                crossfadeVolume = Engine::CrossfadeCurve[CrossfadeAttenuation(itNoteOnEvent->Param.Note.Velocity)];
                break;
            case ::gig::attenuation_ctrl_t::type_controlchange: //FIXME: currently not sample accurate
-                CrossfadeVolume = CrossfadeAttenuation(pEngineChannel->ControllerTable[pDimRgn->AttenuationController.controller_number]);
+                crossfadeVolume = Engine::CrossfadeCurve[CrossfadeAttenuation(pEngineChannel->ControllerTable[pDimRgn->AttenuationController.controller_number])];
                 break;
             case ::gig::attenuation_ctrl_t::type_none: // no crossfade defined
             default:
-                CrossfadeVolume = 1.0f;
+                crossfadeVolume = 1.0f;
         }
 
-        PanLeft  = 1.0f - float(RTMath::Max(pDimRgn->Pan, 0)) /  63.0f;
-        PanRight = 1.0f - float(RTMath::Min(pDimRgn->Pan, 0)) / -64.0f;
+        VolumeLeft  = volume * Engine::PanCurve[64 - pDimRgn->Pan];
+        VolumeRight = volume * Engine::PanCurve[64 + pDimRgn->Pan];
+
+        float subfragmentRate = pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE;
+        CrossfadeSmoother.trigger(crossfadeVolume, subfragmentRate);
+        VolumeSmoother.trigger(pEngineChannel->MidiVolume, subfragmentRate);
+        PanLeftSmoother.trigger(pEngineChannel->GlobalPanLeft, subfragmentRate);
+        PanRightSmoother.trigger(pEngineChannel->GlobalPanRight, subfragmentRate);
 
-        Pos = pDimRgn->SampleStartOffset; // offset where we should start playback of sample (0 - 2000 sample points)
+        finalSynthesisParameters.dPos = pDimRgn->SampleStartOffset; // offset where we should start playback of sample (0 - 2000 sample points)
+        Pos = pDimRgn->SampleStartOffset;
 
         // Check if the sample needs disk streaming or is too short for that
         long cachedsamples = pSample->GetCache().Size / pSample->FrameSize;
         DiskVoice          = cachedsamples < pSample->SamplesTotal;
 
+        const DLS::sample_loop_t& loopinfo = pDimRgn->pSampleLoops[0];
+
         if (DiskVoice) { // voice to be streamed from disk
-            MaxRAMPos = cachedsamples - (pEngine->MaxSamplesPerCycle << CONFIG_MAX_PITCH) / pSample->Channels; //TODO: this calculation is too pessimistic and may better be moved to Render() method, so it calculates MaxRAMPos dependent to the current demand of sample points to be rendered (e.g. in case of JACK)
+            if (cachedsamples > (pEngine->MaxSamplesPerCycle << CONFIG_MAX_PITCH)) {
+                MaxRAMPos = cachedsamples - (pEngine->MaxSamplesPerCycle << CONFIG_MAX_PITCH) / pSample->Channels; //TODO: this calculation is too pessimistic and may better be moved to Render() method, so it calculates MaxRAMPos dependent to the current demand of sample points to be rendered (e.g. in case of JACK)
+            } else {
+                // The cache is too small to fit a max sample buffer.
+                // Setting MaxRAMPos to 0 will probably cause a click
+                // in the audio, but it's better than not handling
+                // this case at all, which would have caused the
+                // unsigned MaxRAMPos to be set to a negative number.
+                MaxRAMPos = 0;
+            }
 
             // check if there's a loop defined which completely fits into the cached (RAM) part of the sample
-            if (pSample->Loops && pSample->LoopEnd <= MaxRAMPos) {
-                RAMLoop        = true;
-                LoopCyclesLeft = pSample->LoopPlayCount;
-            }
-            else RAMLoop = false;
+            RAMLoop = (pDimRgn->SampleLoops && (loopinfo.LoopStart + loopinfo.LoopLength) <= MaxRAMPos);
 
-            if (pDiskThread->OrderNewStream(&DiskStreamRef, pSample, MaxRAMPos, !RAMLoop) < 0) {
+            if (pDiskThread->OrderNewStream(&DiskStreamRef, pDimRgn, MaxRAMPos, !RAMLoop) < 0) {
                 dmsg(1,("Disk stream order failed!\n"));
                 KillImmediately();
                 return -1;
@@ -163,21 +179,28 @@
         }
         else { // RAM only voice
             MaxRAMPos = cachedsamples;
-            if (pSample->Loops) {
-                RAMLoop        = true;
-                LoopCyclesLeft = pSample->LoopPlayCount;
-            }
-            else RAMLoop = false;
+            RAMLoop = (pDimRgn->SampleLoops != 0);
             dmsg(4,("RAM only voice launched (Looping: %s)\n", (RAMLoop) ? "yes" : "no"));
         }
-
+        if (RAMLoop) {
+            loop.uiTotalCycles = pSample->LoopPlayCount;
+            loop.uiCyclesLeft  = pSample->LoopPlayCount;
+            loop.uiStart       = loopinfo.LoopStart;
+            loop.uiEnd         = loopinfo.LoopStart + loopinfo.LoopLength;
+            loop.uiSize        = loopinfo.LoopLength;
+        }
 
         // calculate initial pitch value
         {
-            double pitchbasecents = pDimRgn->FineTune + (int) pEngine->ScaleTuning[MIDIKey % 12];
-            if (pDimRgn->PitchTrack) pitchbasecents += (MIDIKey - (int) pDimRgn->UnityNote) * 100;
-            this->PitchBase = RTMath::CentsToFreqRatio(pitchbasecents) * (double(pSample->SamplesPerSecond) / double(pEngine->SampleRate));
-            this->PitchBend = RTMath::CentsToFreqRatio(((double) PitchBend / 8192.0) * 200.0); // pitchbend wheel +-2 semitones = 200 cents
+            double pitchbasecents = pEngineChannel->pInstrument->FineTune + pDimRgn->FineTune + pEngine->ScaleTuning[MIDIKey % 12];
+
+            // GSt behaviour: maximum transpose up is 40 semitones. If
+            // MIDI key is more than 40 semitones above unity note,
+            // the transpose is not done.
+            if (pDimRgn->PitchTrack && (MIDIKey - (int) pDimRgn->UnityNote) < 40) pitchbasecents += (MIDIKey - (int) pDimRgn->UnityNote) * 100;
+
+            this->PitchBase = RTMath::CentsToFreqRatioUnlimited(pitchbasecents) * (double(pSample->SamplesPerSecond) / double(pEngine->SampleRate));
+            this->PitchBend = RTMath::CentsToFreqRatio(PitchBend / 8192.0 * 100.0 * pEngineChannel->pInstrument->PitchbendRange);
         }
 
         // the length of the decay and release curves are dependent on the velocity
@@ -192,7 +215,7 @@
                     eg1controllervalue = 0;
                     break;
                 case ::gig::eg1_ctrl_t::type_channelaftertouch:
-                    eg1controllervalue = 0; // TODO: aftertouch not yet supported
+                    eg1controllervalue = pEngineChannel->ControllerTable[128];
                     break;
                 case ::gig::eg1_ctrl_t::type_velocity:
                     eg1controllervalue = itNoteOnEvent->Param.Note.Velocity;
@@ -214,7 +237,6 @@
             EG1.trigger(pDimRgn->EG1PreAttack,
                         pDimRgn->EG1Attack * eg1attack,
                         pDimRgn->EG1Hold,
-                        pSample->LoopStart,
                         pDimRgn->EG1Decay1 * eg1decay * velrelease,
                         pDimRgn->EG1Decay2 * eg1decay * velrelease,
                         pDimRgn->EG1InfiniteSustain,
@@ -224,6 +246,23 @@
                         pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
         }
 
+#ifdef CONFIG_INTERPOLATE_VOLUME
+        // setup initial volume in synthesis parameters
+#ifdef CONFIG_PROCESS_MUTED_CHANNELS
+        if (pEngineChannel->GetMute()) {
+            finalSynthesisParameters.fFinalVolumeLeft  = 0;
+            finalSynthesisParameters.fFinalVolumeRight = 0;
+        }
+        else
+#else
+        {
+            float finalVolume = pEngineChannel->MidiVolume * crossfadeVolume * EG1.getLevel();
+
+            finalSynthesisParameters.fFinalVolumeLeft  = finalVolume * VolumeLeft  * pEngineChannel->GlobalPanLeft;
+            finalSynthesisParameters.fFinalVolumeRight = finalVolume * VolumeRight * pEngineChannel->GlobalPanRight;
+        }
+#endif
+#endif
 
         // setup EG 2 (VCF Cutoff EG)
         {
@@ -234,7 +273,7 @@
                     eg2controllervalue = 0;
                     break;
                 case ::gig::eg2_ctrl_t::type_channelaftertouch:
-                    eg2controllervalue = 0; // TODO: aftertouch not yet supported
+                    eg2controllervalue = pEngineChannel->ControllerTable[128];
                     break;
                 case ::gig::eg2_ctrl_t::type_velocity:
                     eg2controllervalue = itNoteOnEvent->Param.Note.Velocity;
@@ -253,7 +292,6 @@
             EG2.trigger(pDimRgn->EG2PreAttack,
                         pDimRgn->EG2Attack * eg2attack,
                         false,
-                        pSample->LoopStart,
                        pDimRgn->EG2Decay1 * eg2decay * velrelease,
                        pDimRgn->EG2Decay2 * eg2decay * velrelease,
                        pDimRgn->EG2InfiniteSustain,
@@ -266,8 +304,16 @@
 
         // setup EG 3 (VCO EG)
         {
-            double eg3depth = RTMath::CentsToFreqRatio(pDimRgn->EG3Depth);
-            EG3.trigger(eg3depth, pDimRgn->EG3Attack, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
+            // if portamento mode is on, we dedicate EG3 purely for portamento, otherwise if portamento is off we do as told by the patch
+            bool bPortamento = pEngineChannel->PortamentoMode && pEngineChannel->PortamentoPos >= 0.0f;
+            float eg3depth = (bPortamento)
+                                 ? RTMath::CentsToFreqRatio((pEngineChannel->PortamentoPos - (float) MIDIKey) * 100)
+                                 : RTMath::CentsToFreqRatio(pDimRgn->EG3Depth);
+            float eg3time = (bPortamento)
+                                ? pEngineChannel->PortamentoTime
+                                : pDimRgn->EG3Attack;
+            EG3.trigger(eg3depth, eg3time, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
+            dmsg(5,("PortamentoPos=%f, depth=%f, time=%f\n", pEngineChannel->PortamentoPos, eg3depth, eg3time));
         }
 
 
@@ -305,12 +351,15 @@
                 pLFO1->ExtController = 0; // no external controller
                 bLFO1Enabled         = false;
         }
-        if (bLFO1Enabled) pLFO1->trigger(pDimRgn->LFO1Frequency,
-                                         start_level_max,
-                                         lfo1_internal_depth,
-                                         pDimRgn->LFO1ControlDepth,
-                                         pDimRgn->LFO1FlipPhase,
-                                         pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
+        if (bLFO1Enabled) {
+            pLFO1->trigger(pDimRgn->LFO1Frequency,
+                           start_level_min,
+                           lfo1_internal_depth,
+                           pDimRgn->LFO1ControlDepth,
+                           pDimRgn->LFO1FlipPhase,
+                           pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
+            pLFO1->update(pLFO1->ExtController ? pEngineChannel->ControllerTable[pLFO1->ExtController] : 0);
+        }
     }
 
 
@@ -348,12 +397,15 @@
                 pLFO2->ExtController = 0; // no external controller
                 bLFO2Enabled         = false;
         }
-        if (bLFO2Enabled) pLFO2->trigger(pDimRgn->LFO2Frequency,
-                                         start_level_max,
-                                         lfo2_internal_depth,
-                                         pDimRgn->LFO2ControlDepth,
-                                         pDimRgn->LFO2FlipPhase,
-                                         pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
+        if (bLFO2Enabled) {
+            pLFO2->trigger(pDimRgn->LFO2Frequency,
+                           start_level_max,
+                           lfo2_internal_depth,
+                           pDimRgn->LFO2ControlDepth,
+                           pDimRgn->LFO2FlipPhase,
+                           pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
+            pLFO2->update(pLFO2->ExtController ? pEngineChannel->ControllerTable[pLFO2->ExtController] : 0);
+        }
     }
 
 
@@ -373,8 +425,8 @@
                 break;
             case ::gig::lfo3_ctrl_aftertouch:
                 lfo3_internal_depth  = 0;
-                pLFO3->ExtController = 0; // TODO: aftertouch not implemented yet
-                bLFO3Enabled         = false; // see TODO comment in line above
+                pLFO3->ExtController = 128;
+                bLFO3Enabled         = true;
                 break;
             case ::gig::lfo3_ctrl_internal_modwheel:
                 lfo3_internal_depth  = pDimRgn->LFO3InternalDepth;
@@ -383,20 +435,23 @@
                 break;
            case ::gig::lfo3_ctrl_internal_aftertouch:
                 lfo3_internal_depth  = pDimRgn->LFO3InternalDepth;
-                pLFO1->ExtController = 0; // TODO: aftertouch not implemented yet
-                bLFO3Enabled         = (lfo3_internal_depth > 0 /*|| pDimRgn->LFO3ControlDepth > 0*/); // see TODO comment in line above
+                pLFO1->ExtController = 128;
+                bLFO3Enabled         = (lfo3_internal_depth > 0 || pDimRgn->LFO3ControlDepth > 0);
                 break;
             default:
                 lfo3_internal_depth  = 0;
                 pLFO3->ExtController = 0; // no external controller
                 bLFO3Enabled         = false;
         }
-        if (bLFO3Enabled) pLFO3->trigger(pDimRgn->LFO3Frequency,
-                                         start_level_mid,
-                                         lfo3_internal_depth,
-                                         pDimRgn->LFO3ControlDepth,
-                                         false,
-                                         pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
+        if (bLFO3Enabled) {
+            pLFO3->trigger(pDimRgn->LFO3Frequency,
+                           start_level_mid,
+                           lfo3_internal_depth,
+                           pDimRgn->LFO3ControlDepth,
+                           false,
+                           pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
+            pLFO3->update(pLFO3->ExtController ? pEngineChannel->ControllerTable[pLFO3->ExtController] : 0);
+        }
     }
 
 
@@ -438,7 +493,9 @@
             case ::gig::vcf_cutoff_ctrl_genpurpose8:
                 VCFCutoffCtrl.controller = 83;
                 break;
-            case ::gig::vcf_cutoff_ctrl_aftertouch: //TODO: not implemented yet
+            case ::gig::vcf_cutoff_ctrl_aftertouch:
+                VCFCutoffCtrl.controller = 128;
+                break;
             case ::gig::vcf_cutoff_ctrl_none:
             default:
                 VCFCutoffCtrl.controller = 0;
@@ -469,11 +526,11 @@
             #endif // CONFIG_OVERRIDE_RESONANCE_CTRL
 
             #ifndef CONFIG_OVERRIDE_FILTER_TYPE
-            FilterLeft.SetType(pDimRgn->VCFType);
-            FilterRight.SetType(pDimRgn->VCFType);
+            finalSynthesisParameters.filterLeft.SetType(pDimRgn->VCFType);
+            finalSynthesisParameters.filterRight.SetType(pDimRgn->VCFType);
             #else // override filter type
-            FilterLeft.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
-            FilterRight.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
+            finalSynthesisParameters.filterLeft.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
+            finalSynthesisParameters.filterRight.SetType(CONFIG_OVERRIDE_FILTER_TYPE);
             #endif // CONFIG_OVERRIDE_FILTER_TYPE
 
             VCFCutoffCtrl.value = pEngineChannel->ControllerTable[VCFCutoffCtrl.controller];
@@ -490,23 +547,19 @@
             if (VCFCutoffCtrl.controller) {
                 cvalue = pEngineChannel->ControllerTable[VCFCutoffCtrl.controller];
                 if (pDimRgn->VCFCutoffControllerInvert) cvalue = 127 - cvalue;
+                // VCFVelocityScale in this case means Minimum cutoff
                 if (cvalue < pDimRgn->VCFVelocityScale) cvalue = pDimRgn->VCFVelocityScale;
             }
             else {
                 cvalue = pDimRgn->VCFCutoff;
             }
-            cutoff *= float(cvalue) * 0.00787402f; // (1 / 127)
-            if (cutoff > 1.0) cutoff = 1.0;
-            cutoff = exp(cutoff * FILTER_CUTOFF_COEFF) * CONFIG_FILTER_CUTOFF_MIN;
+            cutoff *= float(cvalue);
+            if (cutoff > 127.0f) cutoff = 127.0f;
 
             // calculate resonance
-            float resonance = (float) VCFResonanceCtrl.value * 0.00787f; // 0.0..1.0
-            if (pDimRgn->VCFKeyboardTracking) {
-                resonance += (float) (itNoteOnEvent->Param.Note.Key - pDimRgn->VCFKeyboardTrackingBreakpoint) * 0.00787f;
-            }
-            Constrain(resonance, 0.0, 1.0); // correct resonance if outside allowed value range (0.0..1.0)
+            float resonance = (float) (VCFResonanceCtrl.controller ? VCFResonanceCtrl.value : pDimRgn->VCFResonance);
 
-            VCFCutoffCtrl.fvalue    = cutoff - CONFIG_FILTER_CUTOFF_MIN;
+            VCFCutoffCtrl.fvalue    = cutoff;
             VCFResonanceCtrl.fvalue = resonance;
         }
         else {
@@ -547,12 +600,11 @@
 
         if (DiskVoice) {
             // check if we reached the allowed limit of the sample RAM cache
-            if (Pos > MaxRAMPos) {
-                dmsg(5,("Voice: switching to disk playback (Pos=%f)\n", Pos));
+            if (finalSynthesisParameters.dPos > MaxRAMPos) {
+                dmsg(5,("Voice: switching to disk playback (Pos=%f)\n", finalSynthesisParameters.dPos));
                 this->PlaybackState = playback_state_disk;
             }
-        }
-        else if (Pos >= pSample->GetCache().Size / pSample->FrameSize) {
+        } else if (finalSynthesisParameters.dPos >= pSample->GetCache().Size / pSample->FrameSize) {
             this->PlaybackState = playback_state_end;
         }
     }
@@ -567,8 +619,8 @@
                         KillImmediately();
                         return;
                     }
-                    DiskStreamRef.pStream->IncrementReadPos(pSample->Channels * (int(Pos) - MaxRAMPos));
-                    Pos -= int(Pos);
+                    DiskStreamRef.pStream->IncrementReadPos(pSample->Channels * (int(finalSynthesisParameters.dPos) - MaxRAMPos));
+                    finalSynthesisParameters.dPos -= int(finalSynthesisParameters.dPos);
                     RealSampleWordsLeftToRead = -1; // -1 means no silence has been added yet
                 }
 
@@ -584,15 +636,15 @@
                     }
                 }
 
-                sample_t* ptr = DiskStreamRef.pStream->GetReadPtr(); // get the current read_ptr within the ringbuffer where we read the samples from
+                sample_t* ptr = (sample_t*)DiskStreamRef.pStream->GetReadPtr(); // get the current read_ptr within the ringbuffer where we read the samples from
 
                 // render current audio fragment
                 Synthesize(Samples, ptr, Delay);
 
-                const int iPos = (int) Pos;
+                const int iPos = (int) finalSynthesisParameters.dPos;
                 const int readSampleWords = iPos * pSample->Channels; // amount of sample words actually been read
                 DiskStreamRef.pStream->IncrementReadPos(readSampleWords);
-                Pos -= iPos; // just keep fractional part of Pos
+                finalSynthesisParameters.dPos -= iPos; // just keep fractional part of playback position
 
                 // change state of voice to 'end' if we really reached the end of the sample data
                 if (RealSampleWordsLeftToRead >= 0) {
@@ -607,9 +659,6 @@
                 break;
         }
 
-        // Reset synthesis event lists
-        pEngineChannel->pEvents->clear();
-
         // Reset delay
         Delay = 0;
 
@@ -624,8 +673,8 @@
      *  suspended / not running.
      */
     void Voice::Reset() {
-        FilterLeft.Reset();
-        FilterRight.Reset();
+        finalSynthesisParameters.filterLeft.Reset();
+        finalSynthesisParameters.filterRight.Reset();
         DiskStreamRef.pStream = NULL;
         DiskStreamRef.hStream = 0;
         DiskStreamRef.State   = Stream::state_unused;
@@ -640,16 +689,16 @@
      * for the given time.
      *
      * @param itEvent - iterator pointing to the next event to be processed
-     * @param End - youngest time stamp where processing should be stopped
+     * @param End     - youngest time stamp where processing should be stopped
      */
     void Voice::processTransitionEvents(RTList<Event>::Iterator& itEvent, uint End) {
         for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
             if (itEvent->Type == Event::type_release) {
-                EG1.update(EGADSR::event_release, this->Pos, fFinalPitch, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
-                EG2.update(EGADSR::event_release, this->Pos, fFinalPitch, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
+                EG1.update(EGADSR::event_release, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
+                EG2.update(EGADSR::event_release, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
             } else if (itEvent->Type == Event::type_cancel_release) {
-                EG1.update(EGADSR::event_cancel_release, this->Pos, fFinalPitch, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
-                EG2.update(EGADSR::event_cancel_release, this->Pos, fFinalPitch, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
+                EG1.update(EGADSR::event_cancel_release, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
+                EG2.update(EGADSR::event_cancel_release, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
             }
         }
     }
@@ -659,7 +708,7 @@
      * the given time.
      *
      * @param itEvent - iterator pointing to the next event to be processed
-     * @param End - youngest time stamp where processing should be stopped
+     * @param End     - youngest time stamp where processing should be stopped
      */
     void Voice::processCCEvents(RTList<Event>::Iterator& itEvent, uint End) {
         for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
@@ -682,7 +731,13 @@
             }
             if (pDimRgn->AttenuationController.type == ::gig::attenuation_ctrl_t::type_controlchange &&
                 itEvent->Param.CC.Controller == pDimRgn->AttenuationController.controller_number) {
-                processCrossFadeEvent(itEvent);
+                CrossfadeSmoother.update(Engine::CrossfadeCurve[CrossfadeAttenuation(itEvent->Param.CC.Value)]);
+            }
+            if (itEvent->Param.CC.Controller == 7) { // volume
+                VolumeSmoother.update(Engine::VolumeCurve[itEvent->Param.CC.Value]);
+            } else if (itEvent->Param.CC.Controller == 10) { // panpot
+                PanLeftSmoother.update(Engine::PanCurve[128 - itEvent->Param.CC.Value]);
+                PanRightSmoother.update(Engine::PanCurve[itEvent->Param.CC.Value]);
             }
         } else if (itEvent->Type == Event::type_pitchbend) { // if pitch bend event
             processPitchEvent(itEvent);
@@ -691,19 +746,7 @@
     }
 
     void Voice::processPitchEvent(RTList<Event>::Iterator& itEvent) {
-        const float pitch = RTMath::CentsToFreqRatio(((double) itEvent->Param.Pitch.Pitch / 8192.0) * 200.0); // +-two semitones = +-200 cents
-        fFinalPitch *= pitch;
-        PitchBend = pitch;
-    }
-
-    void Voice::processCrossFadeEvent(RTList<Event>::Iterator& itEvent) {
-        CrossfadeVolume = CrossfadeAttenuation(itEvent->Param.CC.Value);
-        #if CONFIG_PROCESS_MUTED_CHANNELS
-        const float effectiveVolume = CrossfadeVolume * Volume * (pEngineChannel->GetMute() ? 0 : pEngineChannel->GlobalVolume);
-        #else
-        const float effectiveVolume = CrossfadeVolume * Volume * pEngineChannel->GlobalVolume;
-        #endif
-        fFinalVolume = effectiveVolume;
+        PitchBend = RTMath::CentsToFreqRatio(itEvent->Param.Pitch.Pitch / 8192.0 * 100.0 * pEngineChannel->pInstrument->PitchbendRange);
     }
 
     void Voice::processCutoffEvent(RTList<Event>::Iterator& itEvent) {
@@ -712,9 +755,9 @@
         VCFCutoffCtrl.value == ccvalue;
         if (pDimRgn->VCFCutoffControllerInvert) ccvalue = 127 - ccvalue;
         if (ccvalue < pDimRgn->VCFVelocityScale) ccvalue = pDimRgn->VCFVelocityScale;
-        float cutoff = CutoffBase * float(ccvalue) * 0.00787402f; // (1 / 127)
-        if (cutoff > 1.0) cutoff = 1.0;
-        cutoff = exp(cutoff * FILTER_CUTOFF_COEFF) * CONFIG_FILTER_CUTOFF_MIN - CONFIG_FILTER_CUTOFF_MIN;
+        float cutoff = CutoffBase * float(ccvalue);
+        if (cutoff > 127.0f) cutoff = 127.0f;
 
+        VCFCutoffCtrl.fvalue = cutoff; // needed for initialization of fFinalCutoff next time
         fFinalCutoff = cutoff;
     }
@@ -723,10 +766,10 @@
         // convert absolute controller value to differential
         const int ctrldelta = itEvent->Param.CC.Value - VCFResonanceCtrl.value;
         VCFResonanceCtrl.value = itEvent->Param.CC.Value;
-        const float resonancedelta = (float) ctrldelta * 0.00787f; // 0.0..1.0
+        const float resonancedelta = (float) ctrldelta;
 
         fFinalResonance += resonancedelta; // needed for initialization of parameter
 
-        VCFResonanceCtrl.fvalue = itEvent->Param.CC.Value * 0.00787f;
+        VCFResonanceCtrl.fvalue = itEvent->Param.CC.Value;
     }
 
@@ -738,12 +781,40 @@
     *  @param Skip    - number of sample points to skip in output buffer
     */
    void Voice::Synthesize(uint Samples, sample_t* pSrc, uint Skip) {
+        finalSynthesisParameters.pOutLeft  = &pEngineChannel->pChannelLeft->Buffer()[Skip];
+        finalSynthesisParameters.pOutRight = &pEngineChannel->pChannelRight->Buffer()[Skip];
+        finalSynthesisParameters.pSrc      = pSrc;
+
        RTList<Event>::Iterator itCCEvent = pEngineChannel->pEvents->first();
        RTList<Event>::Iterator itNoteEvent = pEngineChannel->pMIDIKeyInfo[MIDIKey].pEvents->first();
 
-        if (Skip) { // skip events that happened before this voice was triggered
+        if (itTriggerEvent) { // skip events that happened before this voice was triggered
             while (itCCEvent && itCCEvent->FragmentPos() <= Skip) ++itCCEvent;
-            while (itNoteEvent && itNoteEvent->FragmentPos() <= Skip) ++itNoteEvent;
+
+            // we can't simply compare the timestamp here, because note events
+            // might happen on the same time stamp, so we have to deal on the
+            // actual sequence the note events arrived instead (see bug #112)
+            for (; itNoteEvent; ++itNoteEvent) {
+                if (itTriggerEvent == itNoteEvent) {
+                    ++itNoteEvent;
+                    break;
+                }
+            }
+        }
+
+        uint killPos;
+        if (itKillEvent) {
+            int maxFadeOutPos = Samples - pEngine->MinFadeOutSamples;
+            if (maxFadeOutPos < 0) {
+                // There's not enough space in buffer to do a fade out
+                // from max volume (this can only happen for audio
+                // drivers that use Samples < MaxSamplesPerCycle).
+                // End the EG1 here, at pos 0, with a shorter max fade
+                // out time.
+                EG1.enterFadeOutStage(Samples / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
+                itKillEvent = Pool<Event>::Iterator();
+            } else {
+                killPos = RTMath::Min(itKillEvent->FragmentPos(), maxFadeOutPos);
+            }
         }
 
         uint i = Skip;
@@ -751,21 +822,30 @@
             int iSubFragmentEnd = RTMath::Min(i + CONFIG_DEFAULT_SUBFRAGMENT_SIZE, Samples);
 
             // initialize all final synthesis parameters
-            fFinalPitch = PitchBase * PitchBend;
-            #if CONFIG_PROCESS_MUTED_CHANNELS
-            fFinalVolume = this->Volume * this->CrossfadeVolume * (pEngineChannel->GetMute() ? 0 : pEngineChannel->GlobalVolume);
-            #else
-            fFinalVolume = this->Volume * this->CrossfadeVolume * pEngineChannel->GlobalVolume;
-            #endif
             fFinalCutoff    = VCFCutoffCtrl.fvalue;
             fFinalResonance = VCFResonanceCtrl.fvalue;
 
             // process MIDI control change and pitchbend events for this subfragment
             processCCEvents(itCCEvent, iSubFragmentEnd);
 
+            finalSynthesisParameters.fFinalPitch = PitchBase * PitchBend;
+            float fFinalVolume = VolumeSmoother.render() * CrossfadeSmoother.render();
+#ifdef CONFIG_PROCESS_MUTED_CHANNELS
+            if (pEngineChannel->GetMute()) fFinalVolume = 0;
+#endif
+
             // process transition events (note on, note off & sustain pedal)
             processTransitionEvents(itNoteEvent, iSubFragmentEnd);
 
+            // if the voice was killed in this subfragment, or if the
+            // filter EG is finished, switch EG1 to fade out stage
+            if ((itKillEvent && killPos <= iSubFragmentEnd) ||
+                (SYNTHESIS_MODE_GET_FILTER(SynthesisMode) &&
+                 EG2.getSegmentType() == EGADSR::segment_end)) {
+                EG1.enterFadeOutStage();
+                itKillEvent = Pool<Event>::Iterator();
+            }
+
             // process envelope generators
             switch (EG1.getSegmentType()) {
                 case EGADSR::segment_lin:
@@ -789,57 +869,118 @@
                     fFinalCutoff *= EG2.getLevel();
                     break; // noop
             }
-            fFinalPitch *= RTMath::CentsToFreqRatio(EG3.render());
+            if (EG3.active()) finalSynthesisParameters.fFinalPitch *= EG3.render();
 
             // process low frequency oscillators
-            if (bLFO1Enabled) fFinalVolume *= pLFO1->render();
+            if (bLFO1Enabled) fFinalVolume *= (1.0f - pLFO1->render());
             if (bLFO2Enabled) fFinalCutoff *= pLFO2->render();
-            if (bLFO3Enabled) fFinalPitch *= RTMath::CentsToFreqRatio(pLFO3->render());
+            if (bLFO3Enabled) finalSynthesisParameters.fFinalPitch *= RTMath::CentsToFreqRatio(pLFO3->render());
+
+            // limit the pitch so we don't read outside the buffer
+            finalSynthesisParameters.fFinalPitch = RTMath::Min(finalSynthesisParameters.fFinalPitch, float(1 << CONFIG_MAX_PITCH));
 
             // if filter enabled then update filter coefficients
             if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode)) {
-                FilterLeft.SetParameters(fFinalCutoff, fFinalResonance, pEngine->SampleRate);
-                FilterRight.SetParameters(fFinalCutoff, fFinalResonance, pEngine->SampleRate);
+                finalSynthesisParameters.filterLeft.SetParameters(fFinalCutoff, fFinalResonance, pEngine->SampleRate);
+                finalSynthesisParameters.filterRight.SetParameters(fFinalCutoff, fFinalResonance, pEngine->SampleRate);
             }
 
-            // how many steps do we calculate for this next subfragment
-            const int steps = iSubFragmentEnd - i;
+            // do we need resampling?
+            const float __PLUS_ONE_CENT  = 1.000577789506554859250142541782224725466f;
+            const float __MINUS_ONE_CENT = 0.9994225441413807496009516495583113737666f;
+            const bool bResamplingRequired = !(finalSynthesisParameters.fFinalPitch <= __PLUS_ONE_CENT &&
+                                               finalSynthesisParameters.fFinalPitch >= __MINUS_ONE_CENT);
+            SYNTHESIS_MODE_SET_INTERPOLATE(SynthesisMode, bResamplingRequired);
+
+            // prepare final synthesis parameters structure
+            finalSynthesisParameters.uiToGo = iSubFragmentEnd - i;
+#ifdef CONFIG_INTERPOLATE_VOLUME
+            finalSynthesisParameters.fFinalVolumeDeltaLeft =
+                (fFinalVolume * VolumeLeft * PanLeftSmoother.render() -
+                 finalSynthesisParameters.fFinalVolumeLeft) / finalSynthesisParameters.uiToGo;
+            finalSynthesisParameters.fFinalVolumeDeltaRight =
+                (fFinalVolume * VolumeRight * PanRightSmoother.render() -
+                 finalSynthesisParameters.fFinalVolumeRight) / finalSynthesisParameters.uiToGo;
+#else
+            finalSynthesisParameters.fFinalVolumeLeft =
+                fFinalVolume * VolumeLeft * PanLeftSmoother.render();
+            finalSynthesisParameters.fFinalVolumeRight =
+                fFinalVolume * VolumeRight * PanRightSmoother.render();
+#endif
+            // render audio for one subfragment
+            RunSynthesisFunction(SynthesisMode, &finalSynthesisParameters, &loop);
 
-            // select the appropriate synthesis mode
-            SYNTHESIS_MODE_SET_INTERPOLATE(SynthesisMode, fFinalPitch != 1.0f);
+            // stop the rendering if volume EG is finished
+            if (EG1.getSegmentType() == EGADSR::segment_end) break;
 
-            // render audio for one subfragment
-            RunSynthesisFunction(SynthesisMode, *this, iSubFragmentEnd, pSrc, i);
+            const double newPos = Pos + (iSubFragmentEnd - i) * finalSynthesisParameters.fFinalPitch;
 
             // increment envelopes' positions
             if (EG1.active()) {
+
+                // if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage
+                if (pDimRgn->SampleLoops && Pos <= pDimRgn->pSampleLoops[0].LoopStart && pDimRgn->pSampleLoops[0].LoopStart < newPos) {
+                    EG1.update(EGADSR::event_hold_end, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
+                }
+
                 EG1.increment(1);
-                if (!EG1.toStageEndLeft()) EG1.update(EGADSR::event_stage_end, this->Pos, fFinalPitch, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
+                if (!EG1.toStageEndLeft()) EG1.update(EGADSR::event_stage_end, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
             }
             if (EG2.active()) {
                 EG2.increment(1);
-                if (!EG2.toStageEndLeft()) EG2.update(EGADSR::event_stage_end, this->Pos, fFinalPitch, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
+                if (!EG2.toStageEndLeft()) EG2.update(EGADSR::event_stage_end, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE);
             }
             EG3.increment(1);
             if (!EG3.toEndLeft()) EG3.update(); // neutralize envelope coefficient if end reached
 
+            Pos = newPos;
             i = iSubFragmentEnd;
         }
     }
 
+    /** @brief Update current portamento position.
+     *
+     * Will be called when portamento mode is enabled to get the final
+     * portamento position of this active voice from where the next voice(s)
+     * might continue to slide on.
+     *
+     * @param itNoteOffEvent - event which causes this voice to die soon
+     */
+    void Voice::UpdatePortamentoPos(Pool<Event>::Iterator& itNoteOffEvent) {
+        const float fFinalEG3Level = EG3.level(itNoteOffEvent->FragmentPos());
+        pEngineChannel->PortamentoPos = (float) MIDIKey + RTMath::FreqRatioToCents(fFinalEG3Level) * 0.01f;
+    }
+
     /**
      *  Immediately kill the voice. This method should not be used to kill
      *  a normal, active voice, because it doesn't take care of things like
      *  fading down the volume level to avoid clicks and regular processing
      *  until the kill event actually occured!
      *
-     *  @see Kill()
+     *  If it's necessary to know when the voice's disk stream was actually
+     *  deleted, then one can set the optional @a bRequestNotification
+     *  parameter and this method will then return the handle of the disk
+     *  stream (unique identifier) and one can use this handle to poll the
+     *  disk thread if this stream has been deleted. In any case this method
+     *  will return immediately and will not block until the stream actually
+     *  was deleted.
+     *
+     *  @param bRequestNotification - (optional) whether the disk thread shall
+     *                                provide a notification once it deleted
+     *                                the respective disk stream
+     *                                (default=false)
+     *  @returns handle to the voice's disk stream or @c Stream::INVALID_HANDLE
+     *           if the voice did not use a disk stream at all
+     *  @see Kill()
      */
-    void Voice::KillImmediately() {
+    Stream::Handle Voice::KillImmediately(bool bRequestNotification) {
+        Stream::Handle hStream = Stream::INVALID_HANDLE;
         if (DiskVoice && DiskStreamRef.State != Stream::state_unused) {
-            pDiskThread->OrderDeletionOfStream(&DiskStreamRef);
+            pDiskThread->OrderDeletionOfStream(&DiskStreamRef, bRequestNotification);
+            hStream = DiskStreamRef.hStream;
         }
         Reset();
+        return hStream;
     }
 
     /**