23 |
|
|
24 |
#include "../../common/Features.h" |
#include "../../common/Features.h" |
25 |
#include "Synthesizer.h" |
#include "Synthesizer.h" |
26 |
|
#include "Profiler.h" |
27 |
|
|
28 |
#include "Voice.h" |
#include "Voice.h" |
29 |
|
|
50 |
#else |
#else |
51 |
SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, false); |
SYNTHESIS_MODE_SET_IMPLEMENTATION(SynthesisMode, false); |
52 |
#endif |
#endif |
53 |
SYNTHESIS_MODE_SET_PROFILING(SynthesisMode, true); |
SYNTHESIS_MODE_SET_PROFILING(SynthesisMode, Profiler::isEnabled()); |
54 |
|
|
55 |
FilterLeft.Reset(); |
finalSynthesisParameters.filterLeft.Reset(); |
56 |
FilterRight.Reset(); |
finalSynthesisParameters.filterRight.Reset(); |
57 |
} |
} |
58 |
|
|
59 |
Voice::~Voice() { |
Voice::~Voice() { |
139 |
PanLeft = 1.0f - float(RTMath::Max(pDimRgn->Pan, 0)) / 63.0f; |
PanLeft = 1.0f - float(RTMath::Max(pDimRgn->Pan, 0)) / 63.0f; |
140 |
PanRight = 1.0f - float(RTMath::Min(pDimRgn->Pan, 0)) / -64.0f; |
PanRight = 1.0f - float(RTMath::Min(pDimRgn->Pan, 0)) / -64.0f; |
141 |
|
|
142 |
Pos = pDimRgn->SampleStartOffset; // offset where we should start playback of sample (0 - 2000 sample points) |
finalSynthesisParameters.dPos = pDimRgn->SampleStartOffset; // offset where we should start playback of sample (0 - 2000 sample points) |
143 |
|
Pos = pDimRgn->SampleStartOffset; |
144 |
|
|
145 |
// Check if the sample needs disk streaming or is too short for that |
// Check if the sample needs disk streaming or is too short for that |
146 |
long cachedsamples = pSample->GetCache().Size / pSample->FrameSize; |
long cachedsamples = pSample->GetCache().Size / pSample->FrameSize; |
150 |
MaxRAMPos = cachedsamples - (pEngine->MaxSamplesPerCycle << CONFIG_MAX_PITCH) / pSample->Channels; //TODO: this calculation is too pessimistic and may better be moved to Render() method, so it calculates MaxRAMPos dependent to the current demand of sample points to be rendered (e.g. in case of JACK) |
MaxRAMPos = cachedsamples - (pEngine->MaxSamplesPerCycle << CONFIG_MAX_PITCH) / pSample->Channels; //TODO: this calculation is too pessimistic and may better be moved to Render() method, so it calculates MaxRAMPos dependent to the current demand of sample points to be rendered (e.g. in case of JACK) |
151 |
|
|
152 |
// check if there's a loop defined which completely fits into the cached (RAM) part of the sample |
// check if there's a loop defined which completely fits into the cached (RAM) part of the sample |
153 |
if (pSample->Loops && pSample->LoopEnd <= MaxRAMPos) { |
RAMLoop = (pSample->Loops && pSample->LoopEnd <= MaxRAMPos); |
|
RAMLoop = true; |
|
|
LoopCyclesLeft = pSample->LoopPlayCount; |
|
|
} |
|
|
else RAMLoop = false; |
|
154 |
|
|
155 |
if (pDiskThread->OrderNewStream(&DiskStreamRef, pSample, MaxRAMPos, !RAMLoop) < 0) { |
if (pDiskThread->OrderNewStream(&DiskStreamRef, pSample, MaxRAMPos, !RAMLoop) < 0) { |
156 |
dmsg(1,("Disk stream order failed!\n")); |
dmsg(1,("Disk stream order failed!\n")); |
161 |
} |
} |
162 |
else { // RAM only voice |
else { // RAM only voice |
163 |
MaxRAMPos = cachedsamples; |
MaxRAMPos = cachedsamples; |
164 |
if (pSample->Loops) { |
RAMLoop = (pSample->Loops != 0); |
|
RAMLoop = true; |
|
|
LoopCyclesLeft = pSample->LoopPlayCount; |
|
|
} |
|
|
else RAMLoop = false; |
|
165 |
dmsg(4,("RAM only voice launched (Looping: %s)\n", (RAMLoop) ? "yes" : "no")); |
dmsg(4,("RAM only voice launched (Looping: %s)\n", (RAMLoop) ? "yes" : "no")); |
166 |
} |
} |
167 |
|
if (RAMLoop) { |
168 |
|
loop.uiTotalCycles = pSample->LoopPlayCount; |
169 |
|
loop.uiCyclesLeft = pSample->LoopPlayCount; |
170 |
|
loop.uiStart = pSample->LoopStart; |
171 |
|
loop.uiEnd = pSample->LoopEnd; |
172 |
|
loop.uiSize = pSample->LoopSize; |
173 |
|
} |
174 |
|
|
175 |
// calculate initial pitch value |
// calculate initial pitch value |
176 |
{ |
{ |
214 |
EG1.trigger(pDimRgn->EG1PreAttack, |
EG1.trigger(pDimRgn->EG1PreAttack, |
215 |
pDimRgn->EG1Attack * eg1attack, |
pDimRgn->EG1Attack * eg1attack, |
216 |
pDimRgn->EG1Hold, |
pDimRgn->EG1Hold, |
|
pSample->LoopStart, |
|
217 |
pDimRgn->EG1Decay1 * eg1decay * velrelease, |
pDimRgn->EG1Decay1 * eg1decay * velrelease, |
218 |
pDimRgn->EG1Decay2 * eg1decay * velrelease, |
pDimRgn->EG1Decay2 * eg1decay * velrelease, |
219 |
pDimRgn->EG1InfiniteSustain, |
pDimRgn->EG1InfiniteSustain, |
252 |
EG2.trigger(pDimRgn->EG2PreAttack, |
EG2.trigger(pDimRgn->EG2PreAttack, |
253 |
pDimRgn->EG2Attack * eg2attack, |
pDimRgn->EG2Attack * eg2attack, |
254 |
false, |
false, |
|
pSample->LoopStart, |
|
255 |
pDimRgn->EG2Decay1 * eg2decay * velrelease, |
pDimRgn->EG2Decay1 * eg2decay * velrelease, |
256 |
pDimRgn->EG2Decay2 * eg2decay * velrelease, |
pDimRgn->EG2Decay2 * eg2decay * velrelease, |
257 |
pDimRgn->EG2InfiniteSustain, |
pDimRgn->EG2InfiniteSustain, |
467 |
#endif // CONFIG_OVERRIDE_RESONANCE_CTRL |
#endif // CONFIG_OVERRIDE_RESONANCE_CTRL |
468 |
|
|
469 |
#ifndef CONFIG_OVERRIDE_FILTER_TYPE |
#ifndef CONFIG_OVERRIDE_FILTER_TYPE |
470 |
FilterLeft.SetType(pDimRgn->VCFType); |
finalSynthesisParameters.filterLeft.SetType(pDimRgn->VCFType); |
471 |
FilterRight.SetType(pDimRgn->VCFType); |
finalSynthesisParameters.filterRight.SetType(pDimRgn->VCFType); |
472 |
#else // override filter type |
#else // override filter type |
473 |
FilterLeft.SetType(CONFIG_OVERRIDE_FILTER_TYPE); |
FilterLeft.SetType(CONFIG_OVERRIDE_FILTER_TYPE); |
474 |
FilterRight.SetType(CONFIG_OVERRIDE_FILTER_TYPE); |
FilterRight.SetType(CONFIG_OVERRIDE_FILTER_TYPE); |
488 |
if (VCFCutoffCtrl.controller) { |
if (VCFCutoffCtrl.controller) { |
489 |
cvalue = pEngineChannel->ControllerTable[VCFCutoffCtrl.controller]; |
cvalue = pEngineChannel->ControllerTable[VCFCutoffCtrl.controller]; |
490 |
if (pDimRgn->VCFCutoffControllerInvert) cvalue = 127 - cvalue; |
if (pDimRgn->VCFCutoffControllerInvert) cvalue = 127 - cvalue; |
491 |
|
// VCFVelocityScale in this case means Minimum cutoff |
492 |
if (cvalue < pDimRgn->VCFVelocityScale) cvalue = pDimRgn->VCFVelocityScale; |
if (cvalue < pDimRgn->VCFVelocityScale) cvalue = pDimRgn->VCFVelocityScale; |
493 |
} |
} |
494 |
else { |
else { |
496 |
} |
} |
497 |
cutoff *= float(cvalue) * 0.00787402f; // (1 / 127) |
cutoff *= float(cvalue) * 0.00787402f; // (1 / 127) |
498 |
if (cutoff > 1.0) cutoff = 1.0; |
if (cutoff > 1.0) cutoff = 1.0; |
499 |
cutoff = exp(cutoff * FILTER_CUTOFF_COEFF) * CONFIG_FILTER_CUTOFF_MIN; |
cutoff = (cutoff < 0.5 ? cutoff * 4826 - 1 : cutoff * 5715 - 449); |
500 |
|
if (cutoff < 1.0) cutoff = 1.0; |
501 |
|
|
502 |
// calculate resonance |
// calculate resonance |
503 |
float resonance = (float) VCFResonanceCtrl.value * 0.00787f; // 0.0..1.0 |
float resonance = (float) (VCFResonanceCtrl.controller ? VCFResonanceCtrl.value : pDimRgn->VCFResonance) * 0.00787f; // 0.0..1.0 |
|
if (pDimRgn->VCFKeyboardTracking) { |
|
|
resonance += (float) (itNoteOnEvent->Param.Note.Key - pDimRgn->VCFKeyboardTrackingBreakpoint) * 0.00787f; |
|
|
} |
|
|
Constrain(resonance, 0.0, 1.0); // correct resonance if outside allowed value range (0.0..1.0) |
|
504 |
|
|
505 |
VCFCutoffCtrl.fvalue = cutoff - CONFIG_FILTER_CUTOFF_MIN; |
VCFCutoffCtrl.fvalue = cutoff - 1.0; |
506 |
VCFResonanceCtrl.fvalue = resonance; |
VCFResonanceCtrl.fvalue = resonance; |
507 |
} |
} |
508 |
else { |
else { |
543 |
|
|
544 |
if (DiskVoice) { |
if (DiskVoice) { |
545 |
// check if we reached the allowed limit of the sample RAM cache |
// check if we reached the allowed limit of the sample RAM cache |
546 |
if (Pos > MaxRAMPos) { |
if (finalSynthesisParameters.dPos > MaxRAMPos) { |
547 |
dmsg(5,("Voice: switching to disk playback (Pos=%f)\n", Pos)); |
dmsg(5,("Voice: switching to disk playback (Pos=%f)\n", finalSynthesisParameters.dPos)); |
548 |
this->PlaybackState = playback_state_disk; |
this->PlaybackState = playback_state_disk; |
549 |
} |
} |
550 |
} |
} else if (finalSynthesisParameters.dPos >= pSample->GetCache().Size / pSample->FrameSize) { |
|
else if (Pos >= pSample->GetCache().Size / pSample->FrameSize) { |
|
551 |
this->PlaybackState = playback_state_end; |
this->PlaybackState = playback_state_end; |
552 |
} |
} |
553 |
} |
} |
562 |
KillImmediately(); |
KillImmediately(); |
563 |
return; |
return; |
564 |
} |
} |
565 |
DiskStreamRef.pStream->IncrementReadPos(pSample->Channels * (int(Pos) - MaxRAMPos)); |
DiskStreamRef.pStream->IncrementReadPos(pSample->Channels * (int(finalSynthesisParameters.dPos) - MaxRAMPos)); |
566 |
Pos -= int(Pos); |
finalSynthesisParameters.dPos -= int(finalSynthesisParameters.dPos); |
567 |
RealSampleWordsLeftToRead = -1; // -1 means no silence has been added yet |
RealSampleWordsLeftToRead = -1; // -1 means no silence has been added yet |
568 |
} |
} |
569 |
|
|
584 |
// render current audio fragment |
// render current audio fragment |
585 |
Synthesize(Samples, ptr, Delay); |
Synthesize(Samples, ptr, Delay); |
586 |
|
|
587 |
const int iPos = (int) Pos; |
const int iPos = (int) finalSynthesisParameters.dPos; |
588 |
const int readSampleWords = iPos * pSample->Channels; // amount of sample words actually been read |
const int readSampleWords = iPos * pSample->Channels; // amount of sample words actually been read |
589 |
DiskStreamRef.pStream->IncrementReadPos(readSampleWords); |
DiskStreamRef.pStream->IncrementReadPos(readSampleWords); |
590 |
Pos -= iPos; // just keep fractional part of Pos |
finalSynthesisParameters.dPos -= iPos; // just keep fractional part of playback position |
591 |
|
|
592 |
// change state of voice to 'end' if we really reached the end of the sample data |
// change state of voice to 'end' if we really reached the end of the sample data |
593 |
if (RealSampleWordsLeftToRead >= 0) { |
if (RealSampleWordsLeftToRead >= 0) { |
602 |
break; |
break; |
603 |
} |
} |
604 |
|
|
|
// Reset synthesis event lists |
|
|
pEngineChannel->pEvents->clear(); |
|
|
|
|
605 |
// Reset delay |
// Reset delay |
606 |
Delay = 0; |
Delay = 0; |
607 |
|
|
616 |
* suspended / not running. |
* suspended / not running. |
617 |
*/ |
*/ |
618 |
void Voice::Reset() { |
void Voice::Reset() { |
619 |
FilterLeft.Reset(); |
finalSynthesisParameters.filterLeft.Reset(); |
620 |
FilterRight.Reset(); |
finalSynthesisParameters.filterRight.Reset(); |
621 |
DiskStreamRef.pStream = NULL; |
DiskStreamRef.pStream = NULL; |
622 |
DiskStreamRef.hStream = 0; |
DiskStreamRef.hStream = 0; |
623 |
DiskStreamRef.State = Stream::state_unused; |
DiskStreamRef.State = Stream::state_unused; |
637 |
/**
 * Process given list of MIDI note-transition events for the given time
 * range: a release event switches both envelope generators (EG1 = volume,
 * EG2 = filter cutoff) into their release stage, a cancel-release event
 * (e.g. sustain pedal pressed again) reverts that.
 *
 * @param itEvent - iterator to the next transition event to process;
 *                  advanced past all events within the range
 * @param End     - upper limit (fragment position) of events to process
 */
void Voice::processTransitionEvents(RTList<Event>::Iterator& itEvent, uint End) {
    // sample rate of one EG step = one step per subfragment
    const float egSampleRate = pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE;
    for (; itEvent && itEvent->FragmentPos() <= End; ++itEvent) {
        if (itEvent->Type == Event::type_release) {
            EG1.update(EGADSR::event_release, egSampleRate);
            EG2.update(EGADSR::event_release, egSampleRate);
        } else if (itEvent->Type == Event::type_cancel_release) {
            EG1.update(EGADSR::event_cancel_release, egSampleRate);
            EG2.update(EGADSR::event_cancel_release, egSampleRate);
        }
    }
}
684 |
|
|
685 |
/**
 * Process a MIDI pitch-bend event: convert the raw 14-bit pitch wheel
 * value (-8192..+8191, mapped here to +-200 cents, i.e. +-2 semitones)
 * into a frequency ratio, apply it to the current subfragment's final
 * pitch and remember it so subsequent subfragments start from the new
 * bend value.
 *
 * @param itEvent - iterator pointing to the pitch-bend event to process
 */
void Voice::processPitchEvent(RTList<Event>::Iterator& itEvent) {
    const float pitch = RTMath::CentsToFreqRatio(((double) itEvent->Param.Pitch.Pitch / 8192.0) * 200.0); // +-two semitones = +-200 cents
    finalSynthesisParameters.fFinalPitch *= pitch;
    PitchBend = pitch; // persist bend factor for the next subfragments' pitch initialization
}
690 |
|
|
691 |
void Voice::processCrossFadeEvent(RTList<Event>::Iterator& itEvent) { |
void Voice::processCrossFadeEvent(RTList<Event>::Iterator& itEvent) { |
706 |
if (ccvalue < pDimRgn->VCFVelocityScale) ccvalue = pDimRgn->VCFVelocityScale; |
if (ccvalue < pDimRgn->VCFVelocityScale) ccvalue = pDimRgn->VCFVelocityScale; |
707 |
float cutoff = CutoffBase * float(ccvalue) * 0.00787402f; // (1 / 127) |
float cutoff = CutoffBase * float(ccvalue) * 0.00787402f; // (1 / 127) |
708 |
if (cutoff > 1.0) cutoff = 1.0; |
if (cutoff > 1.0) cutoff = 1.0; |
709 |
cutoff = exp(cutoff * FILTER_CUTOFF_COEFF) * CONFIG_FILTER_CUTOFF_MIN - CONFIG_FILTER_CUTOFF_MIN; |
cutoff = (cutoff < 0.5 ? cutoff * 4826 - 1 : cutoff * 5715 - 449); |
710 |
VCFCutoffCtrl.fvalue = cutoff; // needed for initialization of fFinalCutoff next time |
if (cutoff < 1.0) cutoff = 1.0; |
711 |
|
|
712 |
|
VCFCutoffCtrl.fvalue = cutoff - 1.0; // needed for initialization of fFinalCutoff next time |
713 |
fFinalCutoff = cutoff; |
fFinalCutoff = cutoff; |
714 |
} |
} |
715 |
|
|
732 |
* @param Skip - number of sample points to skip in output buffer |
* @param Skip - number of sample points to skip in output buffer |
733 |
*/ |
*/ |
734 |
void Voice::Synthesize(uint Samples, sample_t* pSrc, uint Skip) { |
void Voice::Synthesize(uint Samples, sample_t* pSrc, uint Skip) { |
735 |
|
finalSynthesisParameters.pOutLeft = &pEngineChannel->pOutputLeft[Skip]; |
736 |
|
finalSynthesisParameters.pOutRight = &pEngineChannel->pOutputRight[Skip]; |
737 |
|
finalSynthesisParameters.pSrc = pSrc; |
738 |
|
|
739 |
RTList<Event>::Iterator itCCEvent = pEngineChannel->pEvents->first(); |
RTList<Event>::Iterator itCCEvent = pEngineChannel->pEvents->first(); |
740 |
RTList<Event>::Iterator itNoteEvent = pEngineChannel->pMIDIKeyInfo[MIDIKey].pEvents->first(); |
RTList<Event>::Iterator itNoteEvent = pEngineChannel->pMIDIKeyInfo[MIDIKey].pEvents->first(); |
741 |
|
|
742 |
if (Skip) { // skip events that happened before this voice was triggered |
if (Skip) { // skip events that happened before this voice was triggered |
743 |
while (itCCEvent && itCCEvent->FragmentPos() <= Skip) ++itCCEvent; |
while (itCCEvent && itCCEvent->FragmentPos() <= Skip) ++itCCEvent; |
744 |
while (itNoteEvent && itNoteEvent->FragmentPos() <= Skip) ++itNoteEvent; |
while (itNoteEvent && itNoteEvent->FragmentPos() <= Skip) ++itNoteEvent; |
745 |
} |
} |
746 |
|
|
747 |
|
uint killPos; |
748 |
|
if (itKillEvent) killPos = RTMath::Min(itKillEvent->FragmentPos(), pEngine->MaxFadeOutPos); |
749 |
|
|
750 |
uint i = Skip; |
uint i = Skip; |
751 |
while (i < Samples) { |
while (i < Samples) { |
752 |
int iSubFragmentEnd = RTMath::Min(i + CONFIG_DEFAULT_SUBFRAGMENT_SIZE, Samples); |
int iSubFragmentEnd = RTMath::Min(i + CONFIG_DEFAULT_SUBFRAGMENT_SIZE, Samples); |
753 |
|
|
754 |
// initialize all final synthesis parameters |
// initialize all final synthesis parameters |
755 |
fFinalPitch = PitchBase * PitchBend; |
finalSynthesisParameters.fFinalPitch = PitchBase * PitchBend; |
756 |
#if CONFIG_PROCESS_MUTED_CHANNELS |
#if CONFIG_PROCESS_MUTED_CHANNELS |
757 |
fFinalVolume = this->Volume * this->CrossfadeVolume * (pEngineChannel->GetMute() ? 0 : pEngineChannel->GlobalVolume)); |
fFinalVolume = this->Volume * this->CrossfadeVolume * (pEngineChannel->GetMute() ? 0 : pEngineChannel->GlobalVolume); |
758 |
#else |
#else |
759 |
fFinalVolume = this->Volume * this->CrossfadeVolume * pEngineChannel->GlobalVolume; |
fFinalVolume = this->Volume * this->CrossfadeVolume * pEngineChannel->GlobalVolume; |
760 |
#endif |
#endif |
761 |
fFinalCutoff = VCFCutoffCtrl.fvalue; |
fFinalCutoff = VCFCutoffCtrl.fvalue; |
762 |
fFinalResonance = VCFResonanceCtrl.fvalue; |
fFinalResonance = VCFResonanceCtrl.fvalue; |
763 |
|
|
764 |
// process MIDI control change and pitchbend events for this subfragment |
// process MIDI control change and pitchbend events for this subfragment |
765 |
processCCEvents(itCCEvent, iSubFragmentEnd); |
processCCEvents(itCCEvent, iSubFragmentEnd); |
766 |
|
|
767 |
// process transition events (note on, note off & sustain pedal) |
// process transition events (note on, note off & sustain pedal) |
768 |
processTransitionEvents(itNoteEvent, iSubFragmentEnd); |
processTransitionEvents(itNoteEvent, iSubFragmentEnd); |
769 |
|
|
770 |
|
// if the voice was killed in this subfragment switch EG1 to fade out stage |
771 |
|
if (itKillEvent && killPos <= iSubFragmentEnd) { |
772 |
|
EG1.enterFadeOutStage(); |
773 |
|
itKillEvent = Pool<Event>::Iterator(); |
774 |
|
} |
775 |
|
|
776 |
// process envelope generators |
// process envelope generators |
777 |
switch (EG1.getSegmentType()) { |
switch (EG1.getSegmentType()) { |
778 |
case EGADSR::segment_lin: |
case EGADSR::segment_lin: |
796 |
fFinalCutoff *= EG2.getLevel(); |
fFinalCutoff *= EG2.getLevel(); |
797 |
break; // noop |
break; // noop |
798 |
} |
} |
799 |
fFinalPitch *= RTMath::CentsToFreqRatio(EG3.render()); |
if (EG3.active()) finalSynthesisParameters.fFinalPitch *= RTMath::CentsToFreqRatio(EG3.render()); |
800 |
|
|
801 |
// process low frequency oscillators |
// process low frequency oscillators |
802 |
if (bLFO1Enabled) fFinalVolume *= pLFO1->render(); |
if (bLFO1Enabled) fFinalVolume *= pLFO1->render(); |
803 |
if (bLFO2Enabled) fFinalCutoff *= pLFO2->render(); |
if (bLFO2Enabled) fFinalCutoff *= pLFO2->render(); |
804 |
if (bLFO3Enabled) fFinalPitch *= RTMath::CentsToFreqRatio(pLFO3->render()); |
if (bLFO3Enabled) finalSynthesisParameters.fFinalPitch *= RTMath::CentsToFreqRatio(pLFO3->render()); |
805 |
|
|
806 |
// if filter enabled then update filter coefficients |
// if filter enabled then update filter coefficients |
807 |
if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode)) { |
if (SYNTHESIS_MODE_GET_FILTER(SynthesisMode)) { |
808 |
FilterLeft.SetParameters(fFinalCutoff, fFinalResonance, pEngine->SampleRate); |
finalSynthesisParameters.filterLeft.SetParameters(fFinalCutoff + 1.0, fFinalResonance, pEngine->SampleRate); |
809 |
FilterRight.SetParameters(fFinalCutoff, fFinalResonance, pEngine->SampleRate); |
finalSynthesisParameters.filterRight.SetParameters(fFinalCutoff + 1.0, fFinalResonance, pEngine->SampleRate); |
810 |
} |
} |
811 |
|
|
812 |
// how many steps do we calculate for this next subfragment |
// do we need resampling? |
813 |
const int steps = iSubFragmentEnd - i; |
const float __PLUS_ONE_CENT = 1.000577789506554859250142541782224725466f; |
814 |
|
const float __MINUS_ONE_CENT = 0.9994225441413807496009516495583113737666f; |
815 |
// select the appropriate synthesis mode |
const bool bResamplingRequired = !(finalSynthesisParameters.fFinalPitch <= __PLUS_ONE_CENT && |
816 |
SYNTHESIS_MODE_SET_INTERPOLATE(SynthesisMode, fFinalPitch != 1.0f); |
finalSynthesisParameters.fFinalPitch >= __MINUS_ONE_CENT); |
817 |
|
SYNTHESIS_MODE_SET_INTERPOLATE(SynthesisMode, bResamplingRequired); |
818 |
|
|
819 |
|
// prepare final synthesis parameters structure |
820 |
|
finalSynthesisParameters.fFinalVolumeLeft = fFinalVolume * PanLeft; |
821 |
|
finalSynthesisParameters.fFinalVolumeRight = fFinalVolume * PanRight; |
822 |
|
finalSynthesisParameters.uiToGo = iSubFragmentEnd - i; |
823 |
|
|
824 |
// render audio for one subfragment |
// render audio for one subfragment |
825 |
RunSynthesisFunction(SynthesisMode, *this, iSubFragmentEnd, pSrc, i); |
RunSynthesisFunction(SynthesisMode, &finalSynthesisParameters, &loop); |
826 |
|
|
827 |
|
const double newPos = Pos + (iSubFragmentEnd - i) * finalSynthesisParameters.fFinalPitch; |
828 |
|
|
829 |
// increment envelopes' positions |
// increment envelopes' positions |
830 |
if (EG1.active()) { |
if (EG1.active()) { |
831 |
EG1.increment(steps); |
|
832 |
if (!EG1.toStageEndLeft()) EG1.update(EGADSR::event_stage_end, this->Pos, fFinalPitch, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
// if sample has a loop and loop start has been reached in this subfragment, send a special event to EG1 to let it finish the attack hold stage |
833 |
|
if (pSample->Loops && Pos <= pSample->LoopStart && pSample->LoopStart < newPos) { |
834 |
|
EG1.update(EGADSR::event_hold_end, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
835 |
|
} |
836 |
|
|
837 |
|
EG1.increment(1); |
838 |
|
if (!EG1.toStageEndLeft()) EG1.update(EGADSR::event_stage_end, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
839 |
} |
} |
840 |
if (EG2.active()) { |
if (EG2.active()) { |
841 |
EG2.increment(steps); |
EG2.increment(1); |
842 |
if (!EG2.toStageEndLeft()) EG2.update(EGADSR::event_stage_end, this->Pos, fFinalPitch, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
if (!EG2.toStageEndLeft()) EG2.update(EGADSR::event_stage_end, pEngine->SampleRate / CONFIG_DEFAULT_SUBFRAGMENT_SIZE); |
843 |
} |
} |
844 |
EG3.increment(steps); |
EG3.increment(1); |
845 |
if (!EG3.toEndLeft()) EG3.update(); // neutralize envelope coefficient if end reached |
if (!EG3.toEndLeft()) EG3.update(); // neutralize envelope coefficient if end reached |
846 |
|
|
847 |
|
Pos = newPos; |
848 |
|
i = iSubFragmentEnd; |
849 |
} |
} |
850 |
} |
} |
851 |
|
|