43 |
|
|
44 |
finalSynthesisParameters.filterLeft.Reset(); |
finalSynthesisParameters.filterLeft.Reset(); |
45 |
finalSynthesisParameters.filterRight.Reset(); |
finalSynthesisParameters.filterRight.Reset(); |
46 |
|
|
47 |
|
pEq = NULL; |
48 |
|
bEqSupport = false; |
49 |
} |
} |
50 |
|
|
51 |
/// Destructor: releases the per-voice modulation LFOs and the optional
/// per-voice EQ effect, if they were ever allocated.
AbstractVoice::~AbstractVoice() {
    // 'delete' on a null pointer is a no-op per the C++ standard, so the
    // explicit null checks the original carried were redundant.
    delete pLFO1;
    delete pLFO2;
    delete pLFO3;
    // EQ is optional (see CreateEq()); pEq may legitimately still be NULL.
    delete pEq;
}
58 |
|
|
59 |
|
void AbstractVoice::CreateEq() { |
60 |
|
if(!bEqSupport) return; |
61 |
|
if(pEq != NULL) delete pEq; |
62 |
|
pEq = new EqSupport; |
63 |
|
pEq->InitEffect(GetEngine()->pAudioOutputDevice); |
64 |
} |
} |
65 |
|
|
66 |
/** |
/** |
324 |
* @param Skip - number of sample points to skip in output buffer |
* @param Skip - number of sample points to skip in output buffer |
325 |
*/ |
*/ |
326 |
void AbstractVoice::Synthesize(uint Samples, sample_t* pSrc, uint Skip) { |
void AbstractVoice::Synthesize(uint Samples, sample_t* pSrc, uint Skip) { |
327 |
|
bool delay = false; // Whether the voice playback should be delayed for this call |
328 |
|
|
329 |
|
if (pSignalUnitRack != NULL) { |
330 |
|
uint delaySteps = pSignalUnitRack->GetEndpointUnit()->DelayTrigger(); |
331 |
|
if (delaySteps > 0) { // delay on the endpoint unit means delay of the voice playback |
332 |
|
if (delaySteps >= Samples) { |
333 |
|
pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(Samples); |
334 |
|
delay = true; |
335 |
|
} else { |
336 |
|
pSignalUnitRack->GetEndpointUnit()->DecreaseDelay(delaySteps); |
337 |
|
Samples -= delaySteps; |
338 |
|
Skip += delaySteps; |
339 |
|
} |
340 |
|
} |
341 |
|
} |
342 |
|
|
343 |
AbstractEngineChannel* pChannel = pEngineChannel; |
AbstractEngineChannel* pChannel = pEngineChannel; |
344 |
MidiKeyBase* pMidiKeyInfo = GetMidiKeyInfo(MIDIKey); |
MidiKeyBase* pMidiKeyInfo = GetMidiKeyInfo(MIDIKey); |
345 |
|
|
346 |
const bool bVoiceRequiresDedicatedRouting = |
const bool bVoiceRequiresDedicatedRouting = |
347 |
pEngineChannel->GetFxSendCount() > 0 && |
pEngineChannel->GetFxSendCount() > 0 && |
348 |
(pMidiKeyInfo->ReverbSend || pMidiKeyInfo->ChorusSend); |
(pMidiKeyInfo->ReverbSend || pMidiKeyInfo->ChorusSend); |
349 |
|
|
350 |
|
const bool bEq = |
351 |
|
pSignalUnitRack != NULL && pSignalUnitRack->HasEq() && pEq->HasSupport(); |
352 |
|
|
353 |
if (bVoiceRequiresDedicatedRouting) { |
if (bEq) { |
354 |
|
pEq->GetInChannelLeft()->Clear(); |
355 |
|
pEq->GetInChannelRight()->Clear(); |
356 |
|
finalSynthesisParameters.pOutLeft = &pEq->GetInChannelLeft()->Buffer()[Skip]; |
357 |
|
finalSynthesisParameters.pOutRight = &pEq->GetInChannelRight()->Buffer()[Skip]; |
358 |
|
pSignalUnitRack->UpdateEqSettings(pEq); |
359 |
|
} else if (bVoiceRequiresDedicatedRouting) { |
360 |
finalSynthesisParameters.pOutLeft = &GetEngine()->pDedicatedVoiceChannelLeft->Buffer()[Skip]; |
finalSynthesisParameters.pOutLeft = &GetEngine()->pDedicatedVoiceChannelLeft->Buffer()[Skip]; |
361 |
finalSynthesisParameters.pOutRight = &GetEngine()->pDedicatedVoiceChannelRight->Buffer()[Skip]; |
finalSynthesisParameters.pOutRight = &GetEngine()->pDedicatedVoiceChannelRight->Buffer()[Skip]; |
362 |
} else { |
} else { |
432 |
// process transition events (note on, note off & sustain pedal) |
// process transition events (note on, note off & sustain pedal) |
433 |
processTransitionEvents(itNoteEvent, iSubFragmentEnd); |
processTransitionEvents(itNoteEvent, iSubFragmentEnd); |
434 |
processGroupEvents(itGroupEvent, iSubFragmentEnd); |
processGroupEvents(itGroupEvent, iSubFragmentEnd); |
435 |
|
|
436 |
if (pSignalUnitRack == NULL) { |
if (pSignalUnitRack == NULL) { |
437 |
// if the voice was killed in this subfragment, or if the |
// if the voice was killed in this subfragment, or if the |
438 |
// filter EG is finished, switch EG1 to fade out stage |
// filter EG is finished, switch EG1 to fade out stage |
530 |
fFinalVolume * VolumeRight * PanRightSmoother.render(); |
fFinalVolume * VolumeRight * PanRightSmoother.render(); |
531 |
#endif |
#endif |
532 |
// render audio for one subfragment |
// render audio for one subfragment |
533 |
RunSynthesisFunction(SynthesisMode, &finalSynthesisParameters, &loop); |
if (!delay) RunSynthesisFunction(SynthesisMode, &finalSynthesisParameters, &loop); |
534 |
|
|
535 |
if (pSignalUnitRack == NULL) { |
if (pSignalUnitRack == NULL) { |
536 |
// stop the rendering if volume EG is finished |
// stop the rendering if volume EG is finished |
567 |
}*/ |
}*/ |
568 |
// TODO: ^^^ |
// TODO: ^^^ |
569 |
|
|
570 |
pSignalUnitRack->Increment(); |
if (!delay) pSignalUnitRack->Increment(); |
571 |
} |
} |
572 |
|
|
573 |
Pos = newPos; |
Pos = newPos; |
574 |
i = iSubFragmentEnd; |
i = iSubFragmentEnd; |
575 |
} |
} |
576 |
|
|
577 |
|
if (delay) return; |
578 |
|
|
579 |
if (bVoiceRequiresDedicatedRouting) { |
if (bVoiceRequiresDedicatedRouting) { |
580 |
|
if (bEq) { |
581 |
|
pEq->RenderAudio(Samples); |
582 |
|
pEq->GetOutChannelLeft()->CopyTo(GetEngine()->pDedicatedVoiceChannelLeft, Samples); |
583 |
|
pEq->GetOutChannelRight()->CopyTo(GetEngine()->pDedicatedVoiceChannelRight, Samples); |
584 |
|
} |
585 |
optional<float> effectSendLevels[2] = { |
optional<float> effectSendLevels[2] = { |
586 |
pMidiKeyInfo->ReverbSend, |
pMidiKeyInfo->ReverbSend, |
587 |
pMidiKeyInfo->ChorusSend |
pMidiKeyInfo->ChorusSend |
588 |
}; |
}; |
589 |
GetEngine()->RouteDedicatedVoiceChannels(pEngineChannel, effectSendLevels, Samples); |
GetEngine()->RouteDedicatedVoiceChannels(pEngineChannel, effectSendLevels, Samples); |
590 |
|
} else if (bEq) { |
591 |
|
pEq->RenderAudio(Samples); |
592 |
|
pEq->GetOutChannelLeft()->MixTo(pChannel->pChannelLeft, Samples); |
593 |
|
pEq->GetOutChannelRight()->MixTo(pChannel->pChannelRight, Samples); |
594 |
} |
} |
595 |
} |
} |
596 |
|
|