--- libgig/trunk/src/gig.cpp 2006/05/06 11:29:29 858 +++ libgig/trunk/src/gig.cpp 2007/10/05 11:26:53 1384 @@ -1,8 +1,8 @@ /*************************************************************************** * * - * libgig - C++ cross-platform Gigasampler format file loader library * + * libgig - C++ cross-platform Gigasampler format file access library * * * - * Copyright (C) 2003-2005 by Christian Schoenebeck * + * Copyright (C) 2003-2007 by Christian Schoenebeck * * * * * * This library is free software; you can redistribute it and/or modify * @@ -111,6 +111,13 @@ return x & 0x800000 ? x - 0x1000000 : x; } + inline void store24(unsigned char* pDst, int x) + { + pDst[0] = x; + pDst[1] = x >> 8; + pDst[2] = x >> 16; + } + void Decompress16(int compressionmode, const unsigned char* params, int srcStep, int dstStep, const unsigned char* pSrc, int16_t* pDst, @@ -150,14 +157,11 @@ } void Decompress24(int compressionmode, const unsigned char* params, - int dstStep, const unsigned char* pSrc, int16_t* pDst, + int dstStep, const unsigned char* pSrc, uint8_t* pDst, unsigned long currentframeoffset, unsigned long copysamples, int truncatedBits) { - // Note: The 24 bits are truncated to 16 bits for now. - int y, dy, ddy, dddy; - const int shift = 8 - truncatedBits; #define GET_PARAMS(params) \ y = get24(params); \ @@ -173,14 +177,14 @@ #define COPY_ONE(x) \ SKIP_ONE(x); \ - *pDst = y >> shift; \ + store24(pDst, y << truncatedBits); \ pDst += dstStep switch (compressionmode) { case 2: // 24 bit uncompressed pSrc += currentframeoffset * 3; while (copysamples) { - *pDst = get24(pSrc) >> shift; + store24(pDst, get24(pSrc) << truncatedBits); pDst += dstStep; pSrc += 3; copysamples--; @@ -250,6 +254,93 @@ } + +// *************** Internal CRC-32 (Cyclic Redundancy Check) functions *************** +// * + + static uint32_t* __initCRCTable() { + static uint32_t res[256]; + + for (int i = 0 ; i < 256 ; i++) { + uint32_t c = i; + for (int j = 0 ; j < 8 ; j++) { + c = (c & 1) ? 0xedb88320 ^ (c >> 1) : c >> 1; + } + res[i] = c; + } + return res; + } + + static const uint32_t* __CRCTable = __initCRCTable(); + + /** + * Initialize a CRC variable. + * + * @param crc - variable to be initialized + */ + inline static void __resetCRC(uint32_t& crc) { + crc = 0xffffffff; + } + + /** + * Used to calculate checksums of the sample data in a gig file. The + * checksums are stored in the 3crc chunk of the gig file and + * automatically updated when a sample is written with Sample::Write(). + * + * One should call __resetCRC() to initialize the CRC variable to be + * used before calling this function the first time. + * + * After initializing the CRC variable one can call this function + * arbitrary times, i.e. to split the overall CRC calculation into + * steps. + * + * Once the whole data was processed by __calculateCRC(), one should + * call __encodeCRC() to get the final CRC result. + * + * @param buf - pointer to data the CRC shall be calculated of + * @param bufSize - size of the data to be processed + * @param crc - variable the CRC sum shall be stored to + */ + static void __calculateCRC(unsigned char* buf, int bufSize, uint32_t& crc) { + for (int i = 0 ; i < bufSize ; i++) { + crc = __CRCTable[(crc ^ buf[i]) & 0xff] ^ (crc >> 8); + } + } + + /** + * Returns the final CRC result. 
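For illustration, a minimal standalone sketch of the incremental CRC-32 pattern the helpers above implement (reset, feed data in arbitrary portions, finalize). Only the 0xedb88320 polynomial and the init/update/finalize split mirror __resetCRC(), __calculateCRC() and __encodeCRC(); the names crc32_init_table, crc32_begin, crc32_feed and crc32_end are stand-ins invented for this sketch and are not part of the patch.

#include <stdint.h>
#include <stddef.h>

static uint32_t crc32_table[256];

static void crc32_init_table() {
    for (uint32_t i = 0; i < 256; i++) {
        uint32_t c = i;
        for (int j = 0; j < 8; j++)
            c = (c & 1) ? 0xedb88320 ^ (c >> 1) : c >> 1;
        crc32_table[i] = c;
    }
}

static void crc32_begin(uint32_t& crc) { crc = 0xffffffff; }

static void crc32_feed(uint32_t& crc, const unsigned char* buf, size_t n) {
    for (size_t i = 0; i < n; i++)
        crc = crc32_table[(crc ^ buf[i]) & 0xff] ^ (crc >> 8);
}

static uint32_t crc32_end(uint32_t crc) { return crc ^ 0xffffffff; }

// typical split calculation, e.g. one step per block of sample data written:
//     crc32_init_table();
//     uint32_t crc;
//     crc32_begin(crc);
//     crc32_feed(crc, blockA, sizeA);
//     crc32_feed(crc, blockB, sizeB);
//     uint32_t checksum = crc32_end(crc); // value that ends up in the '3crc' chunk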
+ * + * @param crc - variable previously passed to __calculateCRC() + */ + inline static uint32_t __encodeCRC(const uint32_t& crc) { + return crc ^ 0xffffffff; + } + + + +// *************** Other Internal functions *************** +// * + + static split_type_t __resolveSplitType(dimension_t dimension) { + return ( + dimension == dimension_layer || + dimension == dimension_samplechannel || + dimension == dimension_releasetrigger || + dimension == dimension_keyboard || + dimension == dimension_roundrobin || + dimension == dimension_random || + dimension == dimension_smartmidi || + dimension == dimension_roundrobinkeyboard + ) ? split_type_bit : split_type_normal; + } + + static int __resolveZoneSize(dimension_def_t& dimension_definition) { + return (dimension_definition.split_type == split_type_normal) + ? int(128.0 / dimension_definition.zones) : 0; + } + + + // *************** Sample *************** // * @@ -275,15 +366,23 @@ * is located, 0 otherwise */ Sample::Sample(File* pFile, RIFF::List* waveList, unsigned long WavePoolOffset, unsigned long fileNo) : DLS::Sample((DLS::File*) pFile, waveList, WavePoolOffset) { + static const DLS::Info::FixedStringLength fixedStringLengths[] = { + { CHUNK_ID_INAM, 64 }, + { 0, 0 } + }; + pInfo->FixedStringLengths = fixedStringLengths; Instances++; FileNo = fileNo; + __resetCRC(crc); + pCk3gix = waveList->GetSubChunk(CHUNK_ID_3GIX); if (pCk3gix) { - SampleGroup = pCk3gix->ReadInt16(); + uint16_t iSampleGroup = pCk3gix->ReadInt16(); + pGroup = pFile->GetGroup(iSampleGroup); } else { // '3gix' chunk missing - // use default value(s) - SampleGroup = 0; + // by default assigned to that mandatory "Default Group" + pGroup = pFile->GetGroup(0); } pCkSmpl = waveList->GetSubChunk(CHUNK_ID_SMPL); @@ -307,12 +406,14 @@ // use default values Manufacturer = 0; Product = 0; - SamplePeriod = 1 / SamplesPerSecond; - MIDIUnityNote = 64; + SamplePeriod = uint32_t(1000000000.0 / SamplesPerSecond + 0.5); + MIDIUnityNote = 60; FineTune = 0; + SMPTEFormat = smpte_format_no_offset; SMPTEOffset = 0; Loops = 0; LoopID = 0; + LoopType = loop_type_normal; LoopStart = 0; LoopEnd = 0; LoopFraction = 0; @@ -348,7 +449,7 @@ } FrameOffset = 0; // just for streaming compressed samples - LoopSize = LoopEnd - LoopStart; + LoopSize = LoopEnd - LoopStart + 1; } /** @@ -358,7 +459,7 @@ * Usually there is absolutely no need to call this method explicitly. * It will be called automatically when File::Save() was called. 
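A side note on the corrected SamplePeriod default above: the 'smpl' chunk stores the period in nanoseconds per sample point, so the old 1 / SamplesPerSecond (which truncated to zero in integer math) is replaced by a scaled, rounded value. A minimal sketch of that conversion; the helper name samplePeriodNs is hypothetical.

#include <stdint.h>

// nanoseconds per sample point, rounded to nearest, as stored in 'smpl'
static uint32_t samplePeriodNs(uint32_t samplesPerSecond) {
    return uint32_t(1000000000.0 / samplesPerSecond + 0.5);
}
// e.g. 44100 Hz -> 22676 ns, 48000 Hz -> 20833 ns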
* - * @throws DLS::Exception if FormatTag != WAVE_FORMAT_PCM or no sample data + * @throws DLS::Exception if FormatTag != DLS_WAVE_FORMAT_PCM or no sample data * was provided yet * @throws gig::Exception if there is any invalid sample setting */ @@ -368,34 +469,50 @@ // make sure 'smpl' chunk exists pCkSmpl = pWaveList->GetSubChunk(CHUNK_ID_SMPL); - if (!pCkSmpl) pCkSmpl = pWaveList->AddSubChunk(CHUNK_ID_SMPL, 60); + if (!pCkSmpl) { + pCkSmpl = pWaveList->AddSubChunk(CHUNK_ID_SMPL, 60); + memset(pCkSmpl->LoadChunkData(), 0, 60); + } // update 'smpl' chunk uint8_t* pData = (uint8_t*) pCkSmpl->LoadChunkData(); - SamplePeriod = 1 / SamplesPerSecond; - memcpy(&pData[0], &Manufacturer, 4); - memcpy(&pData[4], &Product, 4); - memcpy(&pData[8], &SamplePeriod, 4); - memcpy(&pData[12], &MIDIUnityNote, 4); - memcpy(&pData[16], &FineTune, 4); - memcpy(&pData[20], &SMPTEFormat, 4); - memcpy(&pData[24], &SMPTEOffset, 4); - memcpy(&pData[28], &Loops, 4); + SamplePeriod = uint32_t(1000000000.0 / SamplesPerSecond + 0.5); + store32(&pData[0], Manufacturer); + store32(&pData[4], Product); + store32(&pData[8], SamplePeriod); + store32(&pData[12], MIDIUnityNote); + store32(&pData[16], FineTune); + store32(&pData[20], SMPTEFormat); + store32(&pData[24], SMPTEOffset); + store32(&pData[28], Loops); // we skip 'manufByt' for now (4 bytes) - memcpy(&pData[36], &LoopID, 4); - memcpy(&pData[40], &LoopType, 4); - memcpy(&pData[44], &LoopStart, 4); - memcpy(&pData[48], &LoopEnd, 4); - memcpy(&pData[52], &LoopFraction, 4); - memcpy(&pData[56], &LoopPlayCount, 4); + store32(&pData[36], LoopID); + store32(&pData[40], LoopType); + store32(&pData[44], LoopStart); + store32(&pData[48], LoopEnd); + store32(&pData[52], LoopFraction); + store32(&pData[56], LoopPlayCount); // make sure '3gix' chunk exists pCk3gix = pWaveList->GetSubChunk(CHUNK_ID_3GIX); if (!pCk3gix) pCk3gix = pWaveList->AddSubChunk(CHUNK_ID_3GIX, 4); + // determine appropriate sample group index (to be stored in chunk) + uint16_t iSampleGroup = 0; // 0 refers to default sample group + File* pFile = static_cast(pParent); + if (pFile->pGroups) { + std::list::iterator iter = pFile->pGroups->begin(); + std::list::iterator end = pFile->pGroups->end(); + for (int i = 0; iter != end; i++, iter++) { + if (*iter == pGroup) { + iSampleGroup = i; + break; // found + } + } + } // update '3gix' chunk pData = (uint8_t*) pCk3gix->LoadChunkData(); - memcpy(&pData[0], &SampleGroup, 2); + store16(&pData[0], iSampleGroup); } /// Scans compressed samples for mandatory informations (e.g. actual number of total sample points). @@ -616,13 +733,13 @@ * enlarged samples before calling File::Save() as this might exceed the * current sample's boundary! * - * Also note: only WAVE_FORMAT_PCM is currently supported, that is - * FormatTag must be WAVE_FORMAT_PCM. Trying to resize samples with + * Also note: only DLS_WAVE_FORMAT_PCM is currently supported, that is + * FormatTag must be DLS_WAVE_FORMAT_PCM. Trying to resize samples with * other formats will fail! 
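The store16()/store32()/store24() calls used above write values byte by byte in little-endian order, so the chunk layout no longer depends on host endianness the way the earlier memcpy() calls did. A sketch of such helpers under that assumption; these stand-ins (le_store16, le_store32, le_store24) are illustrative only, libgig's own helpers may differ in detail.

#include <stdint.h>

static inline void le_store16(unsigned char* p, uint16_t v) {
    p[0] = v; p[1] = v >> 8;
}

static inline void le_store32(unsigned char* p, uint32_t v) {
    p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

static inline void le_store24(unsigned char* p, int v) {   // mirrors store24() above
    p[0] = v; p[1] = v >> 8; p[2] = v >> 16;
}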
* * @param iNewSize - new sample wave data size in sample points (must be * greater than zero) - * @throws DLS::Excecption if FormatTag != WAVE_FORMAT_PCM + * @throws DLS::Excecption if FormatTag != DLS_WAVE_FORMAT_PCM * or if \a iNewSize is less than 1 * @throws gig::Exception if existing sample is compressed * @see DLS::Sample::GetSize(), DLS::Sample::FrameSize, @@ -722,139 +839,146 @@ * @param SampleCount number of sample points to read * @param pPlaybackState will be used to store and reload the playback * state for the next ReadAndLoop() call + * @param pDimRgn dimension region with looping information * @param pExternalDecompressionBuffer (optional) external buffer to use for decompression * @returns number of successfully read sample points * @see CreateDecompressionBuffer() */ - unsigned long Sample::ReadAndLoop(void* pBuffer, unsigned long SampleCount, playback_state_t* pPlaybackState, buffer_t* pExternalDecompressionBuffer) { + unsigned long Sample::ReadAndLoop(void* pBuffer, unsigned long SampleCount, playback_state_t* pPlaybackState, + DimensionRegion* pDimRgn, buffer_t* pExternalDecompressionBuffer) { unsigned long samplestoread = SampleCount, totalreadsamples = 0, readsamples, samplestoloopend; uint8_t* pDst = (uint8_t*) pBuffer; SetPos(pPlaybackState->position); // recover position from the last time - if (this->Loops && GetPos() <= this->LoopEnd) { // honor looping if there are loop points defined + if (pDimRgn->SampleLoops) { // honor looping if there are loop points defined - switch (this->LoopType) { + const DLS::sample_loop_t& loop = pDimRgn->pSampleLoops[0]; + const uint32_t loopEnd = loop.LoopStart + loop.LoopLength; - case loop_type_bidirectional: { //TODO: not tested yet! - do { - // if not endless loop check if max. number of loop cycles have been passed - if (this->LoopPlayCount && !pPlaybackState->loop_cycles_left) break; - - if (!pPlaybackState->reverse) { // forward playback - do { - samplestoloopend = this->LoopEnd - GetPos(); - readsamples = Read(&pDst[totalreadsamples * this->FrameSize], Min(samplestoread, samplestoloopend), pExternalDecompressionBuffer); - samplestoread -= readsamples; - totalreadsamples += readsamples; - if (readsamples == samplestoloopend) { - pPlaybackState->reverse = true; - break; - } - } while (samplestoread && readsamples); - } - else { // backward playback + if (GetPos() <= loopEnd) { + switch (loop.LoopType) { - // as we can only read forward from disk, we have to - // determine the end position within the loop first, - // read forward from that 'end' and finally after - // reading, swap all sample frames so it reflects - // backward playback - - unsigned long swapareastart = totalreadsamples; - unsigned long loopoffset = GetPos() - this->LoopStart; - unsigned long samplestoreadinloop = Min(samplestoread, loopoffset); - unsigned long reverseplaybackend = GetPos() - samplestoreadinloop; - - SetPos(reverseplaybackend); - - // read samples for backward playback - do { - readsamples = Read(&pDst[totalreadsamples * this->FrameSize], samplestoreadinloop, pExternalDecompressionBuffer); - samplestoreadinloop -= readsamples; - samplestoread -= readsamples; - totalreadsamples += readsamples; - } while (samplestoreadinloop && readsamples); + case loop_type_bidirectional: { //TODO: not tested yet! + do { + // if not endless loop check if max. 
number of loop cycles have been passed + if (this->LoopPlayCount && !pPlaybackState->loop_cycles_left) break; + + if (!pPlaybackState->reverse) { // forward playback + do { + samplestoloopend = loopEnd - GetPos(); + readsamples = Read(&pDst[totalreadsamples * this->FrameSize], Min(samplestoread, samplestoloopend), pExternalDecompressionBuffer); + samplestoread -= readsamples; + totalreadsamples += readsamples; + if (readsamples == samplestoloopend) { + pPlaybackState->reverse = true; + break; + } + } while (samplestoread && readsamples); + } + else { // backward playback - SetPos(reverseplaybackend); // pretend we really read backwards + // as we can only read forward from disk, we have to + // determine the end position within the loop first, + // read forward from that 'end' and finally after + // reading, swap all sample frames so it reflects + // backward playback + + unsigned long swapareastart = totalreadsamples; + unsigned long loopoffset = GetPos() - loop.LoopStart; + unsigned long samplestoreadinloop = Min(samplestoread, loopoffset); + unsigned long reverseplaybackend = GetPos() - samplestoreadinloop; + + SetPos(reverseplaybackend); + + // read samples for backward playback + do { + readsamples = Read(&pDst[totalreadsamples * this->FrameSize], samplestoreadinloop, pExternalDecompressionBuffer); + samplestoreadinloop -= readsamples; + samplestoread -= readsamples; + totalreadsamples += readsamples; + } while (samplestoreadinloop && readsamples); + + SetPos(reverseplaybackend); // pretend we really read backwards + + if (reverseplaybackend == loop.LoopStart) { + pPlaybackState->loop_cycles_left--; + pPlaybackState->reverse = false; + } - if (reverseplaybackend == this->LoopStart) { - pPlaybackState->loop_cycles_left--; - pPlaybackState->reverse = false; + // reverse the sample frames for backward playback + SwapMemoryArea(&pDst[swapareastart * this->FrameSize], (totalreadsamples - swapareastart) * this->FrameSize, this->FrameSize); } + } while (samplestoread && readsamples); + break; + } - // reverse the sample frames for backward playback - SwapMemoryArea(&pDst[swapareastart * this->FrameSize], (totalreadsamples - swapareastart) * this->FrameSize, this->FrameSize); - } - } while (samplestoread && readsamples); - break; - } - - case loop_type_backward: { // TODO: not tested yet! - // forward playback (not entered the loop yet) - if (!pPlaybackState->reverse) do { - samplestoloopend = this->LoopEnd - GetPos(); - readsamples = Read(&pDst[totalreadsamples * this->FrameSize], Min(samplestoread, samplestoloopend), pExternalDecompressionBuffer); - samplestoread -= readsamples; - totalreadsamples += readsamples; - if (readsamples == samplestoloopend) { - pPlaybackState->reverse = true; - break; - } - } while (samplestoread && readsamples); + case loop_type_backward: { // TODO: not tested yet! 
+ // forward playback (not entered the loop yet) + if (!pPlaybackState->reverse) do { + samplestoloopend = loopEnd - GetPos(); + readsamples = Read(&pDst[totalreadsamples * this->FrameSize], Min(samplestoread, samplestoloopend), pExternalDecompressionBuffer); + samplestoread -= readsamples; + totalreadsamples += readsamples; + if (readsamples == samplestoloopend) { + pPlaybackState->reverse = true; + break; + } + } while (samplestoread && readsamples); - if (!samplestoread) break; + if (!samplestoread) break; - // as we can only read forward from disk, we have to - // determine the end position within the loop first, - // read forward from that 'end' and finally after - // reading, swap all sample frames so it reflects - // backward playback - - unsigned long swapareastart = totalreadsamples; - unsigned long loopoffset = GetPos() - this->LoopStart; - unsigned long samplestoreadinloop = (this->LoopPlayCount) ? Min(samplestoread, pPlaybackState->loop_cycles_left * LoopSize - loopoffset) - : samplestoread; - unsigned long reverseplaybackend = this->LoopStart + Abs((loopoffset - samplestoreadinloop) % this->LoopSize); - - SetPos(reverseplaybackend); - - // read samples for backward playback - do { - // if not endless loop check if max. number of loop cycles have been passed - if (this->LoopPlayCount && !pPlaybackState->loop_cycles_left) break; - samplestoloopend = this->LoopEnd - GetPos(); - readsamples = Read(&pDst[totalreadsamples * this->FrameSize], Min(samplestoreadinloop, samplestoloopend), pExternalDecompressionBuffer); - samplestoreadinloop -= readsamples; - samplestoread -= readsamples; - totalreadsamples += readsamples; - if (readsamples == samplestoloopend) { - pPlaybackState->loop_cycles_left--; - SetPos(this->LoopStart); - } - } while (samplestoreadinloop && readsamples); + // as we can only read forward from disk, we have to + // determine the end position within the loop first, + // read forward from that 'end' and finally after + // reading, swap all sample frames so it reflects + // backward playback + + unsigned long swapareastart = totalreadsamples; + unsigned long loopoffset = GetPos() - loop.LoopStart; + unsigned long samplestoreadinloop = (this->LoopPlayCount) ? Min(samplestoread, pPlaybackState->loop_cycles_left * loop.LoopLength - loopoffset) + : samplestoread; + unsigned long reverseplaybackend = loop.LoopStart + Abs((loopoffset - samplestoreadinloop) % loop.LoopLength); + + SetPos(reverseplaybackend); + + // read samples for backward playback + do { + // if not endless loop check if max. 
number of loop cycles have been passed + if (this->LoopPlayCount && !pPlaybackState->loop_cycles_left) break; + samplestoloopend = loopEnd - GetPos(); + readsamples = Read(&pDst[totalreadsamples * this->FrameSize], Min(samplestoreadinloop, samplestoloopend), pExternalDecompressionBuffer); + samplestoreadinloop -= readsamples; + samplestoread -= readsamples; + totalreadsamples += readsamples; + if (readsamples == samplestoloopend) { + pPlaybackState->loop_cycles_left--; + SetPos(loop.LoopStart); + } + } while (samplestoreadinloop && readsamples); - SetPos(reverseplaybackend); // pretend we really read backwards + SetPos(reverseplaybackend); // pretend we really read backwards - // reverse the sample frames for backward playback - SwapMemoryArea(&pDst[swapareastart * this->FrameSize], (totalreadsamples - swapareastart) * this->FrameSize, this->FrameSize); - break; - } + // reverse the sample frames for backward playback + SwapMemoryArea(&pDst[swapareastart * this->FrameSize], (totalreadsamples - swapareastart) * this->FrameSize, this->FrameSize); + break; + } - default: case loop_type_normal: { - do { - // if not endless loop check if max. number of loop cycles have been passed - if (this->LoopPlayCount && !pPlaybackState->loop_cycles_left) break; - samplestoloopend = this->LoopEnd - GetPos(); - readsamples = Read(&pDst[totalreadsamples * this->FrameSize], Min(samplestoread, samplestoloopend), pExternalDecompressionBuffer); - samplestoread -= readsamples; - totalreadsamples += readsamples; - if (readsamples == samplestoloopend) { - pPlaybackState->loop_cycles_left--; - SetPos(this->LoopStart); - } - } while (samplestoread && readsamples); - break; + default: case loop_type_normal: { + do { + // if not endless loop check if max. number of loop cycles have been passed + if (this->LoopPlayCount && !pPlaybackState->loop_cycles_left) break; + samplestoloopend = loopEnd - GetPos(); + readsamples = Read(&pDst[totalreadsamples * this->FrameSize], Min(samplestoread, samplestoloopend), pExternalDecompressionBuffer); + samplestoread -= readsamples; + totalreadsamples += readsamples; + if (readsamples == samplestoloopend) { + pPlaybackState->loop_cycles_left--; + SetPos(loop.LoopStart); + } + } while (samplestoread && readsamples); + break; + } } } } @@ -884,6 +1008,10 @@ * have to use an external decompression buffer for EACH * streaming thread to avoid race conditions and crashes! * + * For 16 bit samples, the data in the buffer will be int16_t + * (using native endianness). For 24 bit, the buffer will + * contain three bytes per sample, little-endian. + * * @param pBuffer destination buffer * @param SampleCount number of sample points to read * @param pExternalDecompressionBuffer (optional) external buffer to use for decompression @@ -894,27 +1022,7 @@ if (SampleCount == 0) return 0; if (!Compressed) { if (BitDepth == 24) { - // 24 bit sample. For now just truncate to 16 bit. - unsigned char* pSrc = (unsigned char*) ((pExternalDecompressionBuffer) ? 
pExternalDecompressionBuffer->pStart : this->InternalDecompressionBuffer.pStart); - int16_t* pDst = static_cast(pBuffer); - if (Channels == 2) { // Stereo - unsigned long readBytes = pCkData->Read(pSrc, SampleCount * 6, 1); - pSrc++; - for (unsigned long i = readBytes ; i > 0 ; i -= 3) { - *pDst++ = get16(pSrc); - pSrc += 3; - } - return (pDst - static_cast(pBuffer)) >> 1; - } - else { // Mono - unsigned long readBytes = pCkData->Read(pSrc, SampleCount * 3, 1); - pSrc++; - for (unsigned long i = readBytes ; i > 0 ; i -= 3) { - *pDst++ = get16(pSrc); - pSrc += 3; - } - return pDst - static_cast(pBuffer); - } + return pCkData->Read(pBuffer, SampleCount * FrameSize, 1) / FrameSize; } else { // 16 bit // (pCkData->Read does endian correction) @@ -944,6 +1052,7 @@ unsigned char* pSrc = (unsigned char*) pDecompressionBuffer->pStart; int16_t* pDst = static_cast(pBuffer); + uint8_t* pDst24 = static_cast(pBuffer); remainingbytes = pCkData->Read(pSrc, assumedsize, 1); while (remainingsamples && remainingbytes) { @@ -1025,16 +1134,16 @@ const unsigned char* const param_r = pSrc; if (mode_r != 2) pSrc += 12; - Decompress24(mode_l, param_l, 2, pSrc, pDst, + Decompress24(mode_l, param_l, 6, pSrc, pDst24, skipsamples, copysamples, TruncatedBits); - Decompress24(mode_r, param_r, 2, pSrc + rightChannelOffset, pDst + 1, + Decompress24(mode_r, param_r, 6, pSrc + rightChannelOffset, pDst24 + 3, skipsamples, copysamples, TruncatedBits); - pDst += copysamples << 1; + pDst24 += copysamples * 6; } else { // Mono - Decompress24(mode_l, param_l, 1, pSrc, pDst, + Decompress24(mode_l, param_l, 3, pSrc, pDst24, skipsamples, copysamples, TruncatedBits); - pDst += copysamples; + pDst24 += copysamples * 3; } } else { // 16 bit @@ -1088,6 +1197,10 @@ * * Note: there is currently no support for writing compressed samples. * + * For 16 bit samples, the data in the source buffer should be + * int16_t (using native endianness). For 24 bit, the buffer + * should contain three bytes per sample, little-endian. + * * @param pBuffer - source buffer * @param SampleCount - number of sample points to write * @throws DLS::Exception if current sample size is too small @@ -1096,7 +1209,29 @@ */ unsigned long Sample::Write(void* pBuffer, unsigned long SampleCount) { if (Compressed) throw gig::Exception("There is no support for writing compressed gig samples (yet)"); - return DLS::Sample::Write(pBuffer, SampleCount); + + // if this is the first write in this sample, reset the + // checksum calculator + if (pCkData->GetPos() == 0) { + __resetCRC(crc); + } + if (GetSize() < SampleCount) throw Exception("Could not write sample data, current sample size to small"); + unsigned long res; + if (BitDepth == 24) { + res = pCkData->Write(pBuffer, SampleCount * FrameSize, 1) / FrameSize; + } else { // 16 bit + res = Channels == 2 ? pCkData->Write(pBuffer, SampleCount << 1, 2) >> 1 + : pCkData->Write(pBuffer, SampleCount, 2); + } + __calculateCRC((unsigned char *)pBuffer, SampleCount * FrameSize, crc); + + // if this is the last write, update the checksum chunk in the + // file + if (pCkData->GetPos() == pCkData->GetSize()) { + File* pFile = static_cast(GetParent()); + pFile->SetSampleChecksum(this, __encodeCRC(crc)); + } + return res; } /** @@ -1141,6 +1276,18 @@ } } + /** + * Returns pointer to the Group this Sample belongs to. In the .gig + * format a sample always belongs to one group. If it wasn't explicitly + * assigned to a certain group, it will be automatically assigned to a + * default group. 
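The buffer layout documented above for Read() and Write() with 24 bit samples is three little-endian bytes per sample point, channels interleaved, so a stereo frame occupies 6 bytes: L0 L1 L2 R0 R1 R2. A small sketch of how such frames map to plain integers; unpack24 and pack24 are hypothetical names mirroring get24() and store24().

#include <stdint.h>

static inline int32_t unpack24(const unsigned char* p) {   // like get24()
    int32_t x = p[0] | (p[1] << 8) | (p[2] << 16);
    return (x & 0x800000) ? x - 0x1000000 : x;             // sign extend
}

static inline void pack24(unsigned char* p, int32_t x) {   // like store24()
    p[0] = x; p[1] = x >> 8; p[2] = x >> 16;
}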
+ * + * @returns Sample's Group (never NULL) + */ + Group* Sample::GetGroup() const { + return pGroup; + } + Sample::~Sample() { Instances--; if (!Instances && InternalDecompressionBuffer.Size) { @@ -1160,17 +1307,20 @@ uint DimensionRegion::Instances = 0; DimensionRegion::VelocityTableMap* DimensionRegion::pVelocityTables = NULL; - DimensionRegion::DimensionRegion(RIFF::List* _3ewl) : DLS::Sampler(_3ewl) { + DimensionRegion::DimensionRegion(Region* pParent, RIFF::List* _3ewl) : DLS::Sampler(_3ewl) { Instances++; pSample = NULL; + pRegion = pParent; + + if (_3ewl->GetSubChunk(CHUNK_ID_WSMP)) memcpy(&Crossfade, &SamplerOptions, 4); + else memset(&Crossfade, 0, 4); - memcpy(&Crossfade, &SamplerOptions, 4); if (!pVelocityTables) pVelocityTables = new VelocityTableMap; RIFF::Chunk* _3ewa = _3ewl->GetSubChunk(CHUNK_ID_3EWA); if (_3ewa) { // if '3ewa' chunk exists - _3ewa->ReadInt32(); // unknown, always 0x0000008C ? + _3ewa->ReadInt32(); // unknown, always == chunk size ? LFO3Frequency = (double) GIG_EXP_DECODE(_3ewa->ReadInt32()); EG3Attack = (double) GIG_EXP_DECODE(_3ewa->ReadInt32()); _3ewa->ReadInt16(); // unknown @@ -1315,6 +1465,11 @@ if (lfo3ctrl & 0x40) // bit 6 VCFType = vcf_type_lowpassturbo; } + if (_3ewa->RemainingBytes() >= 8) { + _3ewa->Read(DimensionUpperLimits, 1, 8); + } else { + memset(DimensionUpperLimits, 0, 8); + } } else { // '3ewa' chunk does not exist yet // use default values LFO3Frequency = 1.0; @@ -1324,9 +1479,9 @@ LFO1ControlDepth = 0; LFO3ControlDepth = 0; EG1Attack = 0.0; - EG1Decay1 = 0.0; - EG1Sustain = 0; - EG1Release = 0.0; + EG1Decay1 = 0.005; + EG1Sustain = 1000; + EG1Release = 0.3; EG1Controller.type = eg1_ctrl_t::type_none; EG1Controller.controller_number = 0; EG1ControllerInvert = false; @@ -1341,18 +1496,18 @@ EG2ControllerReleaseInfluence = 0; LFO1Frequency = 1.0; EG2Attack = 0.0; - EG2Decay1 = 0.0; - EG2Sustain = 0; - EG2Release = 0.0; + EG2Decay1 = 0.005; + EG2Sustain = 1000; + EG2Release = 0.3; LFO2ControlDepth = 0; LFO2Frequency = 1.0; LFO2InternalDepth = 0; EG1Decay2 = 0.0; - EG1InfiniteSustain = false; - EG1PreAttack = 1000; + EG1InfiniteSustain = true; + EG1PreAttack = 0; EG2Decay2 = 0.0; - EG2InfiniteSustain = false; - EG2PreAttack = 1000; + EG2InfiniteSustain = true; + EG2PreAttack = 0; VelocityResponseCurve = curve_type_nonlinear; VelocityResponseDepth = 3; ReleaseVelocityResponseCurve = curve_type_nonlinear; @@ -1395,41 +1550,56 @@ VCFVelocityDynamicRange = 0x04; VCFVelocityCurve = curve_type_linear; VCFType = vcf_type_lowpass; + memset(DimensionUpperLimits, 127, 8); } pVelocityAttenuationTable = GetVelocityTable(VelocityResponseCurve, VelocityResponseDepth, VelocityResponseCurveScaling); - curve_type_t curveType = ReleaseVelocityResponseCurve; - uint8_t depth = ReleaseVelocityResponseDepth; + pVelocityReleaseTable = GetReleaseVelocityTable( + ReleaseVelocityResponseCurve, + ReleaseVelocityResponseDepth + ); + + pVelocityCutoffTable = GetCutoffVelocityTable(VCFVelocityCurve, + VCFVelocityDynamicRange, + VCFVelocityScale, + VCFCutoffController); - // this models a strange behaviour or bug in GSt: two of the - // velocity response curves for release time are not used even - // if specified, instead another curve is chosen. 
- if ((curveType == curve_type_nonlinear && depth == 0) || - (curveType == curve_type_special && depth == 4)) { - curveType = curve_type_nonlinear; - depth = 3; - } - pVelocityReleaseTable = GetVelocityTable(curveType, depth, 0); + SampleAttenuation = pow(10.0, -Gain / (20.0 * 655360)); + VelocityTable = 0; + } - curveType = VCFVelocityCurve; - depth = VCFVelocityDynamicRange; + /* + * Constructs a DimensionRegion by copying all parameters from + * another DimensionRegion + */ + DimensionRegion::DimensionRegion(RIFF::List* _3ewl, const DimensionRegion& src) : DLS::Sampler(_3ewl) { + Instances++; + *this = src; // default memberwise shallow copy of all parameters + pParentList = _3ewl; // restore the chunk pointer - // even stranger GSt: two of the velocity response curves for - // filter cutoff are not used, instead another special curve - // is chosen. This curve is not used anywhere else. - if ((curveType == curve_type_nonlinear && depth == 0) || - (curveType == curve_type_special && depth == 4)) { - curveType = curve_type_special; - depth = 5; + // deep copy of owned structures + if (src.VelocityTable) { + VelocityTable = new uint8_t[128]; + for (int k = 0 ; k < 128 ; k++) + VelocityTable[k] = src.VelocityTable[k]; } - pVelocityCutoffTable = GetVelocityTable(curveType, depth, - VCFCutoffController <= vcf_cutoff_ctrl_none2 ? VCFVelocityScale : 0); + if (src.pSampleLoops) { + pSampleLoops = new DLS::sample_loop_t[src.SampleLoops]; + for (int k = 0 ; k < src.SampleLoops ; k++) + pSampleLoops[k] = src.pSampleLoops[k]; + } + } + /** + * Updates the respective member variable and updates @c SampleAttenuation + * which depends on this value. + */ + void DimensionRegion::SetGain(int32_t gain) { + DLS::Sampler::SetGain(gain); SampleAttenuation = pow(10.0, -Gain / (20.0 * 655360)); - VelocityTable = 0; } /** @@ -1443,111 +1613,122 @@ // first update base class's chunk DLS::Sampler::UpdateChunks(); + RIFF::Chunk* wsmp = pParentList->GetSubChunk(CHUNK_ID_WSMP); + uint8_t* pData = (uint8_t*) wsmp->LoadChunkData(); + pData[12] = Crossfade.in_start; + pData[13] = Crossfade.in_end; + pData[14] = Crossfade.out_start; + pData[15] = Crossfade.out_end; + // make sure '3ewa' chunk exists RIFF::Chunk* _3ewa = pParentList->GetSubChunk(CHUNK_ID_3EWA); - if (!_3ewa) _3ewa = pParentList->AddSubChunk(CHUNK_ID_3EWA, 140); - uint8_t* pData = (uint8_t*) _3ewa->LoadChunkData(); + if (!_3ewa) { + File* pFile = (File*) GetParent()->GetParent()->GetParent(); + bool version3 = pFile->pVersion && pFile->pVersion->major == 3; + _3ewa = pParentList->AddSubChunk(CHUNK_ID_3EWA, version3 ? 148 : 140); + } + pData = (uint8_t*) _3ewa->LoadChunkData(); // update '3ewa' chunk with DimensionRegion's current settings - const uint32_t unknown = 0x0000008C; // unknown, always 0x0000008C ? - memcpy(&pData[0], &unknown, 4); + const uint32_t chunksize = _3ewa->GetNewSize(); + store32(&pData[0], chunksize); // unknown, always chunk size? 
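Regarding the SampleAttenuation value cached above and recomputed in SetGain(): Gain is stored in 1/655360 dB steps, so the linear amplitude factor is 10^(-Gain / (20 * 655360)). A one-function sketch of that relation; the name gainToAmplitude is hypothetical.

#include <math.h>
#include <stdint.h>

static double gainToAmplitude(int32_t gain) {
    return pow(10.0, -gain / (20.0 * 655360.0));
}
// gain == 0 yields 1.0 (unity); gain == 6 * 655360 yields about 0.5 (roughly -6 dB)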
const int32_t lfo3freq = (int32_t) GIG_EXP_ENCODE(LFO3Frequency); - memcpy(&pData[4], &lfo3freq, 4); + store32(&pData[4], lfo3freq); const int32_t eg3attack = (int32_t) GIG_EXP_ENCODE(EG3Attack); - memcpy(&pData[4], &eg3attack, 4); + store32(&pData[8], eg3attack); // next 2 bytes unknown - memcpy(&pData[10], &LFO1InternalDepth, 2); + store16(&pData[14], LFO1InternalDepth); // next 2 bytes unknown - memcpy(&pData[14], &LFO3InternalDepth, 2); + store16(&pData[18], LFO3InternalDepth); // next 2 bytes unknown - memcpy(&pData[18], &LFO1ControlDepth, 2); + store16(&pData[22], LFO1ControlDepth); // next 2 bytes unknown - memcpy(&pData[22], &LFO3ControlDepth, 2); + store16(&pData[26], LFO3ControlDepth); const int32_t eg1attack = (int32_t) GIG_EXP_ENCODE(EG1Attack); - memcpy(&pData[24], &eg1attack, 4); + store32(&pData[28], eg1attack); const int32_t eg1decay1 = (int32_t) GIG_EXP_ENCODE(EG1Decay1); - memcpy(&pData[28], &eg1decay1, 4); + store32(&pData[32], eg1decay1); // next 2 bytes unknown - memcpy(&pData[34], &EG1Sustain, 2); + store16(&pData[38], EG1Sustain); const int32_t eg1release = (int32_t) GIG_EXP_ENCODE(EG1Release); - memcpy(&pData[36], &eg1release, 4); + store32(&pData[40], eg1release); const uint8_t eg1ctl = (uint8_t) EncodeLeverageController(EG1Controller); - memcpy(&pData[40], &eg1ctl, 1); + pData[44] = eg1ctl; const uint8_t eg1ctrloptions = - (EG1ControllerInvert) ? 0x01 : 0x00 | + (EG1ControllerInvert ? 0x01 : 0x00) | GIG_EG_CTR_ATTACK_INFLUENCE_ENCODE(EG1ControllerAttackInfluence) | GIG_EG_CTR_DECAY_INFLUENCE_ENCODE(EG1ControllerDecayInfluence) | GIG_EG_CTR_RELEASE_INFLUENCE_ENCODE(EG1ControllerReleaseInfluence); - memcpy(&pData[41], &eg1ctrloptions, 1); + pData[45] = eg1ctrloptions; const uint8_t eg2ctl = (uint8_t) EncodeLeverageController(EG2Controller); - memcpy(&pData[42], &eg2ctl, 1); + pData[46] = eg2ctl; const uint8_t eg2ctrloptions = - (EG2ControllerInvert) ? 0x01 : 0x00 | + (EG2ControllerInvert ? 0x01 : 0x00) | GIG_EG_CTR_ATTACK_INFLUENCE_ENCODE(EG2ControllerAttackInfluence) | GIG_EG_CTR_DECAY_INFLUENCE_ENCODE(EG2ControllerDecayInfluence) | GIG_EG_CTR_RELEASE_INFLUENCE_ENCODE(EG2ControllerReleaseInfluence); - memcpy(&pData[43], &eg2ctrloptions, 1); + pData[47] = eg2ctrloptions; const int32_t lfo1freq = (int32_t) GIG_EXP_ENCODE(LFO1Frequency); - memcpy(&pData[44], &lfo1freq, 4); + store32(&pData[48], lfo1freq); const int32_t eg2attack = (int32_t) GIG_EXP_ENCODE(EG2Attack); - memcpy(&pData[48], &eg2attack, 4); + store32(&pData[52], eg2attack); const int32_t eg2decay1 = (int32_t) GIG_EXP_ENCODE(EG2Decay1); - memcpy(&pData[52], &eg2decay1, 4); + store32(&pData[56], eg2decay1); // next 2 bytes unknown - memcpy(&pData[58], &EG2Sustain, 2); + store16(&pData[62], EG2Sustain); const int32_t eg2release = (int32_t) GIG_EXP_ENCODE(EG2Release); - memcpy(&pData[60], &eg2release, 4); + store32(&pData[64], eg2release); // next 2 bytes unknown - memcpy(&pData[66], &LFO2ControlDepth, 2); + store16(&pData[70], LFO2ControlDepth); const int32_t lfo2freq = (int32_t) GIG_EXP_ENCODE(LFO2Frequency); - memcpy(&pData[68], &lfo2freq, 4); + store32(&pData[72], lfo2freq); // next 2 bytes unknown - memcpy(&pData[72], &LFO2InternalDepth, 2); + store16(&pData[78], LFO2InternalDepth); const int32_t eg1decay2 = (int32_t) (EG1InfiniteSustain) ? 
0x7fffffff : (int32_t) GIG_EXP_ENCODE(EG1Decay2); - memcpy(&pData[74], &eg1decay2, 4); + store32(&pData[80], eg1decay2); // next 2 bytes unknown - memcpy(&pData[80], &EG1PreAttack, 2); + store16(&pData[86], EG1PreAttack); const int32_t eg2decay2 = (int32_t) (EG2InfiniteSustain) ? 0x7fffffff : (int32_t) GIG_EXP_ENCODE(EG2Decay2); - memcpy(&pData[82], &eg2decay2, 4); + store32(&pData[88], eg2decay2); // next 2 bytes unknown - memcpy(&pData[88], &EG2PreAttack, 2); + store16(&pData[94], EG2PreAttack); { if (VelocityResponseDepth > 4) throw Exception("VelocityResponseDepth must be between 0 and 4"); @@ -1565,7 +1746,7 @@ default: throw Exception("Could not update DimensionRegion's chunk, unknown VelocityResponseCurve selected"); } - memcpy(&pData[90], &velocityresponse, 1); + pData[96] = velocityresponse; } { @@ -1584,16 +1765,16 @@ default: throw Exception("Could not update DimensionRegion's chunk, unknown ReleaseVelocityResponseCurve selected"); } - memcpy(&pData[91], &releasevelocityresponse, 1); + pData[97] = releasevelocityresponse; } - memcpy(&pData[92], &VelocityResponseCurveScaling, 1); + pData[98] = VelocityResponseCurveScaling; - memcpy(&pData[93], &AttenuationControllerThreshold, 1); + pData[99] = AttenuationControllerThreshold; // next 4 bytes unknown - memcpy(&pData[98], &SampleStartOffset, 2); + store16(&pData[104], SampleStartOffset); // next 2 bytes unknown @@ -1612,14 +1793,14 @@ default: throw Exception("Could not update DimensionRegion's chunk, unknown DimensionBypass selected"); } - memcpy(&pData[102], &pitchTrackDimensionBypass, 1); + pData[108] = pitchTrackDimensionBypass; } const uint8_t pan = (Pan >= 0) ? Pan : ((-Pan) + 63); // signed 8 bit -> signed 7 bit - memcpy(&pData[103], &pan, 1); + pData[109] = pan; const uint8_t selfmask = (SelfMask) ? 0x01 : 0x00; - memcpy(&pData[104], &selfmask, 1); + pData[110] = selfmask; // next byte unknown @@ -1628,18 +1809,18 @@ if (LFO3Sync) lfo3ctrl |= 0x20; // bit 5 if (InvertAttenuationController) lfo3ctrl |= 0x80; // bit 7 if (VCFType == vcf_type_lowpassturbo) lfo3ctrl |= 0x40; // bit 6 - memcpy(&pData[106], &lfo3ctrl, 1); + pData[112] = lfo3ctrl; } const uint8_t attenctl = EncodeLeverageController(AttenuationController); - memcpy(&pData[107], &attenctl, 1); + pData[113] = attenctl; { uint8_t lfo2ctrl = LFO2Controller & 0x07; // lower 3 bits if (LFO2FlipPhase) lfo2ctrl |= 0x80; // bit 7 if (LFO2Sync) lfo2ctrl |= 0x20; // bit 5 if (VCFResonanceController != vcf_res_ctrl_none) lfo2ctrl |= 0x40; // bit 6 - memcpy(&pData[108], &lfo2ctrl, 1); + pData[114] = lfo2ctrl; } { @@ -1648,64 +1829,102 @@ if (LFO1Sync) lfo1ctrl |= 0x40; // bit 6 if (VCFResonanceController != vcf_res_ctrl_none) lfo1ctrl |= GIG_VCF_RESONANCE_CTRL_ENCODE(VCFResonanceController); - memcpy(&pData[109], &lfo1ctrl, 1); + pData[115] = lfo1ctrl; } const uint16_t eg3depth = (EG3Depth >= 0) ? 
EG3Depth : uint16_t(((-EG3Depth) - 1) ^ 0xffff); /* binary complementary for negatives */ - memcpy(&pData[110], &eg3depth, 1); + pData[116] = eg3depth; // next 2 bytes unknown const uint8_t channeloffset = ChannelOffset * 4; - memcpy(&pData[113], &channeloffset, 1); + pData[120] = channeloffset; { uint8_t regoptions = 0; if (MSDecode) regoptions |= 0x01; // bit 0 if (SustainDefeat) regoptions |= 0x02; // bit 1 - memcpy(&pData[114], ®options, 1); + pData[121] = regoptions; } // next 2 bytes unknown - memcpy(&pData[117], &VelocityUpperLimit, 1); + pData[124] = VelocityUpperLimit; // next 3 bytes unknown - memcpy(&pData[121], &ReleaseTriggerDecay, 1); + pData[128] = ReleaseTriggerDecay; // next 2 bytes unknown const uint8_t eg1hold = (EG1Hold) ? 0x80 : 0x00; // bit 7 - memcpy(&pData[124], &eg1hold, 1); + pData[131] = eg1hold; - const uint8_t vcfcutoff = (VCFEnabled) ? 0x80 : 0x00 | /* bit 7 */ - (VCFCutoff) ? 0x7f : 0x00; /* lower 7 bits */ - memcpy(&pData[125], &vcfcutoff, 1); + const uint8_t vcfcutoff = (VCFEnabled ? 0x80 : 0x00) | /* bit 7 */ + (VCFCutoff & 0x7f); /* lower 7 bits */ + pData[132] = vcfcutoff; - memcpy(&pData[126], &VCFCutoffController, 1); + pData[133] = VCFCutoffController; - const uint8_t vcfvelscale = (VCFCutoffControllerInvert) ? 0x80 : 0x00 | /* bit 7 */ - (VCFVelocityScale) ? 0x7f : 0x00; /* lower 7 bits */ - memcpy(&pData[127], &vcfvelscale, 1); + const uint8_t vcfvelscale = (VCFCutoffControllerInvert ? 0x80 : 0x00) | /* bit 7 */ + (VCFVelocityScale & 0x7f); /* lower 7 bits */ + pData[134] = vcfvelscale; // next byte unknown - const uint8_t vcfresonance = (VCFResonanceDynamic) ? 0x00 : 0x80 | /* bit 7 */ - (VCFResonance) ? 0x7f : 0x00; /* lower 7 bits */ - memcpy(&pData[129], &vcfresonance, 1); - - const uint8_t vcfbreakpoint = (VCFKeyboardTracking) ? 0x80 : 0x00 | /* bit 7 */ - (VCFKeyboardTrackingBreakpoint) ? 0x7f : 0x00; /* lower 7 bits */ - memcpy(&pData[130], &vcfbreakpoint, 1); + const uint8_t vcfresonance = (VCFResonanceDynamic ? 0x00 : 0x80) | /* bit 7 */ + (VCFResonance & 0x7f); /* lower 7 bits */ + pData[136] = vcfresonance; + + const uint8_t vcfbreakpoint = (VCFKeyboardTracking ? 0x80 : 0x00) | /* bit 7 */ + (VCFKeyboardTrackingBreakpoint & 0x7f); /* lower 7 bits */ + pData[137] = vcfbreakpoint; const uint8_t vcfvelocity = VCFVelocityDynamicRange % 5 | VCFVelocityCurve * 5; - memcpy(&pData[131], &vcfvelocity, 1); + pData[138] = vcfvelocity; const uint8_t vcftype = (VCFType == vcf_type_lowpassturbo) ? vcf_type_lowpass : VCFType; - memcpy(&pData[132], &vcftype, 1); + pData[139] = vcftype; + + if (chunksize >= 148) { + memcpy(&pData[140], DimensionUpperLimits, 8); + } + } + + double* DimensionRegion::GetReleaseVelocityTable(curve_type_t releaseVelocityResponseCurve, uint8_t releaseVelocityResponseDepth) { + curve_type_t curveType = releaseVelocityResponseCurve; + uint8_t depth = releaseVelocityResponseDepth; + // this models a strange behaviour or bug in GSt: two of the + // velocity response curves for release time are not used even + // if specified, instead another curve is chosen. 
+ if ((curveType == curve_type_nonlinear && depth == 0) || + (curveType == curve_type_special && depth == 4)) { + curveType = curve_type_nonlinear; + depth = 3; + } + return GetVelocityTable(curveType, depth, 0); + } + + double* DimensionRegion::GetCutoffVelocityTable(curve_type_t vcfVelocityCurve, + uint8_t vcfVelocityDynamicRange, + uint8_t vcfVelocityScale, + vcf_cutoff_ctrl_t vcfCutoffController) + { + curve_type_t curveType = vcfVelocityCurve; + uint8_t depth = vcfVelocityDynamicRange; + // even stranger GSt: two of the velocity response curves for + // filter cutoff are not used, instead another special curve + // is chosen. This curve is not used anywhere else. + if ((curveType == curve_type_nonlinear && depth == 0) || + (curveType == curve_type_special && depth == 4)) { + curveType = curve_type_special; + depth = 5; + } + return GetVelocityTable(curveType, depth, + (vcfCutoffController <= vcf_cutoff_ctrl_none2) + ? vcfVelocityScale : 0); } // get the corresponding velocity table from the table map or create & calculate that table if it doesn't exist yet @@ -1723,6 +1942,10 @@ return table; } + Region* DimensionRegion::GetParent() const { + return pRegion; + } + leverage_ctrl_t DimensionRegion::DecodeLeverageController(_lev_ctrl_t EncodedController) { leverage_ctrl_t decodedcontroller; switch (EncodedController) { @@ -1930,6 +2153,7 @@ default: throw gig::Exception("leverage controller number is not supported by the gig format"); } + break; default: throw gig::Exception("Unknown leverage controller type."); } @@ -1975,6 +2199,96 @@ return pVelocityCutoffTable[MIDIKeyVelocity]; } + /** + * Updates the respective member variable and the lookup table / cache + * that depends on this value. + */ + void DimensionRegion::SetVelocityResponseCurve(curve_type_t curve) { + pVelocityAttenuationTable = + GetVelocityTable( + curve, VelocityResponseDepth, VelocityResponseCurveScaling + ); + VelocityResponseCurve = curve; + } + + /** + * Updates the respective member variable and the lookup table / cache + * that depends on this value. + */ + void DimensionRegion::SetVelocityResponseDepth(uint8_t depth) { + pVelocityAttenuationTable = + GetVelocityTable( + VelocityResponseCurve, depth, VelocityResponseCurveScaling + ); + VelocityResponseDepth = depth; + } + + /** + * Updates the respective member variable and the lookup table / cache + * that depends on this value. + */ + void DimensionRegion::SetVelocityResponseCurveScaling(uint8_t scaling) { + pVelocityAttenuationTable = + GetVelocityTable( + VelocityResponseCurve, VelocityResponseDepth, scaling + ); + VelocityResponseCurveScaling = scaling; + } + + /** + * Updates the respective member variable and the lookup table / cache + * that depends on this value. + */ + void DimensionRegion::SetReleaseVelocityResponseCurve(curve_type_t curve) { + pVelocityReleaseTable = GetReleaseVelocityTable(curve, ReleaseVelocityResponseDepth); + ReleaseVelocityResponseCurve = curve; + } + + /** + * Updates the respective member variable and the lookup table / cache + * that depends on this value. + */ + void DimensionRegion::SetReleaseVelocityResponseDepth(uint8_t depth) { + pVelocityReleaseTable = GetReleaseVelocityTable(ReleaseVelocityResponseCurve, depth); + ReleaseVelocityResponseDepth = depth; + } + + /** + * Updates the respective member variable and the lookup table / cache + * that depends on this value. 
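The table map consulted by GetVelocityTable() above caches each computed 128-entry velocity table under its (curve type, depth, scaling) parameters, so identical curves are calculated only once and shared between DimensionRegions. A simplified sketch of that caching pattern; the key packing and the names VelocityTableCache, makeKey and lookupOrCreate are assumptions made for illustration, not libgig's exact internals.

#include <stdint.h>
#include <map>

typedef std::map<uint32_t, double*> VelocityTableCache;

static uint32_t makeKey(uint8_t curveType, uint8_t depth, uint8_t scaling) {
    return (uint32_t(curveType) << 16) | (uint32_t(depth) << 8) | scaling;
}

static double* lookupOrCreate(VelocityTableCache& cache, uint8_t curveType,
                              uint8_t depth, uint8_t scaling,
                              double* (*create)(uint8_t, uint8_t, uint8_t)) {
    VelocityTableCache::iterator it = cache.find(makeKey(curveType, depth, scaling));
    if (it != cache.end()) return it->second;           // reuse cached table
    double* table = create(curveType, depth, scaling);  // 128 doubles, one per velocity
    cache[makeKey(curveType, depth, scaling)] = table;
    return table;
}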
+ */ + void DimensionRegion::SetVCFCutoffController(vcf_cutoff_ctrl_t controller) { + pVelocityCutoffTable = GetCutoffVelocityTable(VCFVelocityCurve, VCFVelocityDynamicRange, VCFVelocityScale, controller); + VCFCutoffController = controller; + } + + /** + * Updates the respective member variable and the lookup table / cache + * that depends on this value. + */ + void DimensionRegion::SetVCFVelocityCurve(curve_type_t curve) { + pVelocityCutoffTable = GetCutoffVelocityTable(curve, VCFVelocityDynamicRange, VCFVelocityScale, VCFCutoffController); + VCFVelocityCurve = curve; + } + + /** + * Updates the respective member variable and the lookup table / cache + * that depends on this value. + */ + void DimensionRegion::SetVCFVelocityDynamicRange(uint8_t range) { + pVelocityCutoffTable = GetCutoffVelocityTable(VCFVelocityCurve, range, VCFVelocityScale, VCFCutoffController); + VCFVelocityDynamicRange = range; + } + + /** + * Updates the respective member variable and the lookup table / cache + * that depends on this value. + */ + void DimensionRegion::SetVCFVelocityScale(uint8_t scaling) { + pVelocityCutoffTable = GetCutoffVelocityTable(VCFVelocityCurve, VCFVelocityDynamicRange, scaling, VCFCutoffController); + VCFVelocityScale = scaling; + } + double* DimensionRegion::CreateVelocityTable(curve_type_t curveType, uint8_t depth, uint8_t scaling) { // line-segment approximations of the 15 velocity curves @@ -2066,8 +2380,8 @@ for (int i = 0; i < dimensionBits; i++) { dimension_t dimension = static_cast(_3lnk->ReadUint8()); uint8_t bits = _3lnk->ReadUint8(); - _3lnk->ReadUint8(); // probably the position of the dimension - _3lnk->ReadUint8(); // unknown + _3lnk->ReadUint8(); // bit position of the dimension (bits[0] + bits[1] + ... + bits[i-1]) + _3lnk->ReadUint8(); // (1 << bit position of next dimension) - (1 << bit position of this dimension) uint8_t zones = _3lnk->ReadUint8(); // new for v3: number of zones doesn't have to be == pow(2,bits) if (dimension == dimension_none) { // inactive dimension pDimensionDefinitions[i].dimension = dimension_none; @@ -2080,15 +2394,8 @@ pDimensionDefinitions[i].dimension = dimension; pDimensionDefinitions[i].bits = bits; pDimensionDefinitions[i].zones = zones ? zones : 0x01 << bits; // = pow(2,bits) - pDimensionDefinitions[i].split_type = (dimension == dimension_layer || - dimension == dimension_samplechannel || - dimension == dimension_releasetrigger || - dimension == dimension_roundrobin || - dimension == dimension_random) ? split_type_bit - : split_type_normal; - pDimensionDefinitions[i].zone_size = - (pDimensionDefinitions[i].split_type == split_type_normal) ? 
128.0 / pDimensionDefinitions[i].zones - : 0; + pDimensionDefinitions[i].split_type = __resolveSplitType(dimension); + pDimensionDefinitions[i].zone_size = __resolveZoneSize(pDimensionDefinitions[i]); Dimensions++; // if this is a layer dimension, remember the amount of layers @@ -2111,7 +2418,15 @@ // load sample references for (uint i = 0; i < DimensionRegions; i++) { uint32_t wavepoolindex = _3lnk->ReadUint32(); - pDimensionRegions[i]->pSample = GetSampleFromWavePool(wavepoolindex); + if (file->pWavePoolTable) pDimensionRegions[i]->pSample = GetSampleFromWavePool(wavepoolindex); + } + GetSample(); // load global region sample reference + } else { + DimensionRegions = 0; + for (int i = 0 ; i < 8 ; i++) { + pDimensionDefinitions[i].dimension = dimension_none; + pDimensionDefinitions[i].bits = 0; + pDimensionDefinitions[i].zones = 0; } } @@ -2120,7 +2435,7 @@ RIFF::List* _3prg = rgnList->GetSubList(LIST_TYPE_3PRG); if (!_3prg) _3prg = rgnList->AddSubList(LIST_TYPE_3PRG); RIFF::List* _3ewl = _3prg->AddSubList(LIST_TYPE_3EWL); - pDimensionRegions[0] = new DimensionRegion(_3ewl); + pDimensionRegions[0] = new DimensionRegion(this, _3ewl); DimensionRegions = 1; } } @@ -2135,6 +2450,12 @@ * @throws gig::Exception if samples cannot be dereferenced */ void Region::UpdateChunks() { + // in the gig format we don't care about the Region's sample reference + // but we still have to provide some existing one to not corrupt the + // file, so to avoid the latter we simply always assign the sample of + // the first dimension region of this region + pSample = pDimensionRegions[0]->pSample; + // first update base class's chunks DLS::Region::UpdateChunks(); @@ -2144,28 +2465,38 @@ } File* pFile = (File*) GetParent()->GetParent(); - const int iMaxDimensions = (pFile->pVersion && pFile->pVersion->major == 3) ? 8 : 5; - const int iMaxDimensionRegions = (pFile->pVersion && pFile->pVersion->major == 3) ? 256 : 32; + bool version3 = pFile->pVersion && pFile->pVersion->major == 3; + const int iMaxDimensions = version3 ? 8 : 5; + const int iMaxDimensionRegions = version3 ? 256 : 32; // make sure '3lnk' chunk exists RIFF::Chunk* _3lnk = pCkRegion->GetSubChunk(CHUNK_ID_3LNK); if (!_3lnk) { - const int _3lnkChunkSize = (pFile->pVersion && pFile->pVersion->major == 3) ? 1092 : 172; + const int _3lnkChunkSize = version3 ? 1092 : 172; _3lnk = pCkRegion->AddSubChunk(CHUNK_ID_3LNK, _3lnkChunkSize); + memset(_3lnk->LoadChunkData(), 0, _3lnkChunkSize); + + // move 3prg to last position + pCkRegion->MoveSubChunk(pCkRegion->GetSubList(LIST_TYPE_3PRG), 0); } // update dimension definitions in '3lnk' chunk uint8_t* pData = (uint8_t*) _3lnk->LoadChunkData(); + store32(&pData[0], DimensionRegions); + int shift = 0; for (int i = 0; i < iMaxDimensions; i++) { - pData[i * 8] = (uint8_t) pDimensionDefinitions[i].dimension; - pData[i * 8 + 1] = pDimensionDefinitions[i].bits; - // next 2 bytes unknown - pData[i * 8 + 4] = pDimensionDefinitions[i].zones; - // next 3 bytes unknown + pData[4 + i * 8] = (uint8_t) pDimensionDefinitions[i].dimension; + pData[5 + i * 8] = pDimensionDefinitions[i].bits; + pData[6 + i * 8] = pDimensionDefinitions[i].dimension == dimension_none ? 0 : shift; + pData[7 + i * 8] = (1 << (shift + pDimensionDefinitions[i].bits)) - (1 << shift); + pData[8 + i * 8] = pDimensionDefinitions[i].zones; + // next 3 bytes unknown, always zero? + + shift += pDimensionDefinitions[i].bits; } // update wave pool table in '3lnk' chunk - const int iWavePoolOffset = (pFile->pVersion && pFile->pVersion->major == 3) ? 
68 : 44; + const int iWavePoolOffset = version3 ? 68 : 44; for (uint i = 0; i < iMaxDimensionRegions; i++) { int iWaveIndex = -1; if (i < DimensionRegions) { @@ -2178,9 +2509,8 @@ break; } } - if (iWaveIndex < 0) throw gig::Exception("Could not update gig::Region, could not find DimensionRegion's sample"); } - memcpy(&pData[iWavePoolOffset + i * 4], &iWaveIndex, 4); + store32(&pData[iWavePoolOffset + i * 4], iWaveIndex); } } @@ -2191,7 +2521,7 @@ RIFF::List* _3ewl = _3prg->GetFirstSubList(); while (_3ewl) { if (_3ewl->GetListType() == LIST_TYPE_3EWL) { - pDimensionRegions[dimensionRegionNr] = new DimensionRegion(_3ewl); + pDimensionRegions[dimensionRegionNr] = new DimensionRegion(this, _3ewl); dimensionRegionNr++; } _3ewl = _3prg->GetNextSubList(); @@ -2200,6 +2530,13 @@ } } + void Region::SetKeyRange(uint16_t Low, uint16_t High) { + // update KeyRange struct and make sure regions are in correct order + DLS::Region::SetKeyRange(Low, High); + // update Region key table for fast lookup + ((gig::Instrument*)GetParent())->UpdateRegionKeyTable(); + } + void Region::UpdateVelocityTable() { // get velocity dimension's index int veldim = -1; @@ -2220,7 +2557,8 @@ int dim[8] = { 0 }; for (int i = 0 ; i < DimensionRegions ; i++) { - if (pDimensionRegions[i]->VelocityUpperLimit) { + if (pDimensionRegions[i]->DimensionUpperLimits[veldim] || + pDimensionRegions[i]->VelocityUpperLimit) { // create the velocity table uint8_t* table = pDimensionRegions[i]->VelocityTable; if (!table) { @@ -2229,10 +2567,18 @@ } int tableidx = 0; int velocityZone = 0; - for (int k = i ; k < end ; k += step) { - DimensionRegion *d = pDimensionRegions[k]; - for (; tableidx <= d->VelocityUpperLimit ; tableidx++) table[tableidx] = velocityZone; - velocityZone++; + if (pDimensionRegions[i]->DimensionUpperLimits[veldim]) { // gig3 + for (int k = i ; k < end ; k += step) { + DimensionRegion *d = pDimensionRegions[k]; + for (; tableidx <= d->DimensionUpperLimits[veldim] ; tableidx++) table[tableidx] = velocityZone; + velocityZone++; + } + } else { // gig2 + for (int k = i ; k < end ; k += step) { + DimensionRegion *d = pDimensionRegions[k]; + for (; tableidx <= d->VelocityUpperLimit ; tableidx++) table[tableidx] = velocityZone; + velocityZone++; + } } } else { if (pDimensionRegions[i]->VelocityTable) { @@ -2296,15 +2642,65 @@ if (pDimensionDefinitions[i].dimension == pDimDef->dimension) throw gig::Exception("Could not add new dimension, there is already a dimension of the same type"); + // pos is where the new dimension should be placed, normally + // last in list, except for the samplechannel dimension which + // has to be first in list + int pos = pDimDef->dimension == dimension_samplechannel ? 
0 : Dimensions; + int bitpos = 0; + for (int i = 0 ; i < pos ; i++) + bitpos += pDimensionDefinitions[i].bits; + + // make room for the new dimension + for (int i = Dimensions ; i > pos ; i--) pDimensionDefinitions[i] = pDimensionDefinitions[i - 1]; + for (int i = 0 ; i < (1 << iCurrentBits) ; i++) { + for (int j = Dimensions ; j > pos ; j--) { + pDimensionRegions[i]->DimensionUpperLimits[j] = + pDimensionRegions[i]->DimensionUpperLimits[j - 1]; + } + } + // assign definition of new dimension - pDimensionDefinitions[Dimensions] = *pDimDef; + pDimensionDefinitions[pos] = *pDimDef; - // create new dimension region(s) for this new dimension - for (int i = 1 << iCurrentBits; i < 1 << iNewBits; i++) { - //TODO: maybe we should copy existing dimension regions if possible instead of simply creating new ones with default values - RIFF::List* pNewDimRgnListChunk = pCkRegion->AddSubList(LIST_TYPE_3EWL); - pDimensionRegions[i] = new DimensionRegion(pNewDimRgnListChunk); - DimensionRegions++; + // auto correct certain dimension definition fields (where possible) + pDimensionDefinitions[pos].split_type = + __resolveSplitType(pDimensionDefinitions[pos].dimension); + pDimensionDefinitions[pos].zone_size = + __resolveZoneSize(pDimensionDefinitions[pos]); + + // create new dimension region(s) for this new dimension, and make + // sure that the dimension regions are placed correctly in both the + // RIFF list and the pDimensionRegions array + RIFF::Chunk* moveTo = NULL; + RIFF::List* _3prg = pCkRegion->GetSubList(LIST_TYPE_3PRG); + for (int i = (1 << iCurrentBits) - (1 << bitpos) ; i >= 0 ; i -= (1 << bitpos)) { + for (int k = 0 ; k < (1 << bitpos) ; k++) { + pDimensionRegions[(i << pDimDef->bits) + k] = pDimensionRegions[i + k]; + } + for (int j = 1 ; j < (1 << pDimDef->bits) ; j++) { + for (int k = 0 ; k < (1 << bitpos) ; k++) { + RIFF::List* pNewDimRgnListChunk = _3prg->AddSubList(LIST_TYPE_3EWL); + if (moveTo) _3prg->MoveSubChunk(pNewDimRgnListChunk, moveTo); + // create a new dimension region and copy all parameter values from + // an existing dimension region + pDimensionRegions[(i << pDimDef->bits) + (j << bitpos) + k] = + new DimensionRegion(pNewDimRgnListChunk, *pDimensionRegions[i + k]); + + DimensionRegions++; + } + } + moveTo = pDimensionRegions[i]->pParentList; + } + + // initialize the upper limits for this dimension + int mask = (1 << bitpos) - 1; + for (int z = 0 ; z < pDimDef->zones ; z++) { + uint8_t upperLimit = uint8_t((z + 1) * 128.0 / pDimDef->zones - 1); + for (int i = 0 ; i < 1 << iCurrentBits ; i++) { + pDimensionRegions[((i & ~mask) << pDimDef->bits) | + (z << bitpos) | + (i & mask)]->DimensionUpperLimits[pos] = upperLimit; + } } Dimensions++; @@ -2347,6 +2743,8 @@ for (int i = iDimensionNr + 1; i < Dimensions; i++) iUpperBits += pDimensionDefinitions[i].bits; + RIFF::List* _3prg = pCkRegion->GetSubList(LIST_TYPE_3PRG); + // delete dimension regions which belong to the given dimension // (that is where the dimension's bit > 0) for (int iUpperBit = 0; iUpperBit < 1 << iUpperBits; iUpperBit++) { @@ -2355,6 +2753,8 @@ int iToDelete = iUpperBit << (pDimensionDefinitions[iDimensionNr].bits + iLowerBits) | iObsoleteBit << iLowerBits | iLowerBit; + + _3prg->DeleteSubChunk(pDimensionRegions[iToDelete]->pParentList); delete pDimensionRegions[iToDelete]; pDimensionRegions[iToDelete] = NULL; DimensionRegions--; @@ -2375,6 +2775,15 @@ } } + // remove the this dimension from the upper limits arrays + for (int j = 0 ; j < 256 && pDimensionRegions[j] ; j++) { + DimensionRegion* d = 
pDimensionRegions[j]; + for (int i = iDimensionNr + 1; i < Dimensions; i++) { + d->DimensionUpperLimits[i - 1] = d->DimensionUpperLimits[i]; + } + d->DimensionUpperLimits[Dimensions - 1] = 127; + } + // 'remove' dimension definition for (int i = iDimensionNr + 1; i < Dimensions; i++) { pDimensionDefinitions[i - 1] = pDimensionDefinitions[i]; @@ -2427,7 +2836,15 @@ } else { switch (pDimensionDefinitions[i].split_type) { case split_type_normal: - bits = uint8_t(DimValues[i] / pDimensionDefinitions[i].zone_size); + if (pDimensionRegions[0]->DimensionUpperLimits[i]) { + // gig3: all normal dimensions (not just the velocity dimension) have custom zone ranges + for (bits = 0 ; bits < pDimensionDefinitions[i].zones ; bits++) { + if (DimValues[i] <= pDimensionRegions[bits << bitpos]->DimensionUpperLimits[i]) break; + } + } else { + // gig2: evenly sized zones + bits = uint8_t(DimValues[i] / pDimensionDefinitions[i].zone_size); + } break; case split_type_bit: // the value is already the sought dimension bit number const uint8_t limiter_mask = (0xff << pDimensionDefinitions[i].bits) ^ 0xff; @@ -2441,7 +2858,7 @@ DimensionRegion* dimreg = pDimensionRegions[dimregidx]; if (veldim != -1) { // (dimreg is now the dimension region for the lowest velocity) - if (dimreg->VelocityUpperLimit) // custom defined zone ranges + if (dimreg->VelocityTable) // custom defined zone ranges bits = dimreg->VelocityTable[DimValues[veldim]]; else // normal split type bits = uint8_t(DimValues[veldim] / pDimensionDefinitions[veldim].zone_size); @@ -2489,12 +2906,13 @@ Sample* Region::GetSampleFromWavePool(unsigned int WavePoolTableIndex, progress_t* pProgress) { if ((int32_t)WavePoolTableIndex == -1) return NULL; File* file = (File*) GetParent()->GetParent(); + if (!file->pWavePoolTable) return NULL; unsigned long soughtoffset = file->pWavePoolTable[WavePoolTableIndex]; unsigned long soughtfileno = file->pWavePoolTableHi[WavePoolTableIndex]; Sample* sample = file->GetFirstSample(pProgress); while (sample) { if (sample->ulWavePoolOffset == soughtoffset && - sample->FileNo == soughtfileno) return static_cast(pSample = sample); + sample->FileNo == soughtfileno) return static_cast(sample); sample = file->GetNextSample(); } return NULL; @@ -2506,8 +2924,22 @@ // * Instrument::Instrument(File* pFile, RIFF::List* insList, progress_t* pProgress) : DLS::Instrument((DLS::File*)pFile, insList) { + static const DLS::Info::FixedStringLength fixedStringLengths[] = { + { CHUNK_ID_INAM, 64 }, + { CHUNK_ID_ISFT, 12 }, + { 0, 0 } + }; + pInfo->FixedStringLengths = fixedStringLengths; + // Initialization for (int i = 0; i < 128; i++) RegionKeyTable[i] = NULL; + EffectSend = 0; + Attenuation = 0; + FineTune = 0; + PitchbendRange = 0; + PianoReleaseMode = false; + DimensionKeyRange.low = 0; + DimensionKeyRange.high = 0; // Loading RIFF::List* lart = insList->GetSubList(LIST_TYPE_LART); @@ -2544,6 +2976,7 @@ } void Instrument::UpdateRegionKeyTable() { + for (int i = 0; i < 128; i++) RegionKeyTable[i] = NULL; RegionList::iterator iter = pRegions->begin(); RegionList::iterator end = pRegions->end(); for (; iter != end; ++iter) { @@ -2583,17 +3016,24 @@ if (!lart) lart = pCkInstrument->AddSubList(LIST_TYPE_LART); // make sure '3ewg' RIFF chunk exists RIFF::Chunk* _3ewg = lart->GetSubChunk(CHUNK_ID_3EWG); - if (!_3ewg) _3ewg = lart->AddSubChunk(CHUNK_ID_3EWG, 12); + if (!_3ewg) { + File* pFile = (File*) GetParent(); + + // 3ewg is bigger in gig3, as it includes the iMIDI rules + int size = (pFile->pVersion && pFile->pVersion->major == 3) ? 
16416 : 12; + _3ewg = lart->AddSubChunk(CHUNK_ID_3EWG, size); + memset(_3ewg->LoadChunkData(), 0, size); + } // update '3ewg' RIFF chunk uint8_t* pData = (uint8_t*) _3ewg->LoadChunkData(); - memcpy(&pData[0], &EffectSend, 2); - memcpy(&pData[2], &Attenuation, 4); - memcpy(&pData[6], &FineTune, 2); - memcpy(&pData[8], &PitchbendRange, 2); - const uint8_t dimkeystart = (PianoReleaseMode) ? 0x01 : 0x00 | + store16(&pData[0], EffectSend); + store32(&pData[2], Attenuation); + store16(&pData[6], FineTune); + store16(&pData[8], PitchbendRange); + const uint8_t dimkeystart = (PianoReleaseMode ? 0x01 : 0x00) | DimensionKeyRange.low << 1; - memcpy(&pData[10], &dimkeystart, 1); - memcpy(&pData[11], &DimensionKeyRange.high, 1); + pData[10] = dimkeystart; + pData[11] = DimensionKeyRange.high; } /** @@ -2604,7 +3044,7 @@ * there is no Region defined for the given \a Key */ Region* Instrument::GetRegion(unsigned int Key) { - if (!pRegions || !pRegions->size() || Key > 127) return NULL; + if (!pRegions || pRegions->empty() || Key > 127) return NULL; return RegionKeyTable[Key]; /*for (int i = 0; i < Regions; i++) { @@ -2664,13 +3104,190 @@ +// *************** Group *************** +// * + + /** @brief Constructor. + * + * @param file - pointer to the gig::File object + * @param ck3gnm - pointer to 3gnm chunk associated with this group or + * NULL if this is a new Group + */ + Group::Group(File* file, RIFF::Chunk* ck3gnm) { + pFile = file; + pNameChunk = ck3gnm; + ::LoadString(pNameChunk, Name); + } + + Group::~Group() { + // remove the chunk associated with this group (if any) + if (pNameChunk) pNameChunk->GetParent()->DeleteSubChunk(pNameChunk); + } + + /** @brief Update chunks with current group settings. + * + * Apply current Group field values to the respective chunks. You have + * to call File::Save() to make changes persistent. + * + * Usually there is absolutely no need to call this method explicitly. + * It will be called automatically when File::Save() was called. + */ + void Group::UpdateChunks() { + // make sure <3gri> and <3gnl> list chunks exist + RIFF::List* _3gri = pFile->pRIFF->GetSubList(LIST_TYPE_3GRI); + if (!_3gri) { + _3gri = pFile->pRIFF->AddSubList(LIST_TYPE_3GRI); + pFile->pRIFF->MoveSubChunk(_3gri, pFile->pRIFF->GetSubChunk(CHUNK_ID_PTBL)); + } + RIFF::List* _3gnl = _3gri->GetSubList(LIST_TYPE_3GNL); + if (!_3gnl) _3gnl = _3gri->AddSubList(LIST_TYPE_3GNL); + + if (!pNameChunk && pFile->pVersion && pFile->pVersion->major == 3) { + // v3 has a fixed list of 128 strings, find a free one + for (RIFF::Chunk* ck = _3gnl->GetFirstSubChunk() ; ck ; ck = _3gnl->GetNextSubChunk()) { + if (strcmp(static_cast(ck->LoadChunkData()), "") == 0) { + pNameChunk = ck; + break; + } + } + } + + // now store the name of this group as <3gnm> chunk as subchunk of the <3gnl> list chunk + ::SaveString(CHUNK_ID_3GNM, pNameChunk, _3gnl, Name, String("Unnamed Group"), true, 64); + } + + /** + * Returns the first Sample of this Group. You have to call this method + * once before you use GetNextSample(). 
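+     *
+     * A minimal iteration sketch (assuming an already loaded gig::File
+     * object named "file"; the variable names are illustrative only):
+     *
+     *     gig::Group* pGroup = file.GetFirstGroup();
+     *     for (gig::Sample* pSample = pGroup->GetFirstSample(); pSample;
+     *          pSample = pGroup->GetNextSample()) {
+     *         std::cout << pSample->pInfo->Name << std::endl;
+     *     }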
+ * + * Notice: this method might block for a long time, in case the + * samples of this .gig file were not scanned yet + * + * @returns pointer address to first Sample or NULL if there is none + * applied to this Group + * @see GetNextSample() + */ + Sample* Group::GetFirstSample() { + // FIXME: lazy und unsafe implementation, should be an autonomous iterator + for (Sample* pSample = pFile->GetFirstSample(); pSample; pSample = pFile->GetNextSample()) { + if (pSample->GetGroup() == this) return pSample; + } + return NULL; + } + + /** + * Returns the next Sample of the Group. You have to call + * GetFirstSample() once before you can use this method. By calling this + * method multiple times it iterates through the Samples assigned to + * this Group. + * + * @returns pointer address to the next Sample of this Group or NULL if + * end reached + * @see GetFirstSample() + */ + Sample* Group::GetNextSample() { + // FIXME: lazy und unsafe implementation, should be an autonomous iterator + for (Sample* pSample = pFile->GetNextSample(); pSample; pSample = pFile->GetNextSample()) { + if (pSample->GetGroup() == this) return pSample; + } + return NULL; + } + + /** + * Move Sample given by \a pSample from another Group to this Group. + */ + void Group::AddSample(Sample* pSample) { + pSample->pGroup = this; + } + + /** + * Move all members of this group to another group (preferably the 1st + * one except this). This method is called explicitly by + * File::DeleteGroup() thus when a Group was deleted. This code was + * intentionally not placed in the destructor! + */ + void Group::MoveAll() { + // get "that" other group first + Group* pOtherGroup = NULL; + for (pOtherGroup = pFile->GetFirstGroup(); pOtherGroup; pOtherGroup = pFile->GetNextGroup()) { + if (pOtherGroup != this) break; + } + if (!pOtherGroup) throw Exception( + "Could not move samples to another group, since there is no " + "other Group. This is a bug, report it!" + ); + // now move all samples of this group to the other group + for (Sample* pSample = GetFirstSample(); pSample; pSample = GetNextSample()) { + pOtherGroup->AddSample(pSample); + } + } + + + // *************** File *************** // * + /// Reflects Gigasampler file format version 2.0 (1998-06-28). + const DLS::version_t File::VERSION_2 = { + 0, 2, 19980628 & 0xffff, 19980628 >> 16 + }; + + /// Reflects Gigasampler file format version 3.0 (2003-03-31). 
+ const DLS::version_t File::VERSION_3 = { + 0, 3, 20030331 & 0xffff, 20030331 >> 16 + }; + + const DLS::Info::FixedStringLength File::FixedStringLengths[] = { + { CHUNK_ID_IARL, 256 }, + { CHUNK_ID_IART, 128 }, + { CHUNK_ID_ICMS, 128 }, + { CHUNK_ID_ICMT, 1024 }, + { CHUNK_ID_ICOP, 128 }, + { CHUNK_ID_ICRD, 128 }, + { CHUNK_ID_IENG, 128 }, + { CHUNK_ID_IGNR, 128 }, + { CHUNK_ID_IKEY, 128 }, + { CHUNK_ID_IMED, 128 }, + { CHUNK_ID_INAM, 128 }, + { CHUNK_ID_IPRD, 128 }, + { CHUNK_ID_ISBJ, 128 }, + { CHUNK_ID_ISFT, 128 }, + { CHUNK_ID_ISRC, 128 }, + { CHUNK_ID_ISRF, 128 }, + { CHUNK_ID_ITCH, 128 }, + { 0, 0 } + }; + File::File() : DLS::File() { + *pVersion = VERSION_3; + pGroups = NULL; + pInfo->FixedStringLengths = FixedStringLengths; + pInfo->ArchivalLocation = String(256, ' '); + + // add some mandatory chunks to get the file chunks in right + // order (INFO chunk will be moved to first position later) + pRIFF->AddSubChunk(CHUNK_ID_VERS, 8); + pRIFF->AddSubChunk(CHUNK_ID_COLH, 4); + pRIFF->AddSubChunk(CHUNK_ID_DLID, 16); + + GenerateDLSID(); } File::File(RIFF::File* pRIFF) : DLS::File(pRIFF) { + pGroups = NULL; + pInfo->FixedStringLengths = FixedStringLengths; + } + + File::~File() { + if (pGroups) { + std::list::iterator iter = pGroups->begin(); + std::list::iterator end = pGroups->end(); + while (iter != end) { + delete *iter; + ++iter; + } + delete pGroups; + } } Sample* File::GetFirstSample(progress_t* pProgress) { @@ -2700,14 +3317,20 @@ // create new Sample object and its respective 'wave' list chunk RIFF::List* wave = wvpl->AddSubList(LIST_TYPE_WAVE); Sample* pSample = new Sample(this, wave, 0 /*arbitrary value, we update offsets when we save*/); + + // add mandatory chunks to get the chunks in right order + wave->AddSubChunk(CHUNK_ID_FMT, 16); + wave->AddSubList(LIST_TYPE_INFO); + pSamples->push_back(pSample); return pSample; } /** @brief Delete a sample. * - * This will delete the given Sample object from the gig file. You have - * to call Save() to make this persistent to the file. + * This will delete the given Sample object from the gig file. Any + * references to this sample from Regions and DimensionRegions will be + * removed. You have to call Save() to make this persistent to the file. 
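+     *
+     * A minimal deletion sketch (assuming an already loaded gig::File
+     * object named "file"):
+     *
+     *     gig::Sample* pSample = file.GetFirstSample();
+     *     if (pSample) file.DeleteSample(pSample);
+     *     file.Save();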
* * @param pSample - sample to delete * @throws gig::Exception if given sample could not be found @@ -2716,8 +3339,24 @@ if (!pSamples || !pSamples->size()) throw gig::Exception("Could not delete sample as there are no samples"); SampleList::iterator iter = find(pSamples->begin(), pSamples->end(), (DLS::Sample*) pSample); if (iter == pSamples->end()) throw gig::Exception("Could not delete sample, could not find given sample"); + if (SamplesIterator != pSamples->end() && *SamplesIterator == pSample) ++SamplesIterator; // avoid iterator invalidation pSamples->erase(iter); delete pSample; + + // remove all references to the sample + for (Instrument* instrument = GetFirstInstrument() ; instrument ; + instrument = GetNextInstrument()) { + for (Region* region = instrument->GetFirstRegion() ; region ; + region = instrument->GetNextRegion()) { + + if (region->GetSample() == pSample) region->SetSample(NULL); + + for (int i = 0 ; i < region->DimensionRegions ; i++) { + gig::DimensionRegion *d = region->pDimensionRegions[i]; + if (d->pSample == pSample) d->pSample = NULL; + } + } + } } void File::LoadSamples() { @@ -2725,6 +3364,10 @@ } void File::LoadSamples(progress_t* pProgress) { + // Groups must be loaded before samples, because samples will try + // to resolve the group they belong to + if (!pGroups) LoadGroups(); + if (!pSamples) pSamples = new SampleList; RIFF::File* file = pRIFF; @@ -2837,7 +3480,19 @@ __ensureMandatoryChunksExist(); RIFF::List* lstInstruments = pRIFF->GetSubList(LIST_TYPE_LINS); RIFF::List* lstInstr = lstInstruments->AddSubList(LIST_TYPE_INS); + + // add mandatory chunks to get the chunks in right order + lstInstr->AddSubList(LIST_TYPE_INFO); + lstInstr->AddSubChunk(CHUNK_ID_DLID, 16); + Instrument* pInstrument = new Instrument(this, lstInstr); + pInstrument->GenerateDLSID(); + + lstInstr->AddSubChunk(CHUNK_ID_INSH, 12); + + // this string is needed for the gig to be loadable in GSt: + pInstrument->pInfo->Software = "Endless Wave"; + pInstruments->push_back(pInstrument); return pInstrument; } @@ -2848,7 +3503,7 @@ * have to call Save() to make this persistent to the file. * * @param pInstrument - instrument to delete - * @throws gig::Excption if given instrument could not be found + * @throws gig::Exception if given instrument could not be found */ void File::DeleteInstrument(Instrument* pInstrument) { if (!pInstruments) throw gig::Exception("Could not delete instrument as there are no instruments"); @@ -2888,6 +3543,317 @@ } } + /// Updates the 3crc chunk with the checksum of a sample. The + /// update is done directly to disk, as this method is called + /// after File::Save() + void File::SetSampleChecksum(Sample* pSample, uint32_t crc) { + RIFF::Chunk* _3crc = pRIFF->GetSubChunk(CHUNK_ID_3CRC); + if (!_3crc) return; + + // get the index of the sample + int iWaveIndex = -1; + File::SampleList::iterator iter = pSamples->begin(); + File::SampleList::iterator end = pSamples->end(); + for (int index = 0; iter != end; ++iter, ++index) { + if (*iter == pSample) { + iWaveIndex = index; + break; + } + } + if (iWaveIndex < 0) throw gig::Exception("Could not update crc, could not find sample"); + + // write the CRC-32 checksum to disk + _3crc->SetPos(iWaveIndex * 8); + uint32_t tmp = 1; + _3crc->WriteUint32(&tmp); // unknown, always 1? 
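+         // the second 32 bit word of the sample's 8 byte record holds the
+         // actual CRC-32 checksum of the sample's wave data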
+ _3crc->WriteUint32(&crc); + } + + Group* File::GetFirstGroup() { + if (!pGroups) LoadGroups(); + // there must always be at least one group + GroupsIterator = pGroups->begin(); + return *GroupsIterator; + } + + Group* File::GetNextGroup() { + if (!pGroups) return NULL; + ++GroupsIterator; + return (GroupsIterator == pGroups->end()) ? NULL : *GroupsIterator; + } + + /** + * Returns the group with the given index. + * + * @param index - number of the sought group (0..n) + * @returns sought group or NULL if there's no such group + */ + Group* File::GetGroup(uint index) { + if (!pGroups) LoadGroups(); + GroupsIterator = pGroups->begin(); + for (uint i = 0; GroupsIterator != pGroups->end(); i++) { + if (i == index) return *GroupsIterator; + ++GroupsIterator; + } + return NULL; + } + + Group* File::AddGroup() { + if (!pGroups) LoadGroups(); + // there must always be at least one group + __ensureMandatoryChunksExist(); + Group* pGroup = new Group(this, NULL); + pGroups->push_back(pGroup); + return pGroup; + } + + /** @brief Delete a group and its samples. + * + * This will delete the given Group object and all the samples that + * belong to this group from the gig file. You have to call Save() to + * make this persistent to the file. + * + * @param pGroup - group to delete + * @throws gig::Exception if given group could not be found + */ + void File::DeleteGroup(Group* pGroup) { + if (!pGroups) LoadGroups(); + std::list::iterator iter = find(pGroups->begin(), pGroups->end(), pGroup); + if (iter == pGroups->end()) throw gig::Exception("Could not delete group, could not find given group"); + if (pGroups->size() == 1) throw gig::Exception("Cannot delete group, there must be at least one default group!"); + // delete all members of this group + for (Sample* pSample = pGroup->GetFirstSample(); pSample; pSample = pGroup->GetNextSample()) { + DeleteSample(pSample); + } + // now delete this group object + pGroups->erase(iter); + delete pGroup; + } + + /** @brief Delete a group. + * + * This will delete the given Group object from the gig file. All the + * samples that belong to this group will not be deleted, but instead + * be moved to another group. You have to call Save() to make this + * persistent to the file. 
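+     *
+     * A minimal sketch of typical group handling (assuming an already
+     * loaded gig::File object named "file"; names are illustrative only):
+     *
+     *     gig::Group* pGroup = file.AddGroup();
+     *     pGroup->Name = "Piano Samples";
+     *     gig::Sample* pSample = file.GetFirstSample();
+     *     if (pSample) pGroup->AddSample(pSample); // move sample into group
+     *     // dissolve the group again later on, keeping its samples:
+     *     file.DeleteGroupOnly(pGroup);
+     *     file.Save();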
+ * + * @param pGroup - group to delete + * @throws gig::Exception if given group could not be found + */ + void File::DeleteGroupOnly(Group* pGroup) { + if (!pGroups) LoadGroups(); + std::list::iterator iter = find(pGroups->begin(), pGroups->end(), pGroup); + if (iter == pGroups->end()) throw gig::Exception("Could not delete group, could not find given group"); + if (pGroups->size() == 1) throw gig::Exception("Cannot delete group, there must be at least one default group!"); + // move all members of this group to another group + pGroup->MoveAll(); + pGroups->erase(iter); + delete pGroup; + } + + void File::LoadGroups() { + if (!pGroups) pGroups = new std::list; + // try to read defined groups from file + RIFF::List* lst3gri = pRIFF->GetSubList(LIST_TYPE_3GRI); + if (lst3gri) { + RIFF::List* lst3gnl = lst3gri->GetSubList(LIST_TYPE_3GNL); + if (lst3gnl) { + RIFF::Chunk* ck = lst3gnl->GetFirstSubChunk(); + while (ck) { + if (ck->GetChunkID() == CHUNK_ID_3GNM) { + if (pVersion && pVersion->major == 3 && + strcmp(static_cast(ck->LoadChunkData()), "") == 0) break; + + pGroups->push_back(new Group(this, ck)); + } + ck = lst3gnl->GetNextSubChunk(); + } + } + } + // if there were no group(s), create at least the mandatory default group + if (!pGroups->size()) { + Group* pGroup = new Group(this, NULL); + pGroup->Name = "Default Group"; + pGroups->push_back(pGroup); + } + } + + /** + * Apply all the gig file's current instruments, samples, groups and settings + * to the respective RIFF chunks. You have to call Save() to make changes + * persistent. + * + * Usually there is absolutely no need to call this method explicitly. + * It will be called automatically when File::Save() was called. + * + * @throws Exception - on errors + */ + void File::UpdateChunks() { + bool newFile = pRIFF->GetSubList(LIST_TYPE_INFO) == NULL; + + b64BitWavePoolOffsets = pVersion && pVersion->major == 3; + + // first update base class's chunks + DLS::File::UpdateChunks(); + + if (newFile) { + // INFO was added by Resource::UpdateChunks - make sure it + // is placed first in file + RIFF::Chunk* info = pRIFF->GetSubList(LIST_TYPE_INFO); + RIFF::Chunk* first = pRIFF->GetFirstSubChunk(); + if (first != info) { + pRIFF->MoveSubChunk(info, first); + } + } + + // update group's chunks + if (pGroups) { + std::list::iterator iter = pGroups->begin(); + std::list::iterator end = pGroups->end(); + for (; iter != end; ++iter) { + (*iter)->UpdateChunks(); + } + + // v3: make sure the file has 128 3gnm chunks + if (pVersion && pVersion->major == 3) { + RIFF::List* _3gnl = pRIFF->GetSubList(LIST_TYPE_3GRI)->GetSubList(LIST_TYPE_3GNL); + RIFF::Chunk* _3gnm = _3gnl->GetFirstSubChunk(); + for (int i = 0 ; i < 128 ; i++) { + if (i >= pGroups->size()) ::SaveString(CHUNK_ID_3GNM, _3gnm, _3gnl, "", "", true, 64); + if (_3gnm) _3gnm = _3gnl->GetNextSubChunk(); + } + } + } + + // update einf chunk + + // The einf chunk contains statistics about the gig file, such + // as the number of regions and samples used by each + // instrument. It is divided in equally sized parts, where the + // first part contains information about the whole gig file, + // and the rest of the parts map to each instrument in the + // file. + // + // At the end of each part there is a bit map of each sample + // in the file, where a set bit means that the sample is used + // by the file/instrument. + // + // Note that there are several fields with unknown use. These + // are set to zero. 
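+
+        // Each equally sized part uses the following byte layout (offsets
+        // relative to the start of the part, as written by the code below):
+        //    0..3    unknown (sometimes 0, sometimes the part length)
+        //    4..7    number of channels used
+        //    8..11   number of samples used
+        //   12..15   1 in the instrument parts, number of instruments in
+        //            the first (file wide) part
+        //   16..19   number of regions
+        //   20..23   number of dimension regions
+        //   24..27   number of sample loops
+        //   28..35   unknown
+        //   36..39   instrument index (unknown in the file wide part)
+        //   40..43   total number of samples in the file
+        //   44..47   unknown
+        //   48..     sample usage bit map, one bit per sample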
+ + int sublen = pSamples->size() / 8 + 49; + int einfSize = (Instruments + 1) * sublen; + + RIFF::Chunk* einf = pRIFF->GetSubChunk(CHUNK_ID_EINF); + if (einf) { + if (einf->GetSize() != einfSize) { + einf->Resize(einfSize); + memset(einf->LoadChunkData(), 0, einfSize); + } + } else if (newFile) { + einf = pRIFF->AddSubChunk(CHUNK_ID_EINF, einfSize); + } + if (einf) { + uint8_t* pData = (uint8_t*) einf->LoadChunkData(); + + std::map sampleMap; + int sampleIdx = 0; + for (Sample* pSample = GetFirstSample(); pSample; pSample = GetNextSample()) { + sampleMap[pSample] = sampleIdx++; + } + + int totnbusedsamples = 0; + int totnbusedchannels = 0; + int totnbregions = 0; + int totnbdimregions = 0; + int totnbloops = 0; + int instrumentIdx = 0; + + memset(&pData[48], 0, sublen - 48); + + for (Instrument* instrument = GetFirstInstrument() ; instrument ; + instrument = GetNextInstrument()) { + int nbusedsamples = 0; + int nbusedchannels = 0; + int nbdimregions = 0; + int nbloops = 0; + + memset(&pData[(instrumentIdx + 1) * sublen + 48], 0, sublen - 48); + + for (Region* region = instrument->GetFirstRegion() ; region ; + region = instrument->GetNextRegion()) { + for (int i = 0 ; i < region->DimensionRegions ; i++) { + gig::DimensionRegion *d = region->pDimensionRegions[i]; + if (d->pSample) { + int sampleIdx = sampleMap[d->pSample]; + int byte = 48 + sampleIdx / 8; + int bit = 1 << (sampleIdx & 7); + if ((pData[(instrumentIdx + 1) * sublen + byte] & bit) == 0) { + pData[(instrumentIdx + 1) * sublen + byte] |= bit; + nbusedsamples++; + nbusedchannels += d->pSample->Channels; + + if ((pData[byte] & bit) == 0) { + pData[byte] |= bit; + totnbusedsamples++; + totnbusedchannels += d->pSample->Channels; + } + } + } + if (d->SampleLoops) nbloops++; + } + nbdimregions += region->DimensionRegions; + } + // first 4 bytes unknown - sometimes 0, sometimes length of einf part + // store32(&pData[(instrumentIdx + 1) * sublen], sublen); + store32(&pData[(instrumentIdx + 1) * sublen + 4], nbusedchannels); + store32(&pData[(instrumentIdx + 1) * sublen + 8], nbusedsamples); + store32(&pData[(instrumentIdx + 1) * sublen + 12], 1); + store32(&pData[(instrumentIdx + 1) * sublen + 16], instrument->Regions); + store32(&pData[(instrumentIdx + 1) * sublen + 20], nbdimregions); + store32(&pData[(instrumentIdx + 1) * sublen + 24], nbloops); + // next 8 bytes unknown + store32(&pData[(instrumentIdx + 1) * sublen + 36], instrumentIdx); + store32(&pData[(instrumentIdx + 1) * sublen + 40], pSamples->size()); + // next 4 bytes unknown + + totnbregions += instrument->Regions; + totnbdimregions += nbdimregions; + totnbloops += nbloops; + instrumentIdx++; + } + // first 4 bytes unknown - sometimes 0, sometimes length of einf part + // store32(&pData[0], sublen); + store32(&pData[4], totnbusedchannels); + store32(&pData[8], totnbusedsamples); + store32(&pData[12], Instruments); + store32(&pData[16], totnbregions); + store32(&pData[20], totnbdimregions); + store32(&pData[24], totnbloops); + // next 8 bytes unknown + // next 4 bytes unknown, not always 0 + store32(&pData[40], pSamples->size()); + // next 4 bytes unknown + } + + // update 3crc chunk + + // The 3crc chunk contains CRC-32 checksums for the + // samples. The actual checksum values will be filled in + // later, by Sample::Write. 
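+        //
+        // Each sample occupies one 8 byte record in 3crc (hence the chunk
+        // size of pSamples->size() * 8): a 32 bit word of unknown meaning
+        // (always 1?) followed by the CRC-32 of the sample's wave data,
+        // as written by SetSampleChecksum().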
+ + RIFF::Chunk* _3crc = pRIFF->GetSubChunk(CHUNK_ID_3CRC); + if (_3crc) { + _3crc->Resize(pSamples->size() * 8); + } else if (newFile) { + _3crc = pRIFF->AddSubChunk(CHUNK_ID_3CRC, pSamples->size() * 8); + _3crc->LoadChunkData(); + + // the order of einf and 3crc is not the same in v2 and v3 + if (einf && pVersion && pVersion->major == 3) pRIFF->MoveSubChunk(_3crc, einf); + } + } + // *************** Exception ***************