/* Copyright (c) 2020 Alex Diener
   
   This software is provided 'as-is', without any express or implied warranty. In no event will the
   authors be held liable for any damages arising from the use of this software.
   
   Permission is granted to anyone to use this software for any purpose, including commercial
   applications, and to alter it and redistribute it freely, subject to the following restrictions:
   
   1. The origin of this software must not be misrepresented; you must not claim that you wrote the
      original software. If you use this software in a product, an acknowledgment in the product
      documentation would be appreciated but is not required.
   
   2. Altered source versions must be plainly marked as such, and must not be misrepresented as
      being the original software.
   
   3. This notice may not be removed or altered from any source distribution.
   
   Alex Diener alex@ludobloom.com */

#include "audiolab/AudioRecipe.h"

// Fix: the two #include directives had lost their header names; these four cover every
// stdlib call made below (fprintf, calloc/free, strcmp/strdup/memmove/memset, sinf/fmodf/M_PI).
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define stemobject_implementation AudioRecipe
stemobject_vtable_begin();
stemobject_vtable_entry(dispose);
stemobject_vtable_end();

AudioRecipe * AudioRecipe_create(void) {
	stemobject_create_implementation(init)
}

// Initializes self to an empty recipe with default output parameters (stereo, 16-bit, 44.1 kHz).
bool AudioRecipe_init(AudioRecipe * self) {
	call_super(init, self);
	stemobject_init_preamble();
	self->outputFile = NULL;
	self->outputFormat = OUTPUT_FORMAT_UNSPECIFIED;
	self->channelCount = 2;
	self->bytesPerSample = 2;
	self->sampleRate = 44100;
	self->commandCount = 0;
	self->commands = NULL;
	return true;
}

void AudioRecipe_dispose(AudioRecipe * self) {
	// Fix: mix commands own a strdup'd sourceName (see loadSerializedData); these were leaked.
	// commands is calloc'd, so any partially-initialized entries have a NULL sourceName and
	// free(NULL) is a no-op.
	for (unsigned int commandIndex = 0; commandIndex < self->commandCount; commandIndex++) {
		if (self->commands[commandIndex].type == COMMAND_MIX) {
			free((char *) self->commands[commandIndex].data.mix.sourceName);
		}
	}
	free(self->outputFile);
	free(self->commands);
	call_super(dispose, self);
}

AudioRecipe * AudioRecipe_deserialize(compat_type(DeserializationContext *) deserializationContext) {
	stemobject_deserialize_implementation(loadSerializedData)
}

// Maps a command type to the dictionary key it's serialized under. Returns NULL for an
// out-of-range value.
static const char * getCommandSerializationName(AudioRecipe_commandType command) {
	switch (command) {
		case COMMAND_MIX:
			return "mix";
		case COMMAND_AMPLIFY:
			return "amplify";
		case COMMAND_NORMALIZE:
			return "normalize";
		case COMMAND_STRETCH:
			return "stretch";
		case COMMAND_REVERSE:
			return "reverse";
		case COMMAND_DELETE:
			return "delete";
		case COMMAND_FADE:
			return "fade";
		case COMMAND_EQUALIZE:
			return "equalize";
		case COMMAND_CHANGE_SPEED:
			return "change_speed";
		case COMMAND_CHANGE_PITCH:
			return "change_pitch";
		case COMMAND_SILENCE:
			return "silence";
		case COMMAND_TONE:
			return "tone";
		case COMMAND_NOISE:
			return "noise";
	}
	return NULL;
}

// Reads serialized recipe data into self. On failure, prints a diagnostic to stderr, disposes
// self, and returns false. All strings stored in self are owned copies; context-owned strings
// are never retained past this call.
bool AudioRecipe_loadSerializedData(AudioRecipe * self, compat_type(DeserializationContext *) deserializationContext) {
	DeserializationContext * context = deserializationContext;
	const char * formatType;
	uint16_t formatVersion;
	const char * commandString, * nameString;
	
	call_virtual(beginStructure, context, AUDIO_RECIPE_FORMAT_TYPE);
	formatType = call_virtual(readString, context, "format_type");
	if (formatType == NULL || strcmp(formatType, AUDIO_RECIPE_FORMAT_TYPE)) {
		// Fix: passing NULL to %s is undefined behavior; substitute a placeholder
		fprintf(stderr, "AudioRecipe format type incorrect (expected \"" AUDIO_RECIPE_FORMAT_TYPE "\"; got \"%s\")\n", formatType == NULL ? "(null)" : formatType);
		return false;
	}
	formatVersion = call_virtual(readUInt16, context, "format_version");
	if (context->status != SERIALIZATION_ERROR_OK || formatVersion > AUDIO_RECIPE_FORMAT_VERSION) {
		fprintf(stderr, "AudioRecipe format version %u too new; can't understand newer than %u\n", formatVersion, AUDIO_RECIPE_FORMAT_VERSION);
		return false;
	}
	// Fix: duplicate the context-owned string immediately. Previously the raw pointer was stored
	// and only strdup'd at the end of this function, so any intermediate error path that called
	// AudioRecipe_dispose would free() memory owned by the deserialization context.
	nameString = call_virtual(readStringNullable, context, "output_file");
	self->outputFile = nameString == NULL ? NULL : strdup(nameString);
	self->outputFormat = call_virtual(readEnumeration, context, "output_format", "unspecified", OUTPUT_FORMAT_UNSPECIFIED, "wav", OUTPUT_FORMAT_WAV, "ogg", OUTPUT_FORMAT_OGG, NULL);
	self->bytesPerSample = call_virtual(readUInt8, context, "bytes_per_sample");
	self->channelCount = call_virtual(readUInt8, context, "channel_count");
	self->sampleRate = call_virtual(readUInt32, context, "sample_rate");
	self->commandCount = call_virtual(beginDictionary, context, "commands");
	if (self->commandCount > 0) {
		self->commands = calloc(self->commandCount, sizeof(*self->commands));
		if (self->commands == NULL) {
			// Allocation failure; keep self consistent before disposing
			self->commandCount = 0;
			AudioRecipe_dispose(self);
			return false;
		}
		for (unsigned int commandIndex = 0; commandIndex < self->commandCount; commandIndex++) {
			commandString = call_virtual(readNextDictionaryKey, context);
			if (commandString == NULL) {
				fprintf(stderr, "AudioRecipe deserialization failed around command %u\n", commandIndex);
				AudioRecipe_dispose(self);
				return false;
			}
			call_virtual(beginStructure, context, commandString);
			// frame_offset and frame_count are common to every command type
			self->commands[commandIndex].frameOffset = call_virtual(readUInt64, context, "frame_offset");
			self->commands[commandIndex].frameCount = call_virtual(readUInt64, context, "frame_count");
			if (!strcmp(commandString, getCommandSerializationName(COMMAND_MIX))) {
				self->commands[commandIndex].type = COMMAND_MIX;
				self->commands[commandIndex].data.mix.sourceType = call_virtual(readEnumeration, context, "source_type", "wav_file", SOURCE_WAV_FILE, "ogg_file", SOURCE_OGG_FILE, "recipe", SOURCE_RECIPE, "bfxr_synth", SOURCE_BFXR_SYNTH, "sfxrX_synth", SOURCE_SFXRX_SYNTH, NULL);
				nameString = call_virtual(readString, context, "source_name");
				if (nameString != NULL) {
					// Owned copy; released by AudioRecipe_dispose
					self->commands[commandIndex].data.mix.sourceName = strdup(nameString);
				}
				self->commands[commandIndex].data.mix.sourceFrameOffset = call_virtual(readUInt64, context, "source_frame_offset");
				
			} else if (!strcmp(commandString, getCommandSerializationName(COMMAND_AMPLIFY))) {
				self->commands[commandIndex].type = COMMAND_AMPLIFY;
				self->commands[commandIndex].data.scale.multiplier = call_virtual(readInt32, context, "multiplier");
				self->commands[commandIndex].data.scale.divisor = call_virtual(readInt32, context, "divisor");
				
			} else if (!strcmp(commandString, getCommandSerializationName(COMMAND_NORMALIZE))) {
				self->commands[commandIndex].type = COMMAND_NORMALIZE;
				
			} else if (!strcmp(commandString, getCommandSerializationName(COMMAND_STRETCH))) {
				self->commands[commandIndex].type = COMMAND_STRETCH;
				self->commands[commandIndex].data.scale.multiplier = call_virtual(readInt32, context, "multiplier");
				self->commands[commandIndex].data.scale.divisor = call_virtual(readInt32, context, "divisor");
				
			} else if (!strcmp(commandString, getCommandSerializationName(COMMAND_REVERSE))) {
				self->commands[commandIndex].type = COMMAND_REVERSE;
				
			} else if (!strcmp(commandString, getCommandSerializationName(COMMAND_DELETE))) {
				self->commands[commandIndex].type = COMMAND_DELETE;
				
			} else if (!strcmp(commandString, getCommandSerializationName(COMMAND_FADE))) {
				self->commands[commandIndex].type = COMMAND_FADE;
				const char * functionName = call_virtual(readString, context, "function");
				if (functionName != NULL) {
					if (!strcmp(functionName, "ramp_up_linear")) {
						self->commands[commandIndex].data.envelope.function = AudioRecipe_envelope_rampUpLinear;
					} else if (!strcmp(functionName, "ramp_down_linear")) {
						self->commands[commandIndex].data.envelope.function = AudioRecipe_envelope_rampDownLinear;
					} else {
						AudioRecipe_dispose(self);
						fprintf(stderr, "AudioRecipe fade command specified unknown envelope function \"%s\"\n", functionName);
						return false;
					}
				}
				
			} else if (!strcmp(commandString, getCommandSerializationName(COMMAND_EQUALIZE))) {
				self->commands[commandIndex].type = COMMAND_EQUALIZE;
				call_virtual(beginArray, context, "bands");
				for (unsigned int bandIndex = 0; bandIndex < sizeof(self->commands[commandIndex].data.equalize.bands) / sizeof(self->commands[commandIndex].data.equalize.bands[0]); bandIndex++) {
					self->commands[commandIndex].data.equalize.bands[bandIndex] = call_virtual(readFloat, context, NULL);
				}
				call_virtual(endArray, context);
				
			} else if (!strcmp(commandString, getCommandSerializationName(COMMAND_CHANGE_SPEED))) {
				self->commands[commandIndex].type = COMMAND_CHANGE_SPEED;
				self->commands[commandIndex].data.scale.multiplier = call_virtual(readInt32, context, "multiplier");
				self->commands[commandIndex].data.scale.divisor = call_virtual(readInt32, context, "divisor");
				
			} else if (!strcmp(commandString, getCommandSerializationName(COMMAND_CHANGE_PITCH))) {
				self->commands[commandIndex].type = COMMAND_CHANGE_PITCH;
				self->commands[commandIndex].data.scale.multiplier = call_virtual(readInt32, context, "multiplier");
				self->commands[commandIndex].data.scale.divisor = call_virtual(readInt32, context, "divisor");
				
			} else if (!strcmp(commandString, getCommandSerializationName(COMMAND_SILENCE))) {
				self->commands[commandIndex].type = COMMAND_SILENCE;
				
			} else if (!strcmp(commandString, getCommandSerializationName(COMMAND_TONE))) {
				self->commands[commandIndex].type = COMMAND_TONE;
				self->commands[commandIndex].data.tone.frequency = call_virtual(readFloat, context, "frequency");
				const char * functionName = call_virtual(readString, context, "function");
				if (functionName != NULL) {
					if (!strcmp(functionName, "sine")) {
						self->commands[commandIndex].data.tone.function = AudioRecipe_wave_sine;
					} else if (!strcmp(functionName, "square")) {
						self->commands[commandIndex].data.tone.function = AudioRecipe_wave_square;
					} else if (!strcmp(functionName, "triangle")) {
						self->commands[commandIndex].data.tone.function = AudioRecipe_wave_triangle;
					} else if (!strcmp(functionName, "sawtooth")) {
						self->commands[commandIndex].data.tone.function = AudioRecipe_wave_sawtooth;
					}
				}
				
			} else if (!strcmp(commandString, getCommandSerializationName(COMMAND_NOISE))) {
				self->commands[commandIndex].type = COMMAND_NOISE;
				self->commands[commandIndex].data.noise.seed = call_virtual(readUInt32, context, "seed");
			}
			call_virtual(endStructure, context);
		}
	}
	call_virtual(endDictionary, context);
	call_virtual(endStructure, context);
	return true;
}

// Writes self to serializationContext in the format read by AudioRecipe_loadSerializedData.
void AudioRecipe_serialize(AudioRecipe * self, compat_type(SerializationContext *) serializationContext) {
	SerializationContext * context = serializationContext;
	
	call_virtual(beginStructure, context, AUDIO_RECIPE_FORMAT_TYPE);
	call_virtual(writeString, context, "format_type", AUDIO_RECIPE_FORMAT_TYPE);
	call_virtual(writeUInt16, context, "format_version", AUDIO_RECIPE_FORMAT_VERSION);
	call_virtual(writeStringNullable, context, "output_file", self->outputFile);
	call_virtual(writeEnumeration, context, "output_format", self->outputFormat, "unspecified", OUTPUT_FORMAT_UNSPECIFIED, "wav", OUTPUT_FORMAT_WAV, "ogg", OUTPUT_FORMAT_OGG, NULL);
	call_virtual(writeUInt8, context, "bytes_per_sample", self->bytesPerSample);
	call_virtual(writeUInt8, context, "channel_count", self->channelCount);
	call_virtual(writeUInt32, context, "sample_rate", self->sampleRate);
	call_virtual(beginDictionary, context, "commands");
	for (unsigned int commandIndex = 0; commandIndex < self->commandCount; commandIndex++) {
		call_virtual(beginStructure, context, getCommandSerializationName(self->commands[commandIndex].type));
		call_virtual(writeUInt64, context, "frame_offset", self->commands[commandIndex].frameOffset);
		call_virtual(writeUInt64, context, "frame_count", self->commands[commandIndex].frameCount);
		switch (self->commands[commandIndex].type) {
			case COMMAND_MIX:
				call_virtual(writeEnumeration, context, "source_type", self->commands[commandIndex].data.mix.sourceType, "wav_file", SOURCE_WAV_FILE, "ogg_file", SOURCE_OGG_FILE, "recipe", SOURCE_RECIPE, "bfxr_synth", SOURCE_BFXR_SYNTH, "sfxrX_synth", SOURCE_SFXRX_SYNTH, NULL);
				call_virtual(writeString, context, "source_name", self->commands[commandIndex].data.mix.sourceName);
				call_virtual(writeUInt64, context, "source_frame_offset", self->commands[commandIndex].data.mix.sourceFrameOffset);
				break;
			case COMMAND_AMPLIFY:
			case COMMAND_STRETCH:
			case COMMAND_CHANGE_SPEED:
			case COMMAND_CHANGE_PITCH:
				call_virtual(writeInt32, context, "multiplier", self->commands[commandIndex].data.scale.multiplier);
				call_virtual(writeInt32, context, "divisor", self->commands[commandIndex].data.scale.divisor);
				break;
			case COMMAND_FADE:
				if (self->commands[commandIndex].data.envelope.function == AudioRecipe_envelope_rampUpLinear) {
					call_virtual(writeString, context, "function", "ramp_up_linear");
				// Fix: this comparison previously read data.tone.function (wrong union member);
				// fade commands store their function pointer in data.envelope
				} else if (self->commands[commandIndex].data.envelope.function == AudioRecipe_envelope_rampDownLinear) {
					call_virtual(writeString, context, "function", "ramp_down_linear");
				} else {
					fprintf(stderr, "Warning: AudioRecipe with an unknown envelope function (%p) is being serialized as AudioRecipe_envelope_rampUpLinear\n", self->commands[commandIndex].data.envelope.function);
					call_virtual(writeString, context, "function", "ramp_up_linear");
				}
				break;
			case COMMAND_EQUALIZE:
				call_virtual(beginArray, context, "bands");
				for (unsigned int bandIndex = 0; bandIndex < sizeof(self->commands[commandIndex].data.equalize.bands) / sizeof(self->commands[commandIndex].data.equalize.bands[0]); bandIndex++) {
					call_virtual(writeFloat, context, NULL, self->commands[commandIndex].data.equalize.bands[bandIndex]);
				}
				call_virtual(endArray, context);
				break;
			case COMMAND_TONE:
				call_virtual(writeFloat, context, "frequency", self->commands[commandIndex].data.tone.frequency);
				if (self->commands[commandIndex].data.tone.function == AudioRecipe_wave_sine) {
					call_virtual(writeString, context, "function", "sine");
				} else if (self->commands[commandIndex].data.tone.function == AudioRecipe_wave_square) {
					call_virtual(writeString, context, "function", "square");
				} else if (self->commands[commandIndex].data.tone.function == AudioRecipe_wave_triangle) {
					call_virtual(writeString, context, "function", "triangle");
				} else if (self->commands[commandIndex].data.tone.function == AudioRecipe_wave_sawtooth) {
					call_virtual(writeString, context, "function", "sawtooth");
				} else {
					fprintf(stderr, "Warning: AudioRecipe with an unknown wave function (%p) is being serialized as AudioRecipe_wave_sine\n", self->commands[commandIndex].data.tone.function);
					call_virtual(writeString, context, "function", "sine");
				}
				break;
			case COMMAND_NOISE:
				call_virtual(writeUInt32, context, "seed", self->commands[commandIndex].data.noise.seed);
				break;
			case COMMAND_NORMALIZE:
			case COMMAND_REVERSE:
			case COMMAND_DELETE:
			case COMMAND_SILENCE:
				// No per-command payload beyond frame_offset/frame_count
				break;
		}
		call_virtual(endStructure, context);
	}
	call_virtual(endDictionary, context);
	call_virtual(endStructure, context);
}

// Resolves a mix command's source to a PCMAudio via the resource manager. Returns NULL on
// failure or for an out-of-range type. Caller must release the returned reference.
static PCMAudio * referenceSourceAudio(ResourceManager * resourceManager, AudioRecipe_sourceType type, const char * name) {
	switch (type) {
		case SOURCE_WAV_FILE:
			return ResourceManager_referenceResource(resourceManager, ATOM("wav_file"), name);
		case SOURCE_OGG_FILE:
			return ResourceManager_referenceResource(resourceManager, ATOM("vorbis_file"), name);
		case SOURCE_RECIPE:
			return ResourceManager_referenceResource(resourceManager, ATOM("audiolab_recipe_result"), name);
		case SOURCE_BFXR_SYNTH:
			return ResourceManager_referenceResource(resourceManager, ATOM("bfxrsound_result"), name);
		case SOURCE_SFXRX_SYNTH:
			return ResourceManager_referenceResource(resourceManager, ATOM("sfxrXsound_result"), name);
	}
	return NULL;
}

// Clamps a command's frame range to the available audio. frameCount == 0 means "to the end".
// Returns 0 when frameOffset is at or past the end.
static uint64_t adjustFrameCount(uint64_t frameCount, uint64_t frameOffset, uint64_t maxFrameCount) {
	if (frameOffset >= maxFrameCount) {
		return 0;
	}
	if (frameCount == 0 || frameOffset + frameCount > maxFrameCount) {
		frameCount = maxFrameCount - frameOffset;
	}
	return frameCount;
}

// Shared implementation for COMMAND_STRETCH and COMMAND_CHANGE_SPEED: resamples the region
// [frameOffset, frameOffset + frameCount) by multiplier/divisor, shifting any trailing audio to
// open (when growing) or close (when shrinking) the gap, and updates result->frameCount.
// No-op when the offset is out of range or either ratio term is < 1.
static void stretchRegion(PCMAudio * result, unsigned int bytesPerFrame, int multiplier, int divisor, uint64_t frameOffset, uint64_t frameCount, bool preservePitch) {
	if (frameOffset >= result->frameCount || multiplier < 1 || divisor < 1) {
		return;
	}
	if (frameCount == 0) {
		frameCount = result->frameCount - frameOffset;
	}
	// Growing: move trailing audio out of the way before stretching in place
	if (multiplier > divisor && frameOffset + frameCount < result->frameCount) {
		memmove(result->samples + (frameOffset + frameCount * multiplier / divisor) * bytesPerFrame,
		        result->samples + (frameOffset + frameCount) * bytesPerFrame,
		        (result->frameCount - (frameOffset + frameCount)) * bytesPerFrame);
	}
	if (preservePitch) {
		stretchAudioSamplesPreservingPitch(multiplier, divisor, result->samples + frameOffset * bytesPerFrame, result->channelCount, result->bytesPerSample, frameCount);
	} else {
		stretchAudioSamples(multiplier, divisor, result->samples + frameOffset * bytesPerFrame, result->channelCount, result->bytesPerSample, frameCount);
	}
	// Shrinking: close the gap left behind after stretching
	if (multiplier < divisor && frameOffset + frameCount < result->frameCount) {
		memmove(result->samples + (frameOffset + frameCount * multiplier / divisor) * bytesPerFrame,
		        result->samples + (frameOffset + frameCount) * bytesPerFrame,
		        (result->frameCount - (frameOffset + frameCount)) * bytesPerFrame);
	}
	result->frameCount += (int64_t) frameCount * multiplier / divisor - frameCount;
}

// Shared implementation for the inserting commands (COMMAND_SILENCE/TONE/NOISE): shifts audio
// at and after frameOffset forward by frameCount frames to make room for generated samples.
static void openInsertionGap(PCMAudio * result, unsigned int bytesPerFrame, uint64_t frameOffset, uint64_t frameCount) {
	if (frameOffset + frameCount < result->frameCount) {
		memmove(result->samples + (frameOffset + frameCount) * bytesPerFrame,
		        result->samples + frameOffset * bytesPerFrame,
		        (result->frameCount - frameOffset) * bytesPerFrame);
	}
}

// Renders the recipe to a new PCMAudio. First pass sizes the output buffer for the worst case
// (mix/insert growth plus the largest running surplus from stretch/speed changes); second pass
// applies each command in order. Returns NULL if a mix source can't be loaded or the sample
// buffer can't be allocated.
PCMAudio * AudioRecipe_evaluate(AudioRecipe * self, ResourceManager * resourceManager) {
	uint64_t totalFrameCount = 0, maxFrameCountDelta = 0;
	int64_t frameCountDelta = 0;
	
	for (unsigned int commandIndex = 0; commandIndex < self->commandCount; commandIndex++) {
		switch (self->commands[commandIndex].type) {
			case COMMAND_MIX: {
				uint64_t frameCount = self->commands[commandIndex].frameCount;
				PCMAudio * sourceAudio = referenceSourceAudio(resourceManager, self->commands[commandIndex].data.mix.sourceType, self->commands[commandIndex].data.mix.sourceName);
				if (sourceAudio == NULL) {
					fprintf(stderr, "AudioRecipe_evaluate: Failed to load \"%s\" as type %d\n", self->commands[commandIndex].data.mix.sourceName, self->commands[commandIndex].data.mix.sourceType);
					return NULL;
				}
				if (frameCount == 0) {
					frameCount = sourceAudio->frameCount;
				}
				// Convert the source's frame count into output-rate frames
				frameCount = frameCount * self->sampleRate / sourceAudio->sampleRate;
				ResourceManager_releaseResource(resourceManager, sourceAudio);
				if (self->commands[commandIndex].frameOffset + frameCount > totalFrameCount) {
					totalFrameCount = self->commands[commandIndex].frameOffset + frameCount;
				}
				break;
			}
			case COMMAND_STRETCH:
			case COMMAND_CHANGE_SPEED: {
				uint64_t frameCount = self->commands[commandIndex].frameCount;
				if (frameCount == 0) {
					frameCount = totalFrameCount - self->commands[commandIndex].frameOffset;
				}
				int64_t stretchedFrameCount = (int64_t) (frameCount * self->commands[commandIndex].data.scale.multiplier / self->commands[commandIndex].data.scale.divisor);
				frameCountDelta += stretchedFrameCount - (int64_t) frameCount;
				// Track the high-water mark of net growth; deletes can later shrink
				// frameCountDelta, but the buffer must hold the peak
				if (frameCountDelta > (int64_t) maxFrameCountDelta) {
					maxFrameCountDelta = (uint64_t) frameCountDelta;
				}
				break;
			}
			case COMMAND_DELETE: {
				uint64_t frameCount = adjustFrameCount(self->commands[commandIndex].frameCount, self->commands[commandIndex].frameOffset, totalFrameCount);
				frameCountDelta -= (int64_t) frameCount;
				break;
			}
			case COMMAND_SILENCE:
			case COMMAND_TONE:
			case COMMAND_NOISE: {
				uint64_t frameCount = self->commands[commandIndex].frameCount;
				// Inserting past the current end implicitly extends the output to the offset
				if (self->commands[commandIndex].frameOffset > totalFrameCount) {
					frameCount += self->commands[commandIndex].frameOffset - totalFrameCount;
				}
				totalFrameCount += frameCount;
				break;
			}
			case COMMAND_AMPLIFY:
			case COMMAND_NORMALIZE:
			case COMMAND_FADE:
			case COMMAND_EQUALIZE:
			case COMMAND_CHANGE_PITCH:
			case COMMAND_REVERSE:
				// In-place commands; no effect on output length
				break;
		}
	}
	
	void * samples = calloc(self->bytesPerSample * self->channelCount, totalFrameCount + maxFrameCountDelta);
	if (samples == NULL) {
		fprintf(stderr, "AudioRecipe_evaluate: Failed to allocate sample buffer\n");
		return NULL;
	}
	PCMAudio * result = PCMAudio_create(self->bytesPerSample, self->channelCount, self->sampleRate, 0, samples, false);
	unsigned int bytesPerFrame = result->channelCount * result->bytesPerSample;
	
	for (unsigned int commandIndex = 0; commandIndex < self->commandCount; commandIndex++) {
		switch (self->commands[commandIndex].type) {
			case COMMAND_MIX: {
				// First pass already proved this source loads, so no NULL check here; if sources
				// can be evicted between passes this would need revisiting
				PCMAudio * sourceAudio = referenceSourceAudio(resourceManager, self->commands[commandIndex].data.mix.sourceType, self->commands[commandIndex].data.mix.sourceName);
				uint64_t frameCount = self->commands[commandIndex].frameCount;
				if (frameCount == 0) {
					frameCount = sourceAudio->frameCount;
				}
				mixAudioSamples(sourceAudio->samples + self->commands[commandIndex].data.mix.sourceFrameOffset * sourceAudio->channelCount * sourceAudio->bytesPerSample, sourceAudio->channelCount, sourceAudio->sampleRate, sourceAudio->bytesPerSample,
				                result->samples + self->commands[commandIndex].frameOffset * bytesPerFrame, result->channelCount, result->sampleRate, result->bytesPerSample,
				                frameCount, result->frameCount - self->commands[commandIndex].frameOffset);
				frameCount = frameCount * self->sampleRate / sourceAudio->sampleRate;
				ResourceManager_releaseResource(resourceManager, sourceAudio);
				if (self->commands[commandIndex].frameOffset + frameCount > result->frameCount) {
					result->frameCount = self->commands[commandIndex].frameOffset + frameCount;
				}
				break;
			}
			case COMMAND_AMPLIFY: {
				uint64_t frameCount = adjustFrameCount(self->commands[commandIndex].frameCount, self->commands[commandIndex].frameOffset, result->frameCount);
				if (frameCount == 0) {
					break;
				}
				amplifyAudioSamples(self->commands[commandIndex].data.scale.multiplier, self->commands[commandIndex].data.scale.divisor, result->samples + self->commands[commandIndex].frameOffset * bytesPerFrame, result->channelCount, result->bytesPerSample, frameCount);
				break;
			}
			case COMMAND_NORMALIZE: {
				uint64_t frameCount = adjustFrameCount(self->commands[commandIndex].frameCount, self->commands[commandIndex].frameOffset, result->frameCount);
				if (frameCount == 0) {
					break;
				}
				normalizeAudioSamples(result->samples + self->commands[commandIndex].frameOffset * bytesPerFrame, result->channelCount, result->bytesPerSample, frameCount);
				break;
			}
			case COMMAND_STRETCH:
				stretchRegion(result, bytesPerFrame, self->commands[commandIndex].data.scale.multiplier, self->commands[commandIndex].data.scale.divisor, self->commands[commandIndex].frameOffset, self->commands[commandIndex].frameCount, false);
				break;
			case COMMAND_REVERSE: {
				uint64_t frameCount = adjustFrameCount(self->commands[commandIndex].frameCount, self->commands[commandIndex].frameOffset, result->frameCount);
				if (frameCount == 0) {
					break;
				}
				// Fix: frameOffset was previously used as a byte offset (missing * bytesPerFrame)
				reverseAudioSamples(result->samples + self->commands[commandIndex].frameOffset * bytesPerFrame, result->channelCount, result->bytesPerSample, frameCount);
				break;
			}
			case COMMAND_DELETE: {
				uint64_t frameCount = adjustFrameCount(self->commands[commandIndex].frameCount, self->commands[commandIndex].frameOffset, result->frameCount);
				if (frameCount == 0) {
					break;
				}
				if (self->commands[commandIndex].frameOffset + frameCount < result->frameCount) {
					memmove(result->samples + self->commands[commandIndex].frameOffset * bytesPerFrame,
					        result->samples + (self->commands[commandIndex].frameOffset + frameCount) * bytesPerFrame,
					        (result->frameCount - (self->commands[commandIndex].frameOffset + frameCount)) * bytesPerFrame);
				}
				result->frameCount -= frameCount;
				break;
			}
			case COMMAND_FADE: {
				uint64_t frameCount = adjustFrameCount(self->commands[commandIndex].frameCount, self->commands[commandIndex].frameOffset, result->frameCount);
				if (frameCount == 0) {
					break;
				}
				// Fix: frameOffset was previously used as a byte offset (missing * bytesPerFrame)
				applyAmplitudeEnvelope(self->commands[commandIndex].data.envelope.function, result->samples + self->commands[commandIndex].frameOffset * bytesPerFrame, result->channelCount, result->bytesPerSample, frameCount);
				break;
			}
			case COMMAND_EQUALIZE: {
				uint64_t frameCount = adjustFrameCount(self->commands[commandIndex].frameCount, self->commands[commandIndex].frameOffset, result->frameCount);
				if (frameCount == 0) {
					break;
				}
				// Fix: frameOffset was previously used as a byte offset (missing * bytesPerFrame)
				equalizeAudioSamples(10, self->commands[commandIndex].data.equalize.bands, result->samples + self->commands[commandIndex].frameOffset * bytesPerFrame, result->channelCount, result->bytesPerSample, result->sampleRate, frameCount);
				break;
			}
			case COMMAND_CHANGE_SPEED:
				stretchRegion(result, bytesPerFrame, self->commands[commandIndex].data.scale.multiplier, self->commands[commandIndex].data.scale.divisor, self->commands[commandIndex].frameOffset, self->commands[commandIndex].frameCount, true);
				break;
			case COMMAND_CHANGE_PITCH: {
				uint64_t frameCount = adjustFrameCount(self->commands[commandIndex].frameCount, self->commands[commandIndex].frameOffset, result->frameCount);
				if (frameCount == 0) {
					break;
				}
				pitchAdjustAudioSamples((float) self->commands[commandIndex].data.scale.multiplier / (float) self->commands[commandIndex].data.scale.divisor, result->samples + self->commands[commandIndex].frameOffset * bytesPerFrame, result->channelCount, result->bytesPerSample, frameCount);
				break;
			}
			case COMMAND_SILENCE:
				openInsertionGap(result, bytesPerFrame, self->commands[commandIndex].frameOffset, self->commands[commandIndex].frameCount);
				memset(result->samples + self->commands[commandIndex].frameOffset * bytesPerFrame, 0x00, self->commands[commandIndex].frameCount * bytesPerFrame);
				result->frameCount += self->commands[commandIndex].frameCount;
				break;
			case COMMAND_TONE:
				openInsertionGap(result, bytesPerFrame, self->commands[commandIndex].frameOffset, self->commands[commandIndex].frameCount);
				generateTone(self->commands[commandIndex].data.tone.frequency, self->commands[commandIndex].data.tone.function, result->samples + self->commands[commandIndex].frameOffset * bytesPerFrame, result->channelCount, result->sampleRate, result->bytesPerSample, self->commands[commandIndex].frameCount);
				result->frameCount += self->commands[commandIndex].frameCount;
				break;
			case COMMAND_NOISE:
				openInsertionGap(result, bytesPerFrame, self->commands[commandIndex].frameOffset, self->commands[commandIndex].frameCount);
				generateNoise(self->commands[commandIndex].data.noise.seed, result->samples + self->commands[commandIndex].frameOffset * bytesPerFrame, result->channelCount, result->bytesPerSample, self->commands[commandIndex].frameCount);
				result->frameCount += self->commands[commandIndex].frameCount;
				break;
		}
	}
	return result;
}

// Amplitude envelopes: value is normalized position in [0, 1]; returns gain in [0, 1].
float AudioRecipe_envelope_rampUpLinear(float value) {
	return value;
}

float AudioRecipe_envelope_rampDownLinear(float value) {
	return 1.0f - value;
}

// Wave functions: position is in cycles; returns amplitude in [-1, 1].
float AudioRecipe_wave_sine(float position) {
	return sinf(position * M_PI * 2);
}

float AudioRecipe_wave_square(float position) {
	return fmodf(position, 1.0f) <= 0.5f ? 1.0f : -1.0f;
}

float AudioRecipe_wave_triangle(float position) {
	position = fmodf(position, 1.0f);
	return (position <= 0.5f ? position : (1.0f - position)) * 4.0f - 1.0f;
}

float AudioRecipe_wave_sawtooth(float position) {
	return position * 2.0f - 1.0f;
}