/*
  Copyright (c) 2020 Alex Diener
  
  This software is provided 'as-is', without any express or implied
  warranty. In no event will the authors be held liable for any damages
  arising from the use of this software.
  
  Permission is granted to anyone to use this software for any purpose,
  including commercial applications, and to alter it and redistribute it
  freely, subject to the following restrictions:
  
  1. The origin of this software must not be misrepresented; you must not
     claim that you wrote the original software. If you use this software
     in a product, an acknowledgment in the product documentation would be
     appreciated but is not required.
  
  2. Altered source versions must be plainly marked as such, and must not be
     misrepresented as being the original software.
  
  3. This notice may not be removed or altered from any source distribution.
  
  Alex Diener alex@ludobloom.com
*/

#include "audiosynth/AudioMath.h"
#include "gamemath/PCGRandom.h"
#include <assert.h>
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

AudioResampleState initAudioResampleState(void) {
	AudioResampleState resampleState = {{0.0f, 0.0f}, 0.0f};
	return resampleState;
}

#define clampFrame_float(in_sample) \
	in_sample = fmaxf(-1.0f, fminf(1.0f, in_sample));

#define clampFrame_none(in_sample)

#define readFrame_mono(clamp_frame) \
	inSample[0] = inSamplesTyped[inFrameIndex]; \
	clamp_frame(inSample[0]); \
	inFrameIndex += 1;

#define readFrame_stereo(clamp_frame) \
	inSample[0] = inSamplesTyped[inFrameIndex * 2]; \
	inSample[1] = inSamplesTyped[inFrameIndex * 2 + 1]; \
	clamp_frame(inSample[0]); \
	clamp_frame(inSample[1]); \
	inFrameIndex += 1;

#define convertFrame_mono(transfer_multiplier, transfer_divisor) \
	outSample[0] = inSample[0] * (transfer_multiplier) / (transfer_divisor);

#define convertFrame_stereo(transfer_multiplier, transfer_divisor) \
	outSample[0] = inSample[0] * (transfer_multiplier) / (transfer_divisor); \
	outSample[1] = inSample[1] * (transfer_multiplier) / (transfer_divisor);

#define convertFrame_repeat(transfer_multiplier, transfer_divisor) \
	outSample[0] = outSample[1] = inSample[0] * (transfer_multiplier) / (transfer_divisor);

#define convertFrame_mix(transfer_multiplier, transfer_divisor) \
	outSample[0] = outSample[1] = (inSample[0] + inSample[1]) / 2 * (transfer_multiplier) / (transfer_divisor);

#define writeFrame_mono() \
	outSamplesTyped[outFrameIndex] = outSample[0]; \
	outFrameIndex += 1;

#define writeFrame_stereo() \
	outSamplesTyped[outFrameIndex * 2] = outSample[0]; \
	outSamplesTyped[outFrameIndex * 2 + 1] = outSample[1]; \
	outFrameIndex += 1;

#define convert_null(read_frame, clamp_frame, convert_frame, transfer_multiplier, transfer_divisor, write_frame) \
	while (inFrameIndex < inFrameCount && outFrameIndex < outFrameCountMax) { \
		read_frame(clamp_frame) \
		convert_frame(transfer_multiplier, transfer_divisor) \
		write_frame() \
	}
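/* The conversion loops below are composed from the small macros above: a
   read_frame macro loads one input frame into inSample[], a convert_frame
   macro scales or mixes it into outSample[], and a write_frame macro stores
   it and advances the output index. convert_null handles the case where the
   sample rates already match; convert_resample (below) averages input frames
   with fractional weights at the window edges, carrying the partial last
   frame across calls in AudioResampleState. As a rough sketch of how the
   pieces combine,
   convert_null(readFrame_mono, clampFrame_none, convertFrame_mono, 1, 1, writeFrame_mono)
   expands to approximately:
   
	while (inFrameIndex < inFrameCount && outFrameIndex < outFrameCountMax) {
		inSample[0] = inSamplesTyped[inFrameIndex];
		inFrameIndex += 1;
		outSample[0] = inSample[0] * (1) / (1);
		outSamplesTyped[outFrameIndex] = outSample[0];
		outFrameIndex += 1;
	}
*/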
#define convert_resample(read_frame, clamp_frame, convert_frame, transfer_multiplier, transfer_divisor, write_frame) \
	float inOffsetPerOutFrame = (float) inSampleRate / outSampleRate; \
	float inFrameTargetIndex = -resampleState.lastSampleBlend; \
	while (outFrameIndex < outFrameCountMax && inFrameIndex < inFrameCount) { \
		float inSampleWeight = resampleState.lastSampleBlend; \
		float sample[AUDIO_CHANNEL_COUNT_MAX]; \
		sample[0] = resampleState.lastSample[0] * inSampleWeight; \
		sample[1] = resampleState.lastSample[1] * inSampleWeight; \
		inFrameTargetIndex += inOffsetPerOutFrame; \
		while (inFrameIndex + 1 < inFrameTargetIndex && inFrameIndex < inFrameCount) { \
			read_frame(clampFrame_none); \
			resampleState.lastSample[0] = inSample[0]; \
			resampleState.lastSample[1] = inSample[1]; \
			sample[0] += resampleState.lastSample[0]; \
			sample[1] += resampleState.lastSample[1]; \
			inSampleWeight += 1.0f; \
		} \
		if (inFrameIndex < inFrameTargetIndex && inFrameIndex < inFrameCount) { \
			float weight = inFrameTargetIndex - inFrameIndex; \
			read_frame(clampFrame_none); \
			resampleState.lastSample[0] = inSample[0]; \
			resampleState.lastSample[1] = inSample[1]; \
			sample[0] += resampleState.lastSample[0] * weight; \
			sample[1] += resampleState.lastSample[1] * weight; \
			inSampleWeight += weight; \
		} \
		resampleState.lastSampleBlend = inFrameIndex - inFrameTargetIndex; \
		sample[0] /= inSampleWeight; \
		sample[1] /= inSampleWeight; \
		inSample[0] = sample[0]; \
		inSample[1] = sample[1]; \
		convert_frame(transfer_multiplier, transfer_divisor); \
		write_frame() \
	}

/*
#define convert_upsample(read_frame, clamp_frame, convert_frame, transfer_multiplier, transfer_divisor, write_frame) \
	while (inFrameIndex < inFrameCount && outFrameIndex < outFrameCountMax) { \
		read_frame(clamp_frame) \
		convert_frame(transfer_multiplier, transfer_divisor) \
		resampleSlop += (int) outSampleRate; \
		while (resampleSlop >= (int) inSampleRate && outFrameIndex < outFrameCountMax) { \
			resampleSlop -= (int) inSampleRate; \
			write_frame() \
		} \
	} \
	if (inFrameIndex == inFrameCount && outFrameIndex == outFrameCountMax - 1) { \
		write_frame() \
	}

#define convert_downsample(read_frame, clamp_frame, convert_frame, transfer_multiplier, transfer_divisor, write_frame) \
	while (inFrameIndex < inFrameCount && outFrameIndex < outFrameCountMax) { \
		read_frame(clamp_frame) \
		resampleSlop += (int) outSampleRate; \
		while (resampleSlop >= (int) inSampleRate && outFrameIndex < outFrameCountMax) { \
			resampleSlop -= (int) inSampleRate; \
			convert_frame(transfer_multiplier, transfer_divisor) \
			write_frame() \
		} \
	}

#define convertSamples_sampleRate(read_frame, clamp_frame, convert_frame, transfer_multiplier, transfer_divisor, write_frame) \
	if (inSampleRate < outSampleRate) { \
		convert_upsample(read_frame, clamp_frame, convert_frame, transfer_multiplier, transfer_divisor, write_frame) \
	} else if (inSampleRate > outSampleRate) { \
		convert_downsample(read_frame, clamp_frame, convert_frame, transfer_multiplier, transfer_divisor, write_frame) \
	} else { \
		convert_null(read_frame, clamp_frame, convert_frame, transfer_multiplier, transfer_divisor, write_frame) \
	}
*/

#define convertSamples_sampleRate(read_frame, clamp_frame, convert_frame, transfer_multiplier, transfer_divisor, write_frame) \
	if (inSampleRate != outSampleRate) { \
		convert_resample(read_frame, clamp_frame, convert_frame, transfer_multiplier, transfer_divisor, write_frame) \
	} else { \
		convert_null(read_frame, clamp_frame, convert_frame, transfer_multiplier, transfer_divisor, write_frame) \
	}
#define convertSamples_channelCount(sample_type_in, sample_type_out, clamp_frame, transfer_multiplier, transfer_divisor) \
	const sample_type_in * inSamplesTyped = inSamples; \
	sample_type_in inSample[AUDIO_CHANNEL_COUNT_MAX] = {0}; \
	sample_type_out * outSamplesTyped = outSamples, outSample[AUDIO_CHANNEL_COUNT_MAX] = {0}; \
	\
	if (inChannelCount == 1 && outChannelCount == 1) { \
		convertSamples_sampleRate(readFrame_mono, clamp_frame, convertFrame_mono, transfer_multiplier, transfer_divisor, writeFrame_mono) \
		\
	} else if (inChannelCount == 2 && outChannelCount == 2) { \
		convertSamples_sampleRate(readFrame_stereo, clamp_frame, convertFrame_stereo, transfer_multiplier, transfer_divisor, writeFrame_stereo) \
		\
	} else if (inChannelCount == 1 && outChannelCount == 2) { \
		convertSamples_sampleRate(readFrame_mono, clamp_frame, convertFrame_repeat, transfer_multiplier, transfer_divisor, writeFrame_stereo) \
		\
	} else if (inChannelCount == 2 && outChannelCount == 1) { \
		convertSamples_sampleRate(readFrame_stereo, clamp_frame, convertFrame_mix, transfer_multiplier, transfer_divisor, writeFrame_mono) \
	}

void convertAudioSamples(const void * inSamples, AudioFrameIndex inFrameCount, unsigned int inChannelCount, unsigned int inSampleRate, unsigned int inBytesPerSample, void * outSamples, AudioFrameIndex outFrameCountMax, unsigned int outChannelCount, unsigned int outSampleRate, unsigned int outBytesPerSample, AudioFrameIndex * outFramesRead, AudioFrameIndex * outFramesWritten, AudioResampleState * ioResampleState) {
	AudioFrameIndex inFrameIndex = 0, outFrameIndex = 0;
	AudioResampleState resampleState = {{0.0f, 0.0f}, 0.0f};
	
	if (ioResampleState != NULL) {
		resampleState = *ioResampleState;
	}
	if (inBytesPerSample == outBytesPerSample && inSampleRate == outSampleRate && inChannelCount == outChannelCount) {
		AudioFrameIndex frameCount = inFrameCount < outFrameCountMax ? inFrameCount : outFrameCountMax;
		memcpy(outSamples, inSamples, frameCount * inChannelCount * inBytesPerSample);
		inFrameIndex = outFrameIndex = frameCount;
		
	} else {
		if (inBytesPerSample == 4 && outBytesPerSample == 4) {
			convertSamples_channelCount(float, float, clampFrame_none, 1, 1);
			
		} else if (inBytesPerSample == 2 && outBytesPerSample == 2) {
			convertSamples_channelCount(int16_t, int16_t, clampFrame_none, 1, 1);
			
		} else if (inBytesPerSample == 1 && outBytesPerSample == 1) {
			convertSamples_channelCount(int8_t, int8_t, clampFrame_none, 1, 1);
			
		} else if (inBytesPerSample == 4) {
			if (outBytesPerSample == 2) {
				convertSamples_channelCount(float, int16_t, clampFrame_float, 32767.0f, 1);
			} else {
				convertSamples_channelCount(float, int8_t, clampFrame_float, 127.0f, 1);
			}
			
		} else if (outBytesPerSample == 4) {
			if (inBytesPerSample == 2) {
				convertSamples_channelCount(int16_t, float, clampFrame_none, 1.0f, 32767.0f);
			} else {
				convertSamples_channelCount(int8_t, float, clampFrame_none, 1.0f, 127.0f);
			}
			
		} else if (inBytesPerSample == 2) {
			convertSamples_channelCount(int16_t, int8_t, clampFrame_none, 1, 256);
			
		} else {
			convertSamples_channelCount(int8_t, int16_t, clampFrame_none, 256, 1);
		}
	}
	if (outFramesRead != NULL) {
		*outFramesRead = inFrameIndex;
	}
	if (outFramesWritten != NULL) {
		*outFramesWritten = outFrameIndex;
	}
	if (ioResampleState != NULL) {
		*ioResampleState = resampleState;
	}
}
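/* Illustrative use of convertAudioSamples (not part of the library; buffer
   names and sizes are hypothetical): converting 1024 frames of stereo 44100 Hz
   int16 audio to mono 22050 Hz float. The returned frame counts report how
   much of each buffer was actually consumed and filled.
   
	int16_t inBuffer[1024 * 2];
	float outBuffer[512];
	AudioFrameIndex framesRead, framesWritten;
	AudioResampleState resampleState = initAudioResampleState();
	convertAudioSamples(inBuffer, 1024, 2, 44100, sizeof(int16_t),
	                    outBuffer, 512, 1, 22050, sizeof(float),
	                    &framesRead, &framesWritten, &resampleState);
*/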
#define MIX_BUFFER_FRAME_COUNT 4096

#define mixAudioSamples_implementation(sample_type, expanded_sample_type, sample_max, sample_min) \
	sample_type * typedIOSamples = ioMixedSamples, * typedMixBuffer = (sample_type *) mixBuffer; \
	expanded_sample_type sample; \
	\
	for (AudioFrameIndex frameIndex = 0; frameIndex < framesWritten; frameIndex++) { \
		for (unsigned int channelIndex = 0; channelIndex < outChannelCount; channelIndex++) { \
			sample = typedMixBuffer[frameIndex * outChannelCount + channelIndex] * channelMultipliers2[channelIndex]; \
			sample += typedIOSamples[frameIndex * outChannelCount + channelIndex]; \
			if (sample > sample_max) { \
				sample = sample_max; \
			} else if (sample < sample_min) { \
				sample = sample_min; \
			} \
			typedIOSamples[frameIndex * outChannelCount + channelIndex] = sample; \
		} \
	}

void mixAudioSamples(const void * inSamples, unsigned int inChannelCount, unsigned int inSampleRate, unsigned int inBytesPerSample, void * ioMixedSamples, unsigned int outChannelCount, unsigned int outSampleRate, unsigned int outBytesPerSample, float * channelMultipliers, AudioFrameIndex inFrameCount, AudioFrameIndex outFrameCountMax, AudioFrameIndex * outFramesRead, AudioFrameIndex * outFramesWritten, AudioResampleState * ioResampleState) {
	AudioFrameIndex framesReadTotal = 0, framesWrittenTotal = 0;
	AudioResampleState resampleState = {{0.0f, 0.0f}, 0.0f};
	
	assert(inChannelCount == 1 || inChannelCount == 2);
	assert(outChannelCount == 1 || outChannelCount == 2);
	assert(inBytesPerSample == 1 || inBytesPerSample == 2 || inBytesPerSample == 4);
	assert(outBytesPerSample == 1 || outBytesPerSample == 2 || outBytesPerSample == 4);
	
	if (ioResampleState != NULL) {
		resampleState = *ioResampleState;
	}
	char mixBuffer[2 * 4 * MIX_BUFFER_FRAME_COUNT];
	while (inFrameCount > 0 && outFrameCountMax > 0) {
		AudioFrameIndex framesRead, framesWritten;
		AudioFrameIndex mixFrameCount = outFrameCountMax;
		if (mixFrameCount > MIX_BUFFER_FRAME_COUNT) {
			mixFrameCount = MIX_BUFFER_FRAME_COUNT;
		}
		convertAudioSamples(inSamples, inFrameCount, inChannelCount, inSampleRate, inBytesPerSample, mixBuffer, mixFrameCount, outChannelCount, outSampleRate, outBytesPerSample, &framesRead, &framesWritten, &resampleState);
		inSamples += framesRead * inChannelCount * inBytesPerSample;
		framesReadTotal += framesRead;
		framesWrittenTotal += framesWritten;
		if (framesWritten > outFrameCountMax) {
			framesWritten = outFrameCountMax;
		}
		float channelMultipliers2[outChannelCount];
		for (unsigned int channelIndex = 0; channelIndex < outChannelCount; channelIndex++) {
			channelMultipliers2[channelIndex] = channelMultipliers[channelIndex];
		}
		if (outBytesPerSample == 1) {
			mixAudioSamples_implementation(int8_t, int16_t, INT8_MAX, INT8_MIN)
		} else if (outBytesPerSample == 2) {
			mixAudioSamples_implementation(int16_t, int32_t, INT16_MAX, INT16_MIN)
		} else {
			mixAudioSamples_implementation(float, float, 1.0f, -1.0f)
		}
		ioMixedSamples += framesWritten * outChannelCount * outBytesPerSample;
		inFrameCount -= framesRead;
		outFrameCountMax -= framesWritten;
	}
	if (ioResampleState != NULL) {
		*ioResampleState = resampleState;
	}
	if (outFramesRead != NULL) {
		*outFramesRead = framesReadTotal;
	}
	if (outFramesWritten != NULL) {
		*outFramesWritten = framesWrittenTotal;
	}
}
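/* Illustrative use of mixAudioSamples (hypothetical buffers): adding a stereo
   float source into a stereo int16 mix buffer that already contains audio, at
   80% gain per output channel. Format and rate conversion happen on the fly
   through convertAudioSamples before the clamped add.
   
	float voiceBuffer[256 * 2];
	int16_t mixTarget[256 * 2];
	float gains[2] = {0.8f, 0.8f};
	AudioFrameIndex framesRead, framesWritten;
	mixAudioSamples(voiceBuffer, 2, 48000, sizeof(float),
	                mixTarget, 2, 48000, sizeof(int16_t),
	                gains, 256, 256, &framesRead, &framesWritten, NULL);
*/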
static uint64_t roundUpToPowerOfTwo_uint64(uint64_t input) {
	// http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
	input--;
	input |= input >> 1;
	input |= input >> 2;
	input |= input >> 4;
	input |= input >> 8;
	input |= input >> 16;
	input |= input >> 32;
	input++;
	return input;
}

// http://blogs.zynaptiq.com/bernsee/dft-a-pied/
void smbFft(float * fftBuffer, long fftFrameSize, long sign) {
	/*
		FFT routine, (C)1996 S.M.Bernsee. Sign = -1 is FFT, 1 is iFFT (inverse)
		Fills fftBuffer[0...2*fftFrameSize-1] with the Fourier transform of the time domain data in fftBuffer[0...2*fftFrameSize-1].
		The FFT array takes and returns the cosine and sine parts in an interleaved manner, ie. fftBuffer[0] = cosPart[0], fftBuffer[1] = sinPart[0], asf.
		fftFrameSize must be a power of 2.
		It expects a complex input signal (see footnote 2), ie. when working with 'common' audio signals our input signal has to be passed as {in[0],0.,in[1],0.,in[2],0.,...} asf.
		In that case, the transform of the frequencies of interest is in fftBuffer[0...fftFrameSize].
	*/
	float wr, wi, arg, * p1, * p2, temp;
	float tr, ti, ur, ui, * p1r, * p1i, * p2r, * p2i;
	long i, bitm, j, le, le2, k, logN;
	
	logN = (long) (log(fftFrameSize) / log(2.0) + 0.5);
	for (i = 2; i < 2 * fftFrameSize - 2; i += 2) {
		for (bitm = 2, j = 0; bitm < 2 * fftFrameSize; bitm <<= 1) {
			if (i & bitm) {
				j++;
			}
			j <<= 1;
		}
		if (i < j) {
			p1 = fftBuffer + i;
			p2 = fftBuffer + j;
			temp = *p1;
			*(p1++) = *p2;
			*(p2++) = temp;
			temp = *p1;
			*p1 = *p2;
			*p2 = temp;
		}
	}
	for (k = 0, le = 2; k < logN; k++) {
		le <<= 1;
		le2 = le >> 1;
		ur = 1.0;
		ui = 0.0;
		arg = M_PI / (le2 >> 1);
		wr = cos(arg);
		wi = sign * sin(arg);
		for (j = 0; j < le2; j += 2) {
			p1r = fftBuffer + j;
			p1i = p1r + 1;
			p2r = p1r + le2;
			p2i = p2r + 1;
			for (i = j; i < 2 * fftFrameSize; i += le) {
				tr = *p2r * ur - *p2i * ui;
				ti = *p2r * ui + *p2i * ur;
				*p2r = *p1r - tr;
				*p2i = *p1i - ti;
				*p1r += tr;
				*p1i += ti;
				p1r += le;
				p1i += le;
				p2r += le;
				p2i += le;
			}
			tr = ur * wr - ui * wi;
			ui = ur * wi + ui * wr;
			ur = tr;
		}
	}
}

float * fourierTransform(const void * inSamples, unsigned int channelCount, unsigned int bytesPerSample, AudioFrameIndex frameCount) {
	AudioFrameIndex frameCountP2 = roundUpToPowerOfTwo_uint64(frameCount);
	float * transformedSamples = calloc(frameCountP2 * 2, sizeof(float));
	convertAudioSamples(inSamples, frameCount, channelCount, 1, bytesPerSample, transformedSamples, frameCount, 1, 1, 4, NULL, NULL, NULL);
	// Spread the mono samples into the interleaved {real, imaginary} layout
	// smbFft expects, working backward so samples aren't overwritten before
	// they've been moved.
	for (AudioFrameIndex frameIndex = frameCount - 1; frameIndex > 0; frameIndex--) {
		transformedSamples[frameIndex * 2] = transformedSamples[frameIndex];
		transformedSamples[frameIndex * 2 + 1] = 0.0f;
	}
	transformedSamples[1] = 0.0f;
	smbFft(transformedSamples, frameCountP2, -1);
	return transformedSamples;
}

void inverseFourierTransform(float * transformedSamples, unsigned int channelCount, unsigned int bytesPerSample, AudioFrameIndex frameCount, void * outSamples) {
	AudioFrameIndex frameCountP2 = roundUpToPowerOfTwo_uint64(frameCount);
	float rescaleMultiplier = 1.0f / frameCountP2;
	smbFft(transformedSamples, frameCountP2, 1);
	// Collapse the interleaved {real, imaginary} pairs back to real samples,
	// rescaling to undo the gain of the forward/inverse pass.
	for (AudioFrameIndex frameIndex = 0; frameIndex < frameCount; frameIndex++) {
		transformedSamples[frameIndex] = transformedSamples[frameIndex * 2] * rescaleMultiplier;
	}
	convertAudioSamples(transformedSamples, frameCount, 1, 1, 4, outSamples, frameCount, channelCount, 1, bytesPerSample, NULL, NULL, NULL);
}
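/* Illustrative forward/inverse transform round trip (hypothetical buffer). The
   returned spectrum is calloc'd by fourierTransform, sized to the next power
   of two frames, and laid out as interleaved {real, imaginary} pairs per
   smbFft's conventions; the caller owns it and must free it.
   
	float monoSamples[1000];
	float * spectrum = fourierTransform(monoSamples, 1, sizeof(float), 1000);
	// ... inspect or modify the 1024 complex bins in spectrum ...
	inverseFourierTransform(spectrum, 1, sizeof(float), 1000, monoSamples);
	free(spectrum);
*/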
#define clampAudioSamples_implementation(sample_type) \
	sample_type * typedIOSamples = ioSamples, sample; \
	sample_type minValueTyped = *(sample_type *) minValue; \
	sample_type maxValueTyped = *(sample_type *) maxValue; \
	\
	for (AudioFrameIndex frameIndex = 0; frameIndex < frameCount; frameIndex++) { \
		for (unsigned int channelIndex = 0; channelIndex < channelCount; channelIndex++) { \
			sample = typedIOSamples[frameIndex * channelCount + channelIndex]; \
			if (sample > maxValueTyped) { \
				sample = maxValueTyped; \
			} else if (sample < minValueTyped) { \
				sample = minValueTyped; \
			} \
			typedIOSamples[frameIndex * channelCount + channelIndex] = sample; \
		} \
	}

void clampAudioSamples(void * ioSamples, void * minValue, void * maxValue, unsigned int channelCount, unsigned int bytesPerSample, AudioFrameIndex frameCount) {
	assert(channelCount == 1 || channelCount == 2);
	assert(bytesPerSample == 1 || bytesPerSample == 2 || bytesPerSample == 4);
	
	if (bytesPerSample == 1) {
		clampAudioSamples_implementation(int8_t)
	} else if (bytesPerSample == 2) {
		clampAudioSamples_implementation(int16_t)
	} else {
		clampAudioSamples_implementation(float)
	}
}

#define amplifyAudioSamples_implementation(sample_type, expanded_sample_type, sample_max, sample_min) \
	sample_type * typedIOSamples = ioSamples; \
	expanded_sample_type sample; \
	\
	for (AudioFrameIndex frameIndex = 0; frameIndex < frameCount; frameIndex++) { \
		for (unsigned int channelIndex = 0; channelIndex < channelCount; channelIndex++) { \
			sample = typedIOSamples[frameIndex * channelCount + channelIndex] * multiplier; \
			if (sample > sample_max) { \
				sample = sample_max; \
			} else if (sample < sample_min) { \
				sample = sample_min; \
			} \
			typedIOSamples[frameIndex * channelCount + channelIndex] = sample; \
		} \
	}

void amplifyAudioSamples(void * ioSamples, unsigned int channelCount, unsigned int bytesPerSample, AudioFrameIndex frameCount, float multiplier) {
	assert(channelCount == 1 || channelCount == 2);
	assert(bytesPerSample == 1 || bytesPerSample == 2 || bytesPerSample == 4);
	
	if (bytesPerSample == 1) {
		amplifyAudioSamples_implementation(int8_t, int32_t, INT8_MAX, INT8_MIN)
	} else if (bytesPerSample == 2) {
		amplifyAudioSamples_implementation(int16_t, int32_t, INT16_MAX, INT16_MIN)
	} else {
		amplifyAudioSamples_implementation(float, float, 1.0f, -1.0f)
	}
}

#define applyAmplitudeEnvelope_implementation(sample_type, expanded_sample_type, sample_max, sample_min) \
	sample_type * typedIOSamples = ioSamples; \
	expanded_sample_type sample; \
	\
	for (AudioFrameIndex frameIndex = 0; frameIndex < frameCount; frameIndex++) { \
		float multiplier = function(frameIndex / (float) (frameCount - 1)); \
		for (unsigned int channelIndex = 0; channelIndex < channelCount; channelIndex++) { \
			sample = typedIOSamples[frameIndex * channelCount + channelIndex] * multiplier; \
			if (sample > sample_max) { \
				sample = sample_max; \
			} else if (sample < sample_min) { \
				sample = sample_min; \
			} \
			typedIOSamples[frameIndex * channelCount + channelIndex] = sample; \
		} \
	}

void applyAmplitudeEnvelope(AudioMath_envelopeFunction function, void * ioSamples, unsigned int channelCount, unsigned int bytesPerSample, AudioFrameIndex frameCount) {
	assert(channelCount == 1 || channelCount == 2);
	assert(bytesPerSample == 1 || bytesPerSample == 2 || bytesPerSample == 4);
	
	if (bytesPerSample == 1) {
		applyAmplitudeEnvelope_implementation(int8_t, int32_t, INT8_MAX, INT8_MIN)
	} else if (bytesPerSample == 2) {
		applyAmplitudeEnvelope_implementation(int16_t, int32_t, INT16_MAX, INT16_MIN)
	} else {
		applyAmplitudeEnvelope_implementation(float, float, 1.0f, -1.0f)
	}
}
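/* Illustrative envelope callback (hypothetical): applyAmplitudeEnvelope calls
   the function once per frame with the normalized position in [0, 1] and
   multiplies each channel's sample by the returned gain, so a linear fade-out
   could look like this:
   
	static float linearFadeOut(float position) {
		return 1.0f - position;
	}
	
	applyAmplitudeEnvelope(linearFadeOut, samples, 1, sizeof(int16_t), frameCount);
*/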
#define normalizeAudioSamples_implementation(sample_type, expanded_sample_type, sample_max, sample_min) \
	sample_type * typedIOSamples = ioSamples; \
	expanded_sample_type sample, peakSample = 0; \
	\
	for (AudioFrameIndex frameIndex = 0; frameIndex < frameCount; frameIndex++) { \
		for (unsigned int channelIndex = 0; channelIndex < channelCount; channelIndex++) { \
			sample = typedIOSamples[frameIndex * channelCount + channelIndex]; \
			if (sample == sample_min) { \
				sample = sample_max; \
			} else if (sample < 0) { \
				sample = -sample; \
			} \
			if (sample > peakSample) { \
				peakSample = sample; \
			} \
		} \
	} \
	\
	for (AudioFrameIndex frameIndex = 0; frameIndex < frameCount; frameIndex++) { \
		for (unsigned int channelIndex = 0; channelIndex < channelCount; channelIndex++) { \
			sample = typedIOSamples[frameIndex * channelCount + channelIndex] * sample_max / peakSample * maxValue; \
			if (sample > sample_max) { \
				sample = sample_max; \
			} else if (sample < sample_min) { \
				sample = sample_min; \
			} \
			typedIOSamples[frameIndex * channelCount + channelIndex] = sample; \
		} \
	}

void normalizeAudioSamples(void * ioSamples, unsigned int channelCount, unsigned int bytesPerSample, AudioFrameIndex frameCount, float maxValue) {
	assert(channelCount == 1 || channelCount == 2);
	assert(bytesPerSample == 1 || bytesPerSample == 2 || bytesPerSample == 4);
	
	if (bytesPerSample == 1) {
		normalizeAudioSamples_implementation(int8_t, int32_t, INT8_MAX, INT8_MIN)
	} else if (bytesPerSample == 2) {
		normalizeAudioSamples_implementation(int16_t, int32_t, INT16_MAX, INT16_MIN)
	} else {
		normalizeAudioSamples_implementation(float, float, 1.0f, -1.0f)
	}
}

#define stretchAudioSamples_readFrame_mono() \
	sample[0] = inSamplesTyped[inFrameIndex]; \
	inFrameIndex += 1;

#define stretchAudioSamples_readFrame_stereo() \
	sample[0] = inSamplesTyped[inFrameIndex * 2]; \
	sample[1] = inSamplesTyped[inFrameIndex * 2 + 1]; \
	inFrameIndex += 1;

#define stretchAudioSamples_writeFrame_mono() \
	outSamplesTyped[outFrameIndex] = sample[0]; \
	outFrameIndex += 1;

#define stretchAudioSamples_writeFrame_stereo() \
	outSamplesTyped[outFrameIndex * 2] = sample[0]; \
	outSamplesTyped[outFrameIndex * 2 + 1] = sample[1]; \
	outFrameIndex += 1;

#define stretchAudioSamples_upsample(read_frame, write_frame) \
	while (inFrameIndex < inFrameCount && outFrameIndex < outFrameCountMax) { \
		read_frame() \
		resampleSlop += multiplier; \
		while (resampleSlop >= divisor && outFrameIndex < outFrameCountMax) { \
			resampleSlop -= divisor; \
			write_frame() \
		} \
	} \
	if (inFrameIndex == inFrameCount && outFrameIndex == outFrameCountMax - 1) { \
		write_frame() \
	}

#define stretchAudioSamples_downsample(read_frame, write_frame) \
	while (inFrameIndex < inFrameCount && outFrameIndex < outFrameCountMax) { \
		read_frame() \
		resampleSlop += multiplier; \
		while (resampleSlop >= divisor && outFrameIndex < outFrameCountMax) { \
			resampleSlop -= divisor; \
			write_frame() \
		} \
	}

#define stretchAudioSamples_implementation(sample_type, stretchAudioSamples_resample) \
	const sample_type * inSamplesTyped = ioSamples; \
	sample_type * outSamplesTyped = stretchedSamples, sample[2] = {0, 0}; \
	\
	if (channelCount == 1) { \
		stretchAudioSamples_resample(stretchAudioSamples_readFrame_mono, stretchAudioSamples_writeFrame_mono) \
		\
	} else { \
		stretchAudioSamples_resample(stretchAudioSamples_readFrame_stereo, stretchAudioSamples_writeFrame_stereo) \
	}

void stretchAudioSamples(int multiplier, int divisor, void * ioSamples, unsigned int channelCount, unsigned int bytesPerSample, AudioFrameIndex frameCount) {
	assert(channelCount == 1 || channelCount == 2);
	assert(bytesPerSample == 1 || bytesPerSample == 2 || bytesPerSample == 4);
	assert(multiplier >= 1);
	assert(divisor >= 1);
	
	AudioFrameIndex inFrameIndex = 0, outFrameIndex = 0, inFrameCount = frameCount, outFrameCountMax = frameCount * multiplier / divisor;
	int resampleSlop = 0;
	void * stretchedSamples = malloc(channelCount * bytesPerSample * frameCount * multiplier / divisor);
	if (multiplier > divisor) {
		if (bytesPerSample == 1) {
			stretchAudioSamples_implementation(int8_t, stretchAudioSamples_upsample)
		} else if (bytesPerSample == 2) {
			stretchAudioSamples_implementation(int16_t, stretchAudioSamples_upsample)
		} else {
			stretchAudioSamples_implementation(float, stretchAudioSamples_upsample)
		}
	} else {
		if (bytesPerSample == 1) {
			stretchAudioSamples_implementation(int8_t, stretchAudioSamples_downsample)
		} else if (bytesPerSample == 2) {
			stretchAudioSamples_implementation(int16_t, stretchAudioSamples_downsample)
		} else {
			stretchAudioSamples_implementation(float, stretchAudioSamples_downsample)
		}
	}
	memcpy(ioSamples, stretchedSamples, channelCount * bytesPerSample * frameCount * multiplier / divisor);
	free(stretchedSamples);
}
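/* Illustrative call (hypothetical buffer): stretchAudioSamples resamples in
   place at a fixed multiplier/divisor ratio and copies frameCount * multiplier
   / divisor frames back into ioSamples, so when lengthening, the buffer must
   already be allocated for the stretched frame count.
   
	// Double the duration of a mono int16 buffer holding frameCount frames;
	// samples must have room for frameCount * 2 frames.
	stretchAudioSamples(2, 1, samples, 1, sizeof(int16_t), frameCount);
*/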
void equalizeAudioSamples(unsigned int bandCount, float * bands, void * ioSamples, unsigned int channelCount, unsigned int bytesPerSample, unsigned int sampleRate, AudioFrameIndex frameCount) {
	// TODO
}

#define reverseAudioSamples_implementation(sample_type) \
	sample_type * ioSamplesTyped = ioSamples, * reverseBufferTyped = reverseBuffer; \
	\
	for (AudioFrameIndex frameIndex = 0; frameIndex < frameCount; frameIndex++) { \
		for (unsigned int channelIndex = 0; channelIndex < channelCount; channelIndex++) { \
			reverseBufferTyped[frameIndex * channelCount + channelIndex] = ioSamplesTyped[(frameCount - 1 - frameIndex) * channelCount + channelIndex]; \
		} \
	}

void reverseAudioSamples(void * ioSamples, unsigned int channelCount, unsigned int bytesPerSample, AudioFrameIndex frameCount) {
	assert(channelCount == 1 || channelCount == 2);
	assert(bytesPerSample == 1 || bytesPerSample == 2 || bytesPerSample == 4);
	
	void * reverseBuffer = malloc(frameCount * channelCount * bytesPerSample);
	if (bytesPerSample == 1) {
		reverseAudioSamples_implementation(int8_t)
	} else if (bytesPerSample == 2) {
		reverseAudioSamples_implementation(int16_t)
	} else {
		reverseAudioSamples_implementation(float)
	}
	memcpy(ioSamples, reverseBuffer, frameCount * channelCount * bytesPerSample);
	free(reverseBuffer);
}

#define centerAudioSamples_implementation(sample_type, expanded_sample_type, sample_max, sample_min) \
	sample_type * typedIOSamples = ioSamples; \
	sample_type minSample = sample_max, maxSample = sample_min; \
	\
	for (AudioFrameIndex frameIndex = 0; frameIndex < frameCount; frameIndex++) { \
		for (unsigned int channelIndex = 0; channelIndex < channelCount; channelIndex++) { \
			sample_type sample = typedIOSamples[frameIndex * channelCount + channelIndex]; \
			if (sample < minSample) { \
				minSample = sample; \
			} \
			if (sample > maxSample) { \
				maxSample = sample; \
			} \
		} \
	} \
	expanded_sample_type sampleValueOffset = -(minSample + (maxSample - minSample) * 0.5f) + centerValue * sample_max; \
	\
	for (AudioFrameIndex frameIndex = 0; frameIndex < frameCount; frameIndex++) { \
		for (unsigned int channelIndex = 0; channelIndex < channelCount; channelIndex++) { \
			expanded_sample_type expandedSample = typedIOSamples[frameIndex * channelCount + channelIndex] + sampleValueOffset; \
			if (expandedSample > sample_max) { \
				expandedSample = sample_max; \
			} else if (expandedSample < sample_min) { \
				expandedSample = sample_min; \
			} \
			typedIOSamples[frameIndex * channelCount + channelIndex] = expandedSample; \
		} \
	}

void centerAudioSamples(void * ioSamples, unsigned int channelCount, unsigned int bytesPerSample, AudioFrameIndex frameCount, float centerValue) {
	assert(channelCount == 1 || channelCount == 2);
	assert(bytesPerSample == 1 || bytesPerSample == 2 || bytesPerSample == 4);
	
	if (bytesPerSample == 1) {
		centerAudioSamples_implementation(int8_t, int32_t, INT8_MAX, INT8_MIN)
	} else if (bytesPerSample == 2) {
		centerAudioSamples_implementation(int16_t, int32_t, INT16_MAX, INT16_MIN)
	} else {
		centerAudioSamples_implementation(float, float, 1.0f, -1.0f)
	}
}
#define generateTone_implementation(sample_type, sample_multiplier) \
	sample_type * outSamplesTyped = outSamples, sample; \
	\
	for (AudioFrameIndex frameIndex = 0; frameIndex < frameCount; frameIndex++) { \
		sample = function(frameIndex * frequency / sampleRate) * sample_multiplier; \
		for (unsigned int channelIndex = 0; channelIndex < channelCount; channelIndex++) { \
			outSamplesTyped[frameIndex * channelCount + channelIndex] = sample; \
		} \
	}

void generateTone(float frequency, AudioMath_waveFunction function, void * outSamples, unsigned int channelCount, unsigned int sampleRate, unsigned int bytesPerSample, AudioFrameIndex frameCount) {
	if (bytesPerSample == 1) {
		generateTone_implementation(int8_t, 127)
	} else if (bytesPerSample == 2) {
		generateTone_implementation(int16_t, 32767)
	} else {
		generateTone_implementation(float, 1.0f)
	}
}

#define generateNoise_implementation(sample_type, sample_multiplier) \
	sample_type * outSamplesTyped = outSamples, sample; \
	pcg_state pcgState; \
	\
	pcg32_seed(&pcgState, seed, seed); \
	for (AudioFrameIndex frameIndex = 0; frameIndex < frameCount; frameIndex++) { \
		sample = pcg32_irand(&pcgState) * sample_multiplier; \
		for (unsigned int channelIndex = 0; channelIndex < channelCount; channelIndex++) { \
			outSamplesTyped[frameIndex * channelCount + channelIndex] = sample; \
		} \
	}

void generateNoise(uint32_t seed, void * outSamples, unsigned int channelCount, unsigned int bytesPerSample, AudioFrameIndex frameCount) {
	if (bytesPerSample == 1) {
		generateNoise_implementation(int8_t, 127)
	} else if (bytesPerSample == 2) {
		generateNoise_implementation(int16_t, 32767)
	} else {
		generateNoise_implementation(float, 1.0f)
	}
}
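/* Illustrative generator calls (hypothetical buffers). generateTone passes the
   wave function a phase measured in cycles (frameIndex * frequency / sampleRate)
   and expects a result in roughly [-1, 1] before scaling to the output format;
   the sine callback below is an assumed example, not a function provided by
   this file.
   
	static float sineWave(float phase) {
		return sinf(phase * (float) (2.0 * M_PI));
	}
	
	float toneBuffer[44100];
	generateTone(440.0f, sineWave, toneBuffer, 1, 44100, sizeof(float), 44100);
	
	int16_t noiseBuffer[22050];
	generateNoise(12345, noiseBuffer, 1, sizeof(int16_t), 22050);
*/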