diff --git a/apps/fabric-example/ios/Podfile.lock b/apps/fabric-example/ios/Podfile.lock index 2bc3ce669..d9f0227d9 100644 --- a/apps/fabric-example/ios/Podfile.lock +++ b/apps/fabric-example/ios/Podfile.lock @@ -3321,7 +3321,7 @@ SPEC CHECKSUMS: FBLazyVector: 309703e71d3f2f1ed7dc7889d58309c9d77a95a4 fmt: a40bb5bd0294ea969aaaba240a927bd33d878cdd glog: 5683914934d5b6e4240e497e0f4a3b42d1854183 - hermes-engine: 42d6f09ee6ede2feb220e2fb772e8bebb42ca403 + hermes-engine: f93b5009d8ccd9429fe2a772351980df8a22a413 RCT-Folly: 846fda9475e61ec7bcbf8a3fe81edfcaeb090669 RCTDeprecation: a41bbdd9af30bf2e5715796b313e44ec43eefff1 RCTRequired: 7be34aabb0b77c3cefe644528df0fa0afad4e4d0 diff --git a/packages/audiodocs/docs/guides/create-your-own-effect.mdx b/packages/audiodocs/docs/guides/create-your-own-effect.mdx index 9eb8ec8a2..fbdcbe9c1 100644 --- a/packages/audiodocs/docs/guides/create-your-own-effect.mdx +++ b/packages/audiodocs/docs/guides/create-your-own-effect.mdx @@ -45,21 +45,22 @@ For the sake of a simplicity, we will use value as a raw `double` type, not wrap #include namespace audioapi { - class AudioBus; +class AudioBuffer; - class MyProcessorNode : public AudioNode { - public: - explicit MyProcessorNode(BaseAudioContext *context); +class MyProcessorNode : public AudioNode { +public: + explicit MyProcessorNode(const std::shared_ptr &context, ); - protected: - void processNode(const std::shared_ptr &bus, - int framesToProcess) override; +protected: + std::shared_ptr + processNode(const std::shared_ptr &buffer, + int framesToProcess) override; - // highlight-start - private: - double gain; // value responsible for gain value - // highlight-end - }; +// highlight-start +private: + double gain; // value responsible for gain value +// highlight-end +}; } // namespace audioapi ``` @@ -69,20 +70,20 @@ namespace audioapi { ```cpp #include "MyProcessorNode.h" -#include +#include #include namespace audioapi { - MyProcessorNode::MyProcessorNode(BaseAudioContext *context) + 
MyProcessorNode::MyProcessorNode(const std::shared_ptr &context) //highlight-next-line : AudioNode(context), gain(0.5) { isInitialized_ = true; } - void MyProcessorNode::processNode(const std::shared_ptr &bus, + std::shared_ptr MyProcessorNode::processNode(const std::shared_ptr &buffer, int framesToProcess) { // highlight-start - for (int channel = 0; channel < bus->getNumberOfChannels(); ++channel) { + for (int channel = 0; channel < buffer->getNumberOfChannels(); ++channel) { auto *audioArray = bus->getChannel(channel); for (size_t i = 0; i < framesToProcess; ++i) { // Apply gain to each sample in the audio array diff --git a/packages/custom-node-generator/templates/basic/shared/MyProcessorNode.cpp b/packages/custom-node-generator/templates/basic/shared/MyProcessorNode.cpp index b69d3986c..b1ffb9581 100644 --- a/packages/custom-node-generator/templates/basic/shared/MyProcessorNode.cpp +++ b/packages/custom-node-generator/templates/basic/shared/MyProcessorNode.cpp @@ -1,14 +1,16 @@ #include "MyProcessorNode.h" -#include +#include namespace audioapi { -MyProcessorNode::MyProcessorNode(std::shared_ptr context) +MyProcessorNode::MyProcessorNode( + const std::shared_ptr &context, ) : AudioNode(context) { - isInitialized_ = true; + isInitialized_ = true; } -std::shared_ptr MyProcessorNode::processNode(const std::shared_ptr &bus, - int framesToProcess) { - // put your processing logic here +std::shared_ptr +MyProcessorNode::processNode(const std::shared_ptr &buffer, + int framesToProcess) { + // put your processing logic here } } // namespace audioapi diff --git a/packages/custom-node-generator/templates/basic/shared/MyProcessorNode.h b/packages/custom-node-generator/templates/basic/shared/MyProcessorNode.h index 1f339aaa0..250321092 100644 --- a/packages/custom-node-generator/templates/basic/shared/MyProcessorNode.h +++ b/packages/custom-node-generator/templates/basic/shared/MyProcessorNode.h @@ -2,15 +2,15 @@ #include namespace audioapi { -class AudioBus; +class 
AudioBuffer; class MyProcessorNode : public AudioNode { public: - explicit MyProcessorNode(std::shared_ptr context); + explicit MyProcessorNode(const std::shared_ptr &context, ); protected: - std::shared_ptr processNode(const std::shared_ptr &bus, - int framesToProcess) override; - + std::shared_ptr + processNode(const std::shared_ptr &buffer, + int framesToProcess) override; }; } // namespace audioapi diff --git a/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/AndroidAudioRecorder.cpp b/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/AndroidAudioRecorder.cpp index 07d185a85..6be58a7f5 100644 --- a/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/AndroidAudioRecorder.cpp +++ b/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/AndroidAudioRecorder.cpp @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include @@ -143,7 +143,8 @@ Result AndroidAudioRecorder::start(const std::string & } if (isConnected()) { - deinterleavingBuffer_ = std::make_shared(streamMaxBufferSizeInFrames_); + deinterleavingBuffer_ = std::make_shared( + streamMaxBufferSizeInFrames_, streamChannelCount_, streamSampleRate_); adapterNode_->init(streamMaxBufferSizeInFrames_, streamChannelCount_); } @@ -324,7 +325,8 @@ void AndroidAudioRecorder::connect(const std::shared_ptr &n adapterNode_ = node; if (!isIdle()) { - deinterleavingBuffer_ = std::make_shared(streamMaxBufferSizeInFrames_); + deinterleavingBuffer_ = std::make_shared( + streamMaxBufferSizeInFrames_, streamChannelCount_, streamSampleRate_); adapterNode_->init(streamMaxBufferSizeInFrames_, streamChannelCount_); } @@ -374,13 +376,11 @@ oboe::DataCallbackResult AndroidAudioRecorder::onAudioReady( if (isConnected()) { if (auto adapterLock = Locker::tryLock(adapterNodeMutex_)) { - for (int channel = 0; channel < streamChannelCount_; ++channel) { - for (int frame = 0; frame < numFrames; ++frame) { - 
deinterleavingBuffer_->getData()[frame] = - static_cast(audioData)[frame * streamChannelCount_ + channel]; - } + auto const data = static_cast(audioData); + deinterleavingBuffer_->deinterleaveFrom(data, numFrames); - adapterNode_->buff_[channel]->write(deinterleavingBuffer_->getData(), numFrames); + for (size_t ch = 0; ch < streamChannelCount_; ++ch) { + adapterNode_->buff_[ch]->write(*deinterleavingBuffer_->getChannel(ch), numFrames); } } } diff --git a/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/AndroidAudioRecorder.h b/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/AndroidAudioRecorder.h index 5c2ea8bbf..05602cb10 100644 --- a/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/AndroidAudioRecorder.h +++ b/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/AndroidAudioRecorder.h @@ -1,18 +1,18 @@ #pragma once +#include #include +#include #include #include #include +#include #include #include -#include -#include -#include namespace audioapi { -class AudioBus; +class AudioBuffer; class AudioArray; class CircularAudioArray; class AudioFileProperties; @@ -22,14 +22,16 @@ class AudioEventHandlerRegistry; class AndroidAudioRecorder : public oboe::AudioStreamCallback, public AudioRecorder { public: - explicit AndroidAudioRecorder(const std::shared_ptr &audioEventHandlerRegistry); + explicit AndroidAudioRecorder( + const std::shared_ptr &audioEventHandlerRegistry); ~AndroidAudioRecorder() override; void cleanup(); Result start(const std::string &fileNameOverride) override; Result, std::string> stop() override; - Result enableFileOutput(std::shared_ptr properties) override; + Result enableFileOutput( + std::shared_ptr properties) override; void disableFileOutput() override; void pause() override; @@ -38,21 +40,22 @@ class AndroidAudioRecorder : public oboe::AudioStreamCallback, public AudioRecor bool isPaused() const override; bool isIdle() const override; - Result 
setOnAudioReadyCallback(float sampleRate, size_t bufferLength, int channelCount, uint64_t callbackId) - override; + Result setOnAudioReadyCallback( + float sampleRate, + size_t bufferLength, + int channelCount, + uint64_t callbackId) override; void clearOnAudioReadyCallback() override; void connect(const std::shared_ptr &node) override; void disconnect() override; - oboe::DataCallbackResult onAudioReady( - oboe::AudioStream *oboeStream, - void *audioData, - int32_t numFrames) override; + oboe::DataCallbackResult + onAudioReady(oboe::AudioStream *oboeStream, void *audioData, int32_t numFrames) override; void onErrorAfterClose(oboe::AudioStream *oboeStream, oboe::Result error) override; private: - std::shared_ptr deinterleavingBuffer_; + std::shared_ptr deinterleavingBuffer_; float streamSampleRate_; int32_t streamChannelCount_; diff --git a/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/AudioPlayer.cpp b/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/AudioPlayer.cpp index e112c98a5..6e3bd1692 100644 --- a/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/AudioPlayer.cpp +++ b/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/AudioPlayer.cpp @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include @@ -12,7 +12,7 @@ namespace audioapi { AudioPlayer::AudioPlayer( - const std::function, int)> &renderAudio, + const std::function, int)> &renderAudio, float sampleRate, int channelCount) : renderAudio_(renderAudio), @@ -42,7 +42,7 @@ bool AudioPlayer::openAudioStream() { return false; } - audioBus_ = std::make_shared(RENDER_QUANTUM_SIZE, channelCount_, sampleRate_); + buffer_ = std::make_shared(RENDER_QUANTUM_SIZE, channelCount_, sampleRate_); return true; } @@ -108,21 +108,17 @@ AudioPlayer::onAudioReady(AudioStream *oboeStream, void *audioData, int32_t numF int processedFrames = 0; while (processedFrames < numFrames) { - int framesToProcess = 
std::min(numFrames - processedFrames, RENDER_QUANTUM_SIZE); + auto framesToProcess = std::min(numFrames - processedFrames, RENDER_QUANTUM_SIZE); if (isRunning_.load(std::memory_order_acquire)) { - renderAudio_(audioBus_, framesToProcess); + renderAudio_(buffer_, framesToProcess); } else { - audioBus_->zero(); + buffer_->zero(); } - for (int i = 0; i < framesToProcess; i++) { - for (int channel = 0; channel < channelCount_; channel++) { - buffer[(processedFrames + i) * channelCount_ + channel] = - audioBus_->getChannel(channel)->getData()[i]; - } - } + float *destination = buffer + (processedFrames * channelCount_); + buffer_->interleaveTo(destination, framesToProcess); processedFrames += framesToProcess; } diff --git a/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/AudioPlayer.h b/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/AudioPlayer.h index 2af28dde0..c453382a5 100644 --- a/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/AudioPlayer.h +++ b/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/AudioPlayer.h @@ -12,12 +12,12 @@ namespace audioapi { using namespace oboe; class AudioContext; -class AudioBus; +class AudioBuffer; class AudioPlayer : public AudioStreamDataCallback, AudioStreamErrorCallback { public: AudioPlayer( - const std::function, int)> &renderAudio, + const std::function, int)> &renderAudio, float sampleRate, int channelCount); @@ -34,18 +34,15 @@ class AudioPlayer : public AudioStreamDataCallback, AudioStreamErrorCallback { [[nodiscard]] bool isRunning() const; - DataCallbackResult onAudioReady( - AudioStream *oboeStream, - void *audioData, - int32_t numFrames) override; - - void onErrorAfterClose(AudioStream * /* audioStream */, Result /* error */) + DataCallbackResult onAudioReady(AudioStream *oboeStream, void *audioData, int32_t numFrames) override; + void onErrorAfterClose(AudioStream * /* audioStream */, Result /* error */) override; + 
private: - std::function, int)> renderAudio_; + std::function, int)> renderAudio_; std::shared_ptr mStream_; - std::shared_ptr audioBus_; + std::shared_ptr buffer_; bool isInitialized_ = false; float sampleRate_; int channelCount_; diff --git a/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/utils/AndroidRecorderCallback.cpp b/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/utils/AndroidRecorderCallback.cpp index 1a2e3ba56..fa40b19f6 100644 --- a/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/utils/AndroidRecorderCallback.cpp +++ b/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/utils/AndroidRecorderCallback.cpp @@ -4,7 +4,7 @@ #include #include #include -#include +#include #include #include @@ -47,8 +47,8 @@ AndroidRecorderCallback::~AndroidRecorderCallback() { processingBufferLength_ = 0; } - for (size_t i = 0; i < circularBus_.size(); ++i) { - circularBus_[i]->zero(); + for (size_t i = 0; i < circularBuffer_.size(); ++i) { + circularBuffer_[i]->zero(); } } @@ -96,7 +96,8 @@ Result AndroidRecorderCallback::prepare( processingBufferLength_ = std::max(processingBufferLength_, (ma_uint64)maxInputBufferLength_); - deinterleavingArray_ = std::make_shared(processingBufferLength_); + deinterleavingBuffer_ = + std::make_shared(processingBufferLength_, channelCount_, sampleRate_); processingBuffer_ = ma_malloc( processingBufferLength_ * channelCount_ * ma_get_bytes_per_sample(ma_format_f32), nullptr); @@ -111,7 +112,7 @@ Result AndroidRecorderCallback::prepare( } void AndroidRecorderCallback::cleanup() { - if (circularBus_[0]->getNumberOfAvailableFrames() > 0) { + if (circularBuffer_[0]->getNumberOfAvailableFrames() > 0) { emitAudioData(true); } @@ -126,8 +127,8 @@ void AndroidRecorderCallback::cleanup() { processingBufferLength_ = 0; } - for (size_t i = 0; i < circularBus_.size(); ++i) { - circularBus_[i]->zero(); + for (size_t i = 0; i < circularBuffer_.size(); 
++i) { + circularBuffer_[i]->zero(); } offloader_.reset(); } @@ -149,15 +150,10 @@ void AndroidRecorderCallback::receiveAudioData(void *data, int numFrames) { /// @param numFrames Number of frames in the audio data. void AndroidRecorderCallback::deinterleaveAndPushAudioData(void *data, int numFrames) { auto *inputData = static_cast(data); + deinterleavingBuffer_->deinterleaveFrom(inputData, numFrames); - for (int channel = 0; channel < channelCount_; ++channel) { - float *channelData = deinterleavingArray_->getData(); - - for (int frame = 0; frame < numFrames; ++frame) { - channelData[frame] = inputData[frame * channelCount_ + channel]; - } - - circularBus_[channel]->push_back(channelData, numFrames); + for (size_t ch = 0; ch < channelCount_; ++ch) { + circularBuffer_[ch]->push_back(*deinterleavingBuffer_->getChannel(ch), numFrames); } } @@ -172,7 +168,7 @@ void AndroidRecorderCallback::taskOffloaderFunction(CallbackData callbackData) { streamChannelCount_ == channelCount_) { deinterleaveAndPushAudioData(data, numFrames); - if (circularBus_[0]->getNumberOfAvailableFrames() >= bufferLength_) { + if (circularBuffer_[0]->getNumberOfAvailableFrames() >= bufferLength_) { emitAudioData(); } return; @@ -186,7 +182,7 @@ void AndroidRecorderCallback::taskOffloaderFunction(CallbackData callbackData) { deinterleaveAndPushAudioData(processingBuffer_, static_cast(outputFrameCount)); - if (circularBus_[0]->getNumberOfAvailableFrames() >= bufferLength_) { + if (circularBuffer_[0]->getNumberOfAvailableFrames() >= bufferLength_) { emitAudioData(); } } diff --git a/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/utils/AndroidRecorderCallback.h b/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/utils/AndroidRecorderCallback.h index 8c4c6e81a..c3854bf91 100644 --- a/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/utils/AndroidRecorderCallback.h +++ 
b/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/utils/AndroidRecorderCallback.h @@ -1,16 +1,15 @@ #pragma once - -#include #include +#include #include #include -#include #include +#include namespace audioapi { -class AudioBus; +class AudioBuffer; class AudioArray; class CircularAudioArray; class AudioEventHandlerRegistry; @@ -30,7 +29,8 @@ class AndroidRecorderCallback : public AudioRecorderCallback { uint64_t callbackId); ~AndroidRecorderCallback() override; - Result prepare(float streamSampleRate, int streamChannelCount, size_t maxInputBufferLength); + Result + prepare(float streamSampleRate, int streamChannelCount, size_t maxInputBufferLength); void cleanup() override; void receiveAudioData(void *data, int numFrames); @@ -44,7 +44,7 @@ class AndroidRecorderCallback : public AudioRecorderCallback { ma_uint64 processingBufferLength_{0}; std::unique_ptr converter_{nullptr}; - std::shared_ptr deinterleavingArray_; + std::shared_ptr deinterleavingBuffer_; void deinterleaveAndPushAudioData(void *data, int numFrames); diff --git a/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/utils/AudioDecoder.cpp b/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/utils/AudioDecoder.cpp index da0c8ae6f..d7b3dd04a 100644 --- a/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/utils/AudioDecoder.cpp +++ b/packages/react-native-audio-api/android/src/main/cpp/audioapi/android/core/utils/AudioDecoder.cpp @@ -1,9 +1,8 @@ -#include #include #include #include #include -#include +#include #include #include @@ -54,15 +53,11 @@ std::shared_ptr AudioDecoder::makeAudioBufferFromFloatBuffer( } auto outputFrames = buffer.size() / outputChannels; - auto audioBus = std::make_shared(outputFrames, outputChannels, outputSampleRate); + auto audioBuffer = std::make_shared(outputFrames, outputChannels, outputSampleRate); - for (int ch = 0; ch < outputChannels; ++ch) { - auto channelData = 
audioBus->getChannel(ch)->getData(); - for (int i = 0; i < outputFrames; ++i) { - channelData[i] = buffer[i * outputChannels + ch]; - } - } - return std::make_shared(audioBus); + audioBuffer->deinterleaveFrom(buffer.data(), outputFrames); + + return audioBuffer; } std::shared_ptr AudioDecoder::decodeWithFilePath( @@ -162,10 +157,11 @@ std::shared_ptr AudioDecoder::decodeWithPCMInBase64( const auto uint8Data = reinterpret_cast(decodedData.data()); size_t numFramesDecoded = decodedData.size() / (inputChannelCount * sizeof(int16_t)); - auto audioBus = std::make_shared(numFramesDecoded, inputChannelCount, inputSampleRate); + auto audioBuffer = + std::make_shared(numFramesDecoded, inputChannelCount, inputSampleRate); - for (int ch = 0; ch < inputChannelCount; ++ch) { - auto channelData = audioBus->getChannel(ch)->getData(); + for (size_t ch = 0; ch < inputChannelCount; ++ch) { + auto channelData = audioBuffer->getChannel(ch)->span(); for (size_t i = 0; i < numFramesDecoded; ++i) { size_t offset; @@ -180,7 +176,7 @@ std::shared_ptr AudioDecoder::decodeWithPCMInBase64( channelData[i] = uint8ToFloat(uint8Data[offset], uint8Data[offset + 1]); } } - return std::make_shared(audioBus); + return audioBuffer; } } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/AudioAPIModuleInstaller.h b/packages/react-native-audio-api/common/cpp/audioapi/AudioAPIModuleInstaller.h index fb2543d4d..c58ee2519 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/AudioAPIModuleInstaller.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/AudioAPIModuleInstaller.h @@ -9,8 +9,8 @@ #include #include #include -#include #include +#include #include #include @@ -197,7 +197,7 @@ class AudioAPIModuleInstaller { auto length = static_cast(args[1].getNumber()); auto sampleRate = static_cast(args[2].getNumber()); - auto audioBuffer = std::make_shared(numberOfChannels, length, sampleRate); + auto audioBuffer = std::make_shared(length, numberOfChannels, 
sampleRate); auto audioBufferHostObject = std::make_shared(audioBuffer); return jsi::Object::createFromHostObject(runtime, audioBufferHostObject); }); diff --git a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/AudioNodeHostObject.cpp b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/AudioNodeHostObject.cpp index 8976e9398..ae841ed4f 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/AudioNodeHostObject.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/AudioNodeHostObject.cpp @@ -36,7 +36,7 @@ JSI_PROPERTY_GETTER_IMPL(AudioNodeHostObject, numberOfOutputs) { } JSI_PROPERTY_GETTER_IMPL(AudioNodeHostObject, channelCount) { - return {node_->getChannelCount()}; + return {static_cast(node_->getChannelCount())}; } JSI_PROPERTY_GETTER_IMPL(AudioNodeHostObject, channelCountMode) { diff --git a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/AudioParamHostObject.cpp b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/AudioParamHostObject.cpp index 45508e702..6d501e25d 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/AudioParamHostObject.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/AudioParamHostObject.cpp @@ -1,6 +1,8 @@ #include #include +#include + #include #include @@ -80,7 +82,7 @@ JSI_HOST_FUNCTION_IMPL(AudioParamHostObject, setValueCurveAtTime) { args[0].getObject(runtime).getPropertyAsObject(runtime, "buffer").getArrayBuffer(runtime); auto rawValues = reinterpret_cast(arrayBuffer.data(runtime)); auto length = static_cast(arrayBuffer.size(runtime)); - auto values = std::make_unique>(rawValues, rawValues + length); + auto values = std::make_unique(rawValues, length); double startTime = args[1].getNumber(); double duration = args[2].getNumber(); diff --git a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp 
b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp index 9cb1d072f..3942b44a0 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/BaseAudioContextHostObject.cpp @@ -1,6 +1,4 @@ #include -#include -#include #include #include #include @@ -11,6 +9,8 @@ #include #include #include +#include +#include #include #include #include @@ -77,7 +77,8 @@ JSI_PROPERTY_GETTER_IMPL(BaseAudioContextHostObject, destination) { } JSI_PROPERTY_GETTER_IMPL(BaseAudioContextHostObject, state) { - return jsi::String::createFromUtf8(runtime, js_enum_parser::contextStateToString(context_->getState())); + return jsi::String::createFromUtf8( + runtime, js_enum_parser::contextStateToString(context_->getState())); } JSI_PROPERTY_GETTER_IMPL(BaseAudioContextHostObject, sampleRate) { @@ -295,7 +296,7 @@ JSI_HOST_FUNCTION_IMPL(BaseAudioContextHostObject, createConvolver) { const auto convolverOptions = audioapi::option_parser::parseConvolverOptions(runtime, options); auto convolverHostObject = std::make_shared(context_, convolverOptions); auto jsiObject = jsi::Object::createFromHostObject(runtime, convolverHostObject); - if (convolverOptions.bus != nullptr) { + if (convolverOptions.buffer != nullptr) { auto bufferHostObject = options.getProperty(runtime, "buffer") .getObject(runtime) .asHostObject(runtime); diff --git a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/effects/WaveShaperNodeHostObject.cpp b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/effects/WaveShaperNodeHostObject.cpp index f1351cdcf..05ff69806 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/effects/WaveShaperNodeHostObject.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/effects/WaveShaperNodeHostObject.cpp @@ -3,7 +3,7 @@ #include #include #include -#include +#include 
#include #include @@ -37,8 +37,7 @@ JSI_PROPERTY_GETTER_IMPL(WaveShaperNodeHostObject, curve) { } // copy AudioArray holding curve data to avoid subsequent modifications - auto audioArray = std::make_shared(*curve); - auto audioArrayBuffer = std::make_shared(audioArray); + auto audioArrayBuffer = std::make_shared(*curve); auto arrayBuffer = jsi::ArrayBuffer(runtime, audioArrayBuffer); auto float32ArrayCtor = runtime.global().getPropertyAsFunction(runtime, "Float32Array"); @@ -58,14 +57,14 @@ JSI_HOST_FUNCTION_IMPL(WaveShaperNodeHostObject, setCurve) { auto waveShaperNode = std::static_pointer_cast(node_); if (args[0].isNull()) { - waveShaperNode->setCurve(std::shared_ptr(nullptr)); + waveShaperNode->setCurve(nullptr); return jsi::Value::undefined(); } auto arrayBuffer = args[0].getObject(runtime).getPropertyAsObject(runtime, "buffer").getArrayBuffer(runtime); - auto curve = std::make_shared( + auto curve = std::make_shared( reinterpret_cast(arrayBuffer.data(runtime)), static_cast(arrayBuffer.size(runtime) / sizeof(float))); diff --git a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/inputs/AudioRecorderHostObject.cpp b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/inputs/AudioRecorderHostObject.cpp index 10976563b..9ea3aeb0e 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/inputs/AudioRecorderHostObject.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/inputs/AudioRecorderHostObject.cpp @@ -3,10 +3,10 @@ #include #include #include -#include #include -#include #include +#include +#include #ifdef ANDROID #include #else diff --git a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/AudioBufferHostObject.cpp b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/AudioBufferHostObject.cpp index 91a7c577a..63ddeed69 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/AudioBufferHostObject.cpp +++ 
b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/AudioBufferHostObject.cpp @@ -1,7 +1,7 @@ #include -#include -#include +#include +#include #include #include @@ -30,7 +30,7 @@ JSI_PROPERTY_GETTER_IMPL(AudioBufferHostObject, sampleRate) { } JSI_PROPERTY_GETTER_IMPL(AudioBufferHostObject, length) { - return {static_cast(audioBuffer_->getLength())}; + return {static_cast(audioBuffer_->getSize())}; } JSI_PROPERTY_GETTER_IMPL(AudioBufferHostObject, duration) { @@ -38,13 +38,12 @@ JSI_PROPERTY_GETTER_IMPL(AudioBufferHostObject, duration) { } JSI_PROPERTY_GETTER_IMPL(AudioBufferHostObject, numberOfChannels) { - return {audioBuffer_->getNumberOfChannels()}; + return { static_cast(audioBuffer_->getNumberOfChannels()) }; } JSI_HOST_FUNCTION_IMPL(AudioBufferHostObject, getChannelData) { auto channel = static_cast(args[0].getNumber()); - auto audioArrayBuffer = - std::make_shared(audioBuffer_->bus_->getSharedChannel(channel)); + auto audioArrayBuffer = audioBuffer_->getSharedChannel(channel); auto arrayBuffer = jsi::ArrayBuffer(runtime, audioArrayBuffer); auto float32ArrayCtor = runtime.global().getPropertyAsFunction(runtime, "Float32Array"); @@ -59,11 +58,11 @@ JSI_HOST_FUNCTION_IMPL(AudioBufferHostObject, copyFromChannel) { auto arrayBuffer = args[0].getObject(runtime).getPropertyAsObject(runtime, "buffer").getArrayBuffer(runtime); auto destination = reinterpret_cast(arrayBuffer.data(runtime)); - auto length = static_cast(arrayBuffer.size(runtime)); + auto length = arrayBuffer.size(runtime) / sizeof(float); auto channelNumber = static_cast(args[1].getNumber()); auto startInChannel = static_cast(args[2].getNumber()); - audioBuffer_->copyFromChannel(destination, length, channelNumber, startInChannel); + audioBuffer_->getChannel(channelNumber)->copyTo(destination, startInChannel, 0, length); return jsi::Value::undefined(); } @@ -72,11 +71,11 @@ JSI_HOST_FUNCTION_IMPL(AudioBufferHostObject, copyToChannel) { auto arrayBuffer = 
args[0].getObject(runtime).getPropertyAsObject(runtime, "buffer").getArrayBuffer(runtime); auto source = reinterpret_cast(arrayBuffer.data(runtime)); - auto length = static_cast(arrayBuffer.size(runtime)); + auto length = arrayBuffer.size(runtime) / sizeof(float); auto channelNumber = static_cast(args[1].getNumber()); auto startInChannel = static_cast(args[2].getNumber()); - audioBuffer_->copyToChannel(source, length, channelNumber, startInChannel); + audioBuffer_->getChannel(channelNumber)->copy(source, 0, startInChannel, length); return jsi::Value::undefined(); } diff --git a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/AudioBufferHostObject.h b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/AudioBufferHostObject.h index 7bd601004..09d440d55 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/AudioBufferHostObject.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/sources/AudioBufferHostObject.h @@ -1,7 +1,7 @@ #pragma once -#include #include +#include #include #include @@ -29,7 +29,7 @@ class AudioBufferHostObject : public JsiHostObject { } [[nodiscard]] inline size_t getSizeInBytes() const { - return audioBuffer_->getLength() * audioBuffer_->getNumberOfChannels() * sizeof(float); + return audioBuffer_->getSize() * audioBuffer_->getNumberOfChannels() * sizeof(float); } JSI_PROPERTY_GETTER_DECL(sampleRate); diff --git a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/utils/NodeOptionsParser.h b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/utils/NodeOptionsParser.h index d98b386fe..75136b83f 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/utils/NodeOptionsParser.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/HostObjects/utils/NodeOptionsParser.h @@ -81,7 +81,7 @@ ConvolverOptions parseConvolverOptions(jsi::Runtime &runtime, const jsi::Object auto bufferHostObject = 
optionsObject.getProperty(runtime, "buffer") .getObject(runtime) .asHostObject(runtime); - options.bus = bufferHostObject->audioBuffer_; + options.buffer = bufferHostObject->audioBuffer_; } return options; } @@ -338,7 +338,7 @@ WaveShaperOptions parseWaveShaperOptions(jsi::Runtime &runtime, const jsi::Objec if (optionsObject.hasProperty(runtime, "buffer")) { auto arrayBuffer = optionsObject.getPropertyAsObject(runtime, "buffer").getArrayBuffer(runtime); - options.curve = std::make_shared( + options.curve = std::make_shared( reinterpret_cast(arrayBuffer.data(runtime)), static_cast(arrayBuffer.size(runtime) / sizeof(float))); } diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/AudioContext.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/AudioContext.cpp index 7e90d9a36..853f5633c 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/AudioContext.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/AudioContext.cpp @@ -16,7 +16,7 @@ AudioContext::AudioContext( const std::shared_ptr &audioEventHandlerRegistry, const RuntimeRegistry &runtimeRegistry) : BaseAudioContext(sampleRate, audioEventHandlerRegistry, runtimeRegistry), - isInitialized_(false) {} + isInitialized_(false) {} AudioContext::~AudioContext() { if (getState() != ContextState::CLOSED) { @@ -90,8 +90,8 @@ bool AudioContext::start() { return false; } -std::function, int)> AudioContext::renderAudio() { - return [this](const std::shared_ptr &data, int frames) { +std::function, int)> AudioContext::renderAudio() { + return [this](const std::shared_ptr &data, int frames) { destination_->renderAudio(data, frames); }; } diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/AudioContext.h b/packages/react-native-audio-api/common/cpp/audioapi/core/AudioContext.h index c1eced267..967b38426 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/AudioContext.h +++ 
b/packages/react-native-audio-api/common/cpp/audioapi/core/AudioContext.h @@ -37,7 +37,7 @@ class AudioContext : public BaseAudioContext { bool isDriverRunning() const override; - std::function, int)> renderAudio(); + std::function, int)> renderAudio(); }; } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/AudioNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/AudioNode.cpp index 7a7a8e7c9..94959779c 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/AudioNode.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/AudioNode.cpp @@ -1,10 +1,10 @@ -#include #include #include #include #include +#include #include -#include +#include #include #include #include @@ -17,12 +17,12 @@ AudioNode::AudioNode( : context_(context), numberOfInputs_(options.numberOfInputs), numberOfOutputs_(options.numberOfOutputs), - requiresTailProcessing_(options.requiresTailProcessing), channelCount_(options.channelCount), channelCountMode_(options.channelCountMode), - channelInterpretation_(options.channelInterpretation) { - audioBus_ = - std::make_shared(RENDER_QUANTUM_SIZE, channelCount_, context->getSampleRate()); + channelInterpretation_(options.channelInterpretation), + requiresTailProcessing_(options.requiresTailProcessing) { + audioBuffer_ = + std::make_shared(RENDER_QUANTUM_SIZE, channelCount_, context->getSampleRate()); } AudioNode::~AudioNode() { @@ -39,7 +39,7 @@ int AudioNode::getNumberOfOutputs() const { return numberOfOutputs_; } -int AudioNode::getChannelCount() const { +size_t AudioNode::getChannelCount() const { return channelCount_; } @@ -53,36 +53,36 @@ ChannelInterpretation AudioNode::getChannelInterpretation() const { void AudioNode::connect(const std::shared_ptr &node) { if (std::shared_ptr context = context_.lock()) { - context->getGraphManager()->addPendingNodeConnection( - shared_from_this(), node, AudioGraphManager::ConnectionType::CONNECT); + 
context->getGraphManager()->addPendingNodeConnection( + shared_from_this(), node, AudioGraphManager::ConnectionType::CONNECT); } } void AudioNode::connect(const std::shared_ptr ¶m) { if (std::shared_ptr context = context_.lock()) { - context->getGraphManager()->addPendingParamConnection( - shared_from_this(), param, AudioGraphManager::ConnectionType::CONNECT); + context->getGraphManager()->addPendingParamConnection( + shared_from_this(), param, AudioGraphManager::ConnectionType::CONNECT); } } void AudioNode::disconnect() { if (std::shared_ptr context = context_.lock()) { - context->getGraphManager()->addPendingNodeConnection( - shared_from_this(), nullptr, AudioGraphManager::ConnectionType::DISCONNECT_ALL); + context->getGraphManager()->addPendingNodeConnection( + shared_from_this(), nullptr, AudioGraphManager::ConnectionType::DISCONNECT_ALL); } } void AudioNode::disconnect(const std::shared_ptr &node) { if (std::shared_ptr context = context_.lock()) { - context->getGraphManager()->addPendingNodeConnection( - shared_from_this(), node, AudioGraphManager::ConnectionType::DISCONNECT); + context->getGraphManager()->addPendingNodeConnection( + shared_from_this(), node, AudioGraphManager::ConnectionType::DISCONNECT); } } void AudioNode::disconnect(const std::shared_ptr ¶m) { if (std::shared_ptr context = context_.lock()) { - context->getGraphManager()->addPendingParamConnection( - shared_from_this(), param, AudioGraphManager::ConnectionType::DISCONNECT); + context->getGraphManager()->addPendingParamConnection( + shared_from_this(), param, AudioGraphManager::ConnectionType::DISCONNECT); } } @@ -118,31 +118,31 @@ void AudioNode::disable() { } } -std::shared_ptr AudioNode::processAudio( - const std::shared_ptr &outputBus, +std::shared_ptr AudioNode::processAudio( + const std::shared_ptr &outputBuffer, int framesToProcess, bool checkIsAlreadyProcessed) { if (!isInitialized_) { - return outputBus; + return outputBuffer; } if (checkIsAlreadyProcessed && isAlreadyProcessed()) { 
- return audioBus_; + return audioBuffer_; } - // Process inputs and return the bus with the most channels. - auto processingBus = processInputs(outputBus, framesToProcess, checkIsAlreadyProcessed); + // Process inputs and return the buffer with the most channels. + auto processingBuffer = processInputs(outputBuffer, framesToProcess, checkIsAlreadyProcessed); // Apply channel count mode. - processingBus = applyChannelCountMode(processingBus); + processingBuffer = applyChannelCountMode(processingBuffer); - // Mix all input buses into the processing bus. - mixInputsBuses(processingBus); + // Mix all input buffers into the processing buffer. + mixInputsBuffers(processingBuffer); - assert(processingBus != nullptr); + assert(processingBuffer != nullptr); // Finally, process the node itself. - return processNode(processingBus, framesToProcess); + return processNode(processingBuffer, framesToProcess); } bool AudioNode::isAlreadyProcessed() { @@ -164,14 +164,14 @@ bool AudioNode::isAlreadyProcessed() { return true; } -std::shared_ptr AudioNode::processInputs( - const std::shared_ptr &outputBus, +std::shared_ptr AudioNode::processInputs( + const std::shared_ptr &outputBuffer, int framesToProcess, bool checkIsAlreadyProcessed) { - auto processingBus = audioBus_; - processingBus->zero(); + auto processingBuffer = audioBuffer_; + processingBuffer->zero(); - int maxNumberOfChannels = 0; + size_t maxNumberOfChannels = 0; for (auto it = inputNodes_.begin(), end = inputNodes_.end(); it != end; ++it) { auto inputNode = *it; assert(inputNode != nullptr); @@ -180,44 +180,45 @@ std::shared_ptr AudioNode::processInputs( continue; } - auto inputBus = inputNode->processAudio(outputBus, framesToProcess, checkIsAlreadyProcessed); - inputBuses_.push_back(inputBus); + auto inputBuffer = + inputNode->processAudio(outputBuffer, framesToProcess, checkIsAlreadyProcessed); + inputBuffers_.push_back(inputBuffer); - if (maxNumberOfChannels < inputBus->getNumberOfChannels()) { - maxNumberOfChannels 
= inputBus->getNumberOfChannels(); - processingBus = inputBus; + if (maxNumberOfChannels < inputBuffer->getNumberOfChannels()) { + maxNumberOfChannels = inputBuffer->getNumberOfChannels(); + processingBuffer = inputBuffer; } } - return processingBus; + return processingBuffer; } -std::shared_ptr AudioNode::applyChannelCountMode( - const std::shared_ptr &processingBus) { +std::shared_ptr AudioNode::applyChannelCountMode( + const std::shared_ptr &processingBuffer) { // If the channelCountMode is EXPLICIT, the node should output the number of // channels specified by the channelCount. if (channelCountMode_ == ChannelCountMode::EXPLICIT) { - return audioBus_; + return audioBuffer_; } // If the channelCountMode is CLAMPED_MAX, the node should output the maximum // number of channels clamped to channelCount. if (channelCountMode_ == ChannelCountMode::CLAMPED_MAX && - processingBus->getNumberOfChannels() >= channelCount_) { - return audioBus_; + processingBuffer->getNumberOfChannels() >= channelCount_) { + return audioBuffer_; } - return processingBus; + return processingBuffer; } -void AudioNode::mixInputsBuses(const std::shared_ptr &processingBus) { - assert(processingBus != nullptr); +void AudioNode::mixInputsBuffers(const std::shared_ptr &processingBuffer) { + assert(processingBuffer != nullptr); - for (auto it = inputBuses_.begin(), end = inputBuses_.end(); it != end; ++it) { - processingBus->sum(it->get(), channelInterpretation_); + for (auto it = inputBuffers_.begin(), end = inputBuffers_.end(); it != end; ++it) { + processingBuffer->sum(**it, channelInterpretation_); } - inputBuses_.clear(); + inputBuffers_.clear(); } void AudioNode::connectNode(const std::shared_ptr &node) { diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/AudioNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/AudioNode.h index 58cfe64d9..e63c83c1d 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/AudioNode.h +++ 
b/packages/react-native-audio-api/common/cpp/audioapi/core/AudioNode.h @@ -14,7 +14,7 @@ namespace audioapi { -class AudioBus; +class AudioBuffer; class BaseAudioContext; class AudioParam; @@ -27,7 +27,7 @@ class AudioNode : public std::enable_shared_from_this { int getNumberOfInputs() const; int getNumberOfOutputs() const; - int getChannelCount() const; + size_t getChannelCount() const; ChannelCountMode getChannelCountMode() const; ChannelInterpretation getChannelInterpretation() const; void connect(const std::shared_ptr &node); @@ -35,8 +35,8 @@ class AudioNode : public std::enable_shared_from_this { void disconnect(); void disconnect(const std::shared_ptr &node); void disconnect(const std::shared_ptr ¶m); - virtual std::shared_ptr processAudio( - const std::shared_ptr &outputBus, + virtual std::shared_ptr processAudio( + const std::shared_ptr &outputBuffer, int framesToProcess, bool checkIsAlreadyProcessed); @@ -52,19 +52,19 @@ class AudioNode : public std::enable_shared_from_this { friend class DelayNodeHostObject; std::weak_ptr context_; - std::shared_ptr audioBus_; + std::shared_ptr audioBuffer_; + + const int numberOfInputs_ = 1; + const int numberOfOutputs_ = 1; + size_t channelCount_ = 2; + const ChannelCountMode channelCountMode_ = ChannelCountMode::MAX; + const ChannelInterpretation channelInterpretation_ = ChannelInterpretation::SPEAKERS; + const bool requiresTailProcessing_; std::unordered_set inputNodes_ = {}; std::unordered_set> outputNodes_ = {}; std::unordered_set> outputParams_ = {}; - const int numberOfInputs_; - const int numberOfOutputs_; - int channelCount_; - const bool requiresTailProcessing_; - const ChannelCountMode channelCountMode_; - const ChannelInterpretation channelInterpretation_; - int numberOfEnabledInputNodes_ = 0; bool isInitialized_ = false; bool isEnabled_ = true; @@ -72,17 +72,18 @@ class AudioNode : public std::enable_shared_from_this { std::size_t lastRenderedFrame_{SIZE_MAX}; private: - std::vector> inputBuses_ = {}; + 
std::vector> inputBuffers_ = {}; - virtual std::shared_ptr processInputs( - const std::shared_ptr &outputBus, + virtual std::shared_ptr processInputs( + const std::shared_ptr &outputBuffer, int framesToProcess, bool checkIsAlreadyProcessed); - virtual std::shared_ptr processNode(const std::shared_ptr &, int) = 0; + virtual std::shared_ptr processNode(const std::shared_ptr &, int) = 0; bool isAlreadyProcessed(); - std::shared_ptr applyChannelCountMode(const std::shared_ptr &processingBus); - void mixInputsBuses(const std::shared_ptr &processingBus); + std::shared_ptr applyChannelCountMode( + const std::shared_ptr &processingBuffer); + void mixInputsBuffers(const std::shared_ptr &processingBuffer); void connectNode(const std::shared_ptr &node); void disconnectNode(const std::shared_ptr &node); diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/AudioParam.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/AudioParam.cpp index c0873dbe8..be3b6f82c 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/AudioParam.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/AudioParam.cpp @@ -1,6 +1,6 @@ #include #include -#include +#include #include #include #include @@ -12,7 +12,7 @@ AudioParam::AudioParam( float defaultValue, float minValue, float maxValue, - const std::shared_ptr& context) + const std::shared_ptr &context) : context_(context), value_(defaultValue), defaultValue_(defaultValue), @@ -24,8 +24,9 @@ AudioParam::AudioParam( endTime_(0), startValue_(defaultValue), endValue_(defaultValue), - audioBus_(std::make_shared(RENDER_QUANTUM_SIZE, 1, context->getSampleRate())) { - inputBuses_.reserve(4); + audioBuffer_( + std::make_shared(RENDER_QUANTUM_SIZE, 1, context->getSampleRate())) { + inputBuffers_.reserve(4); inputNodes_.reserve(4); // Default calculation function just returns the static value calculateValue_ = [this](double, double, float, float, double) { @@ -174,7 +175,7 @@ void 
AudioParam::setTargetAtTime(float target, double startTime, double timeCons } void AudioParam::setValueCurveAtTime( - const std::shared_ptr>& values, + const std::shared_ptr &values, size_t length, double startTime, double duration) { @@ -197,7 +198,7 @@ void AudioParam::setValueCurveAtTime( // Calculate interpolation factor between adjacent array elements auto factor = static_cast( (time - startTime) * static_cast(length - 1) / (endTime - startTime) - k); - return dsp::linearInterpolate(values->data(), k, k + 1, factor); + return dsp::linearInterpolate(values->span(), k, k + 1, factor); } return endValue; @@ -207,7 +208,7 @@ void AudioParam::setValueCurveAtTime( startTime, startTime + duration, param.getQueueEndValue(), - values->at(length - 1), + values->span()[length - 1], std::move(calculateValue), ParamChangeEventType::SET_VALUE_CURVE)); }; @@ -242,27 +243,27 @@ void AudioParam::removeInputNode(AudioNode *node) { } } -std::shared_ptr AudioParam::calculateInputs( - const std::shared_ptr &processingBus, +std::shared_ptr AudioParam::calculateInputs( + const std::shared_ptr &processingBuffer, int framesToProcess) { - processingBus->zero(); + processingBuffer->zero(); if (inputNodes_.empty()) { - return processingBus; + return processingBuffer; } - processInputs(processingBus, framesToProcess, true); - mixInputsBuses(processingBus); - return processingBus; + processInputs(processingBuffer, framesToProcess, true); + mixInputsBuffers(processingBuffer); + return processingBuffer; } -std::shared_ptr AudioParam::processARateParam(int framesToProcess, double time) { +std::shared_ptr AudioParam::processARateParam(int framesToProcess, double time) { processScheduledEvents(); - auto processingBus = calculateInputs(audioBus_, framesToProcess); + auto processingBuffer = calculateInputs(audioBuffer_, framesToProcess); std::shared_ptr context = context_.lock(); if (context == nullptr) - return processingBus; + return processingBuffer; float sampleRate = 
context->getSampleRate(); - float *busData = processingBus->getChannel(0)->getData(); + auto bufferData = processingBuffer->getChannel(0)->span(); float timeCache = time; float timeStep = 1.0f / sampleRate; float sample = 0.0f; @@ -270,22 +271,22 @@ std::shared_ptr AudioParam::processARateParam(int framesToProcess, dou // Add automated parameter value to each sample for (size_t i = 0; i < framesToProcess; i++, timeCache += timeStep) { sample = getValueAtTime(timeCache); - busData[i] += sample; + bufferData[i] += sample; } - // processingBus is a mono bus containing per-sample parameter values - return processingBus; + // processingBuffer is a mono buffer containing per-sample parameter values + return processingBuffer; } float AudioParam::processKRateParam(int framesToProcess, double time) { processScheduledEvents(); - auto processingBus = calculateInputs(audioBus_, framesToProcess); + auto processingBuffer = calculateInputs(audioBuffer_, framesToProcess); // Return block-rate parameter value plus first sample of input modulation - return processingBus->getChannel(0)->getData()[0] + getValueAtTime(time); + return processingBuffer->getChannel(0)->span()[0] + getValueAtTime(time); } void AudioParam::processInputs( - const std::shared_ptr &outputBus, + const std::shared_ptr &outputBuffer, int framesToProcess, bool checkIsAlreadyProcessed) { for (auto it = inputNodes_.begin(), end = inputNodes_.end(); it != end; ++it) { @@ -296,22 +297,23 @@ void AudioParam::processInputs( continue; } - // Process this input node and store its output bus - auto inputBus = inputNode->processAudio(outputBus, framesToProcess, checkIsAlreadyProcessed); - inputBuses_.emplace_back(inputBus); + // Process this input node and store its output buffer + auto inputBuffer = + inputNode->processAudio(outputBuffer, framesToProcess, checkIsAlreadyProcessed); + inputBuffers_.emplace_back(inputBuffer); } } -void AudioParam::mixInputsBuses(const std::shared_ptr &processingBus) { - assert(processingBus 
!= nullptr); +void AudioParam::mixInputsBuffers(const std::shared_ptr &processingBuffer) { + assert(processingBuffer != nullptr); - // Sum all input buses into the processing bus - for (auto it = inputBuses_.begin(), end = inputBuses_.end(); it != end; ++it) { - processingBus->sum(it->get(), ChannelInterpretation::SPEAKERS); + // Sum all input buffers into the processing buffer + for (auto it = inputBuffers_.begin(), end = inputBuffers_.end(); it != end; ++it) { + processingBuffer->sum(**it, ChannelInterpretation::SPEAKERS); } // Clear for next processing cycle - inputBuses_.clear(); + inputBuffers_.clear(); } } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/AudioParam.h b/packages/react-native-audio-api/common/cpp/audioapi/core/AudioParam.h index 0f53b8a7e..7f5b7130e 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/AudioParam.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/AudioParam.h @@ -3,8 +3,8 @@ #include #include #include -#include -#include +#include +#include #include #include @@ -65,7 +65,7 @@ class AudioParam { // JS-Thread only void setValueCurveAtTime( - const std::shared_ptr> &values, + const std::shared_ptr &values, size_t length, double startTime, double duration); @@ -86,7 +86,7 @@ class AudioParam { void removeInputNode(AudioNode *node); // Audio-Thread only - std::shared_ptr processARateParam(int framesToProcess, double time); + std::shared_ptr processARateParam(int framesToProcess, double time); // Audio-Thread only float processKRateParam(int framesToProcess, double time); @@ -111,8 +111,8 @@ class AudioParam { // Input modulation system std::vector inputNodes_; - std::shared_ptr audioBus_; - std::vector> inputBuses_; + std::shared_ptr audioBuffer_; + std::vector> inputBuffers_; /// @brief Get the end time of the parameter queue. /// @return The end time of the parameter queue or last endTime_ if queue is empty. 
@@ -145,12 +145,12 @@ class AudioParam { } float getValueAtTime(double time); void processInputs( - const std::shared_ptr &outputBus, + const std::shared_ptr &outputBuffer, int framesToProcess, bool checkIsAlreadyProcessed); - void mixInputsBuses(const std::shared_ptr &processingBus); - std::shared_ptr calculateInputs( - const std::shared_ptr &processingBus, + void mixInputsBuffers(const std::shared_ptr &processingBuffer); + std::shared_ptr calculateInputs( + const std::shared_ptr &processingBuffer, int framesToProcess); }; diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.cpp index 8c26da911..f119be1d5 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.cpp @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -26,7 +25,7 @@ #include #include #include -#include +#include #include #include #include @@ -78,7 +77,7 @@ std::shared_ptr BaseAudioContext::getDestination() const { } void BaseAudioContext::setState(audioapi::ContextState state) { - state_.store(state, std::memory_order_release); + state_.store(state, std::memory_order_release); } std::shared_ptr BaseAudioContext::createWorkletSourceNode( @@ -122,7 +121,8 @@ std::shared_ptr BaseAudioContext::createRecorderAdapter() { return recorderAdapter; } -std::shared_ptr BaseAudioContext::createOscillator(const OscillatorOptions &options) { +std::shared_ptr BaseAudioContext::createOscillator( + const OscillatorOptions &options) { auto oscillator = std::make_shared(shared_from_this(), options); graphManager_->addSourceNode(oscillator); return oscillator; @@ -210,7 +210,8 @@ std::shared_ptr BaseAudioContext::createConvolver(const Convolver return convolver; } -std::shared_ptr BaseAudioContext::createWaveShaper(const WaveShaperOptions &options) { 
+std::shared_ptr BaseAudioContext::createWaveShaper( + const WaveShaperOptions &options) { auto waveShaper = std::make_shared(shared_from_this(), options); graphManager_->addProcessingNode(waveShaper); return waveShaper; @@ -257,7 +258,7 @@ std::shared_ptr BaseAudioContext::getAudioEventHandl } const RuntimeRegistry &BaseAudioContext::getRuntimeRegistry() const { - return runtimeRegistry_; + return runtimeRegistry_; } } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.h b/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.h index f68b5afd5..ff7da2d28 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/BaseAudioContext.h @@ -16,7 +16,7 @@ namespace audioapi { -class AudioBus; +class AudioBuffer; class GainNode; class DelayNode; class AudioBuffer; diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/OfflineAudioContext.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/OfflineAudioContext.cpp index 0b771c973..b5deac443 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/OfflineAudioContext.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/OfflineAudioContext.cpp @@ -2,12 +2,11 @@ #include #include -#include #include #include #include #include -#include +#include #include #include @@ -28,10 +27,10 @@ OfflineAudioContext::OfflineAudioContext( length_(length), numberOfChannels_(numberOfChannels), currentSampleFrame_(0), - resultBus_(std::make_shared(length, numberOfChannels, sampleRate)) {} + resultBuffer_(std::make_shared(length, numberOfChannels, sampleRate)) {} OfflineAudioContext::~OfflineAudioContext() { - getGraphManager()->cleanup(); + getGraphManager()->cleanup(); } void OfflineAudioContext::resume() { @@ -65,21 +64,17 @@ void OfflineAudioContext::renderAudio() { setState(ContextState::RUNNING); std::thread([this]() { - 
auto audioBus = std::make_shared(RENDER_QUANTUM_SIZE, numberOfChannels_, getSampleRate()); + auto audioBuffer = + std::make_shared(RENDER_QUANTUM_SIZE, numberOfChannels_, getSampleRate()); while (currentSampleFrame_ < length_) { Locker locker(mutex_); int framesToProcess = std::min(static_cast(length_ - currentSampleFrame_), RENDER_QUANTUM_SIZE); - destination_->renderAudio(audioBus, framesToProcess); + destination_->renderAudio(audioBuffer, framesToProcess); - for (int i = 0; i < framesToProcess; i++) { - for (int channel = 0; channel < numberOfChannels_; channel += 1) { - resultBus_->getChannel(channel)->getData()[currentSampleFrame_ + i] = - audioBus->getChannel(channel)->getData()[i]; - } - } + resultBuffer_->copy(*audioBuffer, 0, currentSampleFrame_, framesToProcess); currentSampleFrame_ += framesToProcess; @@ -96,8 +91,7 @@ void OfflineAudioContext::renderAudio() { } // Rendering completed - auto buffer = std::make_shared(resultBus_); - resultCallback_(buffer); + resultCallback_(resultBuffer_); }).detach(); } diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/OfflineAudioContext.h b/packages/react-native-audio-api/common/cpp/audioapi/core/OfflineAudioContext.h index a33fd384a..aac15b052 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/OfflineAudioContext.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/OfflineAudioContext.h @@ -38,7 +38,7 @@ class OfflineAudioContext : public BaseAudioContext { int numberOfChannels_; size_t currentSampleFrame_; - std::shared_ptr resultBus_; + std::shared_ptr resultBuffer_; void renderAudio(); diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/analysis/AnalyserNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/analysis/AnalyserNode.cpp index fbfd5ff3a..dd93c628a 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/analysis/AnalyserNode.cpp +++ 
b/packages/react-native-audio-api/common/cpp/audioapi/core/analysis/AnalyserNode.cpp @@ -1,11 +1,11 @@ -#include #include #include -#include +#include #include -#include +#include +#include #include -#include +#include #include #include @@ -24,12 +24,13 @@ AnalyserNode::AnalyserNode( maxDecibels_(options.maxDecibels), smoothingTimeConstant_(options.smoothingTimeConstant), windowType_(WindowType::BLACKMAN), - inputBuffer_(std::make_unique(MAX_FFT_SIZE * 2)), - downMixBus_(std::make_unique(RENDER_QUANTUM_SIZE, 1, context->getSampleRate())), - tempBuffer_(std::make_unique(fftSize_)), + inputArray_(std::make_unique(MAX_FFT_SIZE * 2)), + downMixBuffer_( + std::make_unique(RENDER_QUANTUM_SIZE, 1, context->getSampleRate())), + tempArray_(std::make_unique(fftSize_)), fft_(std::make_unique(fftSize_)), complexData_(std::vector>(fftSize_)), - magnitudeBuffer_(std::make_unique(fftSize_ / 2)) { + magnitudeArray_(std::make_unique(fftSize_ / 2)) { setWindowData(windowType_, fftSize_); isInitialized_ = true; } @@ -66,8 +67,8 @@ void AnalyserNode::setFftSize(int fftSize) { fftSize_ = fftSize; fft_ = std::make_unique(fftSize_); complexData_ = std::vector>(fftSize_); - magnitudeBuffer_ = std::make_unique(fftSize_ / 2); - tempBuffer_ = std::make_unique(fftSize_); + magnitudeArray_ = std::make_unique(fftSize_ / 2); + tempArray_ = std::make_unique(fftSize_); setWindowData(windowType_, fftSize_); } @@ -90,15 +91,19 @@ void AnalyserNode::setWindowType(AnalyserNode::WindowType type) { void AnalyserNode::getFloatFrequencyData(float *data, int length) { doFFTAnalysis(); - length = std::min(static_cast(magnitudeBuffer_->getSize()), length); - dsp::linearToDecibels(magnitudeBuffer_->getData(), data, length); + length = std::min(static_cast(magnitudeArray_->getSize()), length); + auto magnitudeSpan = magnitudeArray_->span(); + + for (int i = 0; i < length; i++) { + data[i] = dsp::linearToDecibels(magnitudeSpan[i]); + } } void AnalyserNode::getByteFrequencyData(uint8_t *data, int length) { 
doFFTAnalysis(); - auto magnitudeBufferData = magnitudeBuffer_->getData(); - length = std::min(static_cast(magnitudeBuffer_->getSize()), length); + auto magnitudeBufferData = magnitudeArray_->span(); + length = std::min(static_cast(magnitudeArray_->getSize()), length); const auto rangeScaleFactor = maxDecibels_ == minDecibels_ ? 1 : 1 / (maxDecibels_ - minDecibels_); @@ -121,18 +126,19 @@ void AnalyserNode::getByteFrequencyData(uint8_t *data, int length) { void AnalyserNode::getFloatTimeDomainData(float *data, int length) { auto size = std::min(fftSize_, length); - inputBuffer_->pop_back(data, size, std::max(0, fftSize_ - size), true); + + inputArray_->pop_back(data, size, std::max(0, fftSize_ - size), true); } void AnalyserNode::getByteTimeDomainData(uint8_t *data, int length) { auto size = std::min(fftSize_, length); - inputBuffer_->pop_back(tempBuffer_->getData(), fftSize_, std::max(0, fftSize_ - size), true); + inputArray_->pop_back(*tempArray_, size, std::max(0, fftSize_ - size), true); - for (int i = 0; i < size; i++) { - auto value = tempBuffer_->getData()[i]; + auto values = tempArray_->span(); - float scaledValue = 128 * (value + 1); + for (int i = 0; i < size; i++) { + float scaledValue = 128 * (values[i] + 1); if (scaledValue < 0) { scaledValue = 0; @@ -145,20 +151,20 @@ void AnalyserNode::getByteTimeDomainData(uint8_t *data, int length) { } } -std::shared_ptr AnalyserNode::processNode( - const std::shared_ptr &processingBus, +std::shared_ptr AnalyserNode::processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) { // Analyser should behave like a sniffer node, it should not modify the - // processingBus but instead copy the data to its own input buffer. + // processingBuffer but instead copy the data to its own input buffer. 
- // Down mix the input bus to mono - downMixBus_->copy(processingBus.get()); - // Copy the down mixed bus to the input buffer (circular buffer) - inputBuffer_->push_back(downMixBus_->getChannel(0)->getData(), framesToProcess, true); + // Down mix the input buffer to mono + downMixBuffer_->copy(*processingBuffer); + // Copy the down mixed buffer to the input buffer (circular buffer) + inputArray_->push_back(*downMixBuffer_->getChannel(0), framesToProcess, true); shouldDoFFTAnalysis_ = true; - return processingBus; + return processingBuffer; } void AnalyserNode::doFFTAnalysis() { @@ -170,20 +176,20 @@ void AnalyserNode::doFFTAnalysis() { // We want to copy last fftSize_ elements added to the input buffer to apply // the window. - inputBuffer_->pop_back(tempBuffer_->getData(), fftSize_, 0, true); + inputArray_->pop_back(*tempArray_, fftSize_, 0, true); - dsp::multiply(tempBuffer_->getData(), windowData_->getData(), tempBuffer_->getData(), fftSize_); + tempArray_->multiply(*windowData_, fftSize_); // do fft analysis - get frequency domain data - fft_->doFFT(tempBuffer_->getData(), complexData_); + fft_->doFFT(*tempArray_, complexData_); // Zero out nquist component complexData_[0] = std::complex(complexData_[0].real(), 0); const float magnitudeScale = 1.0f / static_cast(fftSize_); - auto magnitudeBufferData = magnitudeBuffer_->getData(); + auto magnitudeBufferData = magnitudeArray_->span(); - for (int i = 0; i < magnitudeBuffer_->getSize(); i++) { + for (int i = 0; i < magnitudeArray_->getSize(); i++) { auto scalarMagnitude = std::abs(complexData_[i]) * magnitudeScale; magnitudeBufferData[i] = static_cast( smoothingTimeConstant_ * magnitudeBufferData[i] + @@ -203,10 +209,10 @@ void AnalyserNode::setWindowData(AnalyserNode::WindowType type, int size) { switch (windowType_) { case WindowType::BLACKMAN: - dsp::Blackman().apply(windowData_->getData(), static_cast(windowData_->getSize())); + dsp::Blackman().apply(windowData_->span()); break; case WindowType::HANN: - 
dsp::Hann().apply(windowData_->getData(), static_cast(windowData_->getSize())); + dsp::Hann().apply(windowData_->span()); break; } } diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/analysis/AnalyserNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/analysis/AnalyserNode.h index b5e22822f..c1f6db736 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/analysis/AnalyserNode.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/analysis/AnalyserNode.h @@ -12,7 +12,7 @@ namespace audioapi { -class AudioBus; +class AudioBuffer; class AudioArray; class CircularAudioArray; struct AnalyserOptions; @@ -43,8 +43,8 @@ class AnalyserNode : public AudioNode { void getByteTimeDomainData(uint8_t *data, int length); protected: - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override; private: @@ -56,13 +56,13 @@ class AnalyserNode : public AudioNode { WindowType windowType_; std::shared_ptr windowData_; - std::unique_ptr inputBuffer_; - std::unique_ptr downMixBus_; - std::unique_ptr tempBuffer_; + std::unique_ptr inputArray_; + std::unique_ptr downMixBuffer_; + std::unique_ptr tempArray_; std::unique_ptr fft_; std::vector> complexData_; - std::unique_ptr magnitudeBuffer_; + std::unique_ptr magnitudeArray_; bool shouldDoFFTAnalysis_{true}; void doFFTAnalysis(); diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/destinations/AudioDestinationNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/destinations/AudioDestinationNode.cpp index 356bb367c..e11763597 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/destinations/AudioDestinationNode.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/destinations/AudioDestinationNode.cpp @@ -2,8 +2,8 @@ #include #include #include -#include #include +#include #include namespace audioapi { @@ 
-26,25 +26,25 @@ double AudioDestinationNode::getCurrentTime() const { } void AudioDestinationNode::renderAudio( - const std::shared_ptr &destinationBus, + const std::shared_ptr &destinationBuffer, int numFrames) { - if (numFrames < 0 || !destinationBus || !isInitialized_) { + if (numFrames < 0 || !destinationBuffer || !isInitialized_) { return; } if (std::shared_ptr context = context_.lock()) { - context->getGraphManager()->preProcessGraph(); + context->getGraphManager()->preProcessGraph(); } - destinationBus->zero(); + destinationBuffer->zero(); - auto processedBus = processAudio(destinationBus, numFrames, true); + auto processedBuffer = processAudio(destinationBuffer, numFrames, true); - if (processedBus && processedBus != destinationBus) { - destinationBus->copy(processedBus.get()); + if (processedBuffer && processedBuffer != destinationBuffer) { + destinationBuffer->copy(*processedBuffer); } - destinationBus->normalize(); + destinationBuffer->normalize(); currentSampleFrame_.fetch_add(numFrames, std::memory_order_release); } diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/destinations/AudioDestinationNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/destinations/AudioDestinationNode.h index d84598dd2..18750a389 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/destinations/AudioDestinationNode.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/destinations/AudioDestinationNode.h @@ -11,7 +11,7 @@ namespace audioapi { -class AudioBus; +class AudioBuffer; class BaseAudioContext; class AudioDestinationNode : public AudioNode { @@ -21,13 +21,15 @@ class AudioDestinationNode : public AudioNode { std::size_t getCurrentSampleFrame() const; double getCurrentTime() const; - void renderAudio(const std::shared_ptr &audioData, int numFrames); + void renderAudio(const std::shared_ptr &audioData, int numFrames); protected: // DestinationNode is triggered by AudioContext using renderAudio // processNode 
function is not necessary and is never called. - std::shared_ptr processNode(const std::shared_ptr &processingBus, int) final { - return processingBus; + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, + int) final { + return processingBuffer; }; private: diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/BiquadFilterNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/BiquadFilterNode.cpp index 152c14289..1e31c345c 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/BiquadFilterNode.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/BiquadFilterNode.cpp @@ -30,7 +30,7 @@ #include #include #include -#include +#include #include #include @@ -39,9 +39,12 @@ namespace audioapi { -BiquadFilterNode::BiquadFilterNode(const std::shared_ptr& context, const BiquadFilterOptions &options) : AudioNode(context, options) { - frequencyParam_ = - std::make_shared(options.frequency, 0.0f, context->getNyquistFrequency(), context); +BiquadFilterNode::BiquadFilterNode( + const std::shared_ptr &context, + const BiquadFilterOptions &options) + : AudioNode(context, options) { + frequencyParam_ = std::make_shared( + options.frequency, 0.0f, context->getNyquistFrequency(), context); detuneParam_ = std::make_shared( options.detune, -1200 * LOG2_MOST_POSITIVE_SINGLE_FLOAT, @@ -383,10 +386,10 @@ void BiquadFilterNode::applyFilter() { } } -std::shared_ptr BiquadFilterNode::processNode( - const std::shared_ptr &processingBus, +std::shared_ptr BiquadFilterNode::processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) { - int numChannels = processingBus->getNumberOfChannels(); + int numChannels = processingBuffer->getNumberOfChannels(); applyFilter(); @@ -400,31 +403,37 @@ std::shared_ptr BiquadFilterNode::processNode( float x1, x2, y1, y2; for (int c = 0; c < numChannels; ++c) { - auto channelData = processingBus->getChannel(c)->getData(); + auto channel 
= processingBuffer->getChannel(c)->subSpan(framesToProcess); x1 = x1_[c]; x2 = x2_[c]; y1 = y1_[c]; y2 = y2_[c]; - for (int i = 0; i < framesToProcess; ++i) { - float input = channelData[i]; - float output = b0 * input + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2; + for (float &sample : channel) { + auto input = sample; + auto output = b0 * input + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2; - channelData[i] = output; + // Avoid denormalized numbers + if (std::abs(output) < 1e-15f) { + output = 0.0f; + } + + sample = output; x2 = x1; x1 = input; y2 = y1; y1 = output; } + x1_[c] = x1; x2_[c] = x2; y1_[c] = y1; y2_[c] = y2; } - return processingBus; + return processingBuffer; } } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/BiquadFilterNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/BiquadFilterNode.h index 38f140c3e..3763f9c2e 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/BiquadFilterNode.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/BiquadFilterNode.h @@ -45,7 +45,7 @@ namespace audioapi { -class AudioBus; +class AudioBuffer; struct BiquadFilterOptions; class BiquadFilterNode : public AudioNode { @@ -72,8 +72,8 @@ class BiquadFilterNode : public AudioNode { size_t length); protected: - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override; private: diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/ConvolverNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/ConvolverNode.cpp index 8b5ceed9d..df8794ad8 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/ConvolverNode.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/ConvolverNode.cpp @@ -1,18 +1,20 @@ -#include #include #include -#include #include -#include +#include 
#include +#include #include +#include #include #include #include #include namespace audioapi { -ConvolverNode::ConvolverNode(const std::shared_ptr& context, const ConvolverOptions &options) +ConvolverNode::ConvolverNode( + const std::shared_ptr &context, + const ConvolverOptions &options) : AudioNode(context, options), gainCalibrationSampleRate_(context->getSampleRate()), remainingSegments_(0), @@ -20,10 +22,10 @@ ConvolverNode::ConvolverNode(const std::shared_ptr& context, c normalize_(!options.disableNormalization), signalledToStop_(false), scaleFactor_(1.0f), - intermediateBus_(nullptr), + intermediateBuffer_(nullptr), buffer_(nullptr), internalBuffer_(nullptr) { - setBuffer(options.bus); + setBuffer(options.buffer); isInitialized_ = true; } @@ -53,22 +55,20 @@ void ConvolverNode::setBuffer(const std::shared_ptr &buffer) { calculateNormalizationScale(); threadPool_ = std::make_shared(4); convolvers_.clear(); - for (int i = 0; i < buffer->getNumberOfChannels(); ++i) { + for (size_t i = 0; i < buffer->getNumberOfChannels(); ++i) { convolvers_.emplace_back(); - AudioArray channelData(buffer->getLength()); - memcpy(channelData.getData(), buffer->getChannelData(i), buffer->getLength() * sizeof(float)); - convolvers_.back().init(RENDER_QUANTUM_SIZE, channelData, buffer->getLength()); + AudioArray channelData(*buffer->getChannel(i)); + convolvers_.back().init(RENDER_QUANTUM_SIZE, channelData, buffer->getSize()); } if (buffer->getNumberOfChannels() == 1) { // add one more convolver, because right now input is always stereo convolvers_.emplace_back(); - AudioArray channelData(buffer->getLength()); - memcpy(channelData.getData(), buffer->getChannelData(0), buffer->getLength() * sizeof(float)); - convolvers_.back().init(RENDER_QUANTUM_SIZE, channelData, buffer->getLength()); + AudioArray channelData(*buffer->getChannel(0)); + convolvers_.back().init(RENDER_QUANTUM_SIZE, channelData, buffer->getSize()); } - internalBuffer_ = - std::make_shared(RENDER_QUANTUM_SIZE * 2, 
channelCount_, buffer->getSampleRate()); - intermediateBus_ = std::make_shared( + internalBuffer_ = std::make_shared( + RENDER_QUANTUM_SIZE * 2, channelCount_, buffer->getSampleRate()); + intermediateBuffer_ = std::make_shared( RENDER_QUANTUM_SIZE, convolvers_.size(), buffer->getSampleRate()); internalBufferIndex_ = 0; } @@ -82,20 +82,20 @@ void ConvolverNode::onInputDisabled() { } } -std::shared_ptr ConvolverNode::processInputs( - const std::shared_ptr &outputBus, +std::shared_ptr ConvolverNode::processInputs( + const std::shared_ptr &outputBuffer, int framesToProcess, bool checkIsAlreadyProcessed) { if (internalBufferIndex_ < framesToProcess) { - return AudioNode::processInputs(outputBus, RENDER_QUANTUM_SIZE, false); + return AudioNode::processInputs(outputBuffer, RENDER_QUANTUM_SIZE, false); } - return AudioNode::processInputs(outputBus, 0, false); + return AudioNode::processInputs(outputBuffer, 0, false); } -// processing pipeline: processingBus -> intermediateBus_ -> audioBus_ (mixing -// with intermediateBus_) -std::shared_ptr ConvolverNode::processNode( - const std::shared_ptr &processingBus, +// processing pipeline: processingBuffer -> intermediateBuffer_ -> audioBuffer_ (mixing +// with intermediateBuffer_) +std::shared_ptr ConvolverNode::processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) { if (signalledToStop_) { if (remainingSegments_ > 0) { @@ -104,49 +104,43 @@ std::shared_ptr ConvolverNode::processNode( disable(); signalledToStop_ = false; internalBufferIndex_ = 0; - return processingBus; + return processingBuffer; } } if (internalBufferIndex_ < framesToProcess) { - performConvolution(processingBus); // result returned to intermediateBus_ - audioBus_->sum(intermediateBus_.get()); + performConvolution(processingBuffer); // result returned to intermediateBuffer_ + audioBuffer_->sum(*intermediateBuffer_); - internalBuffer_->copy(audioBus_.get(), 0, internalBufferIndex_, RENDER_QUANTUM_SIZE); + 
internalBuffer_->copy(*audioBuffer_, 0, internalBufferIndex_, RENDER_QUANTUM_SIZE); internalBufferIndex_ += RENDER_QUANTUM_SIZE; } - audioBus_->zero(); - audioBus_->copy(internalBuffer_.get(), 0, 0, framesToProcess); + audioBuffer_->zero(); + audioBuffer_->copy(*internalBuffer_, 0, 0, framesToProcess); int remainingFrames = internalBufferIndex_ - framesToProcess; if (remainingFrames > 0) { - for (int i = 0; i < internalBuffer_->getNumberOfChannels(); ++i) { - memmove( - internalBuffer_->getChannel(i)->getData(), - internalBuffer_->getChannel(i)->getData() + framesToProcess, - remainingFrames * sizeof(float)); + for (size_t ch = 0; ch < internalBuffer_->getNumberOfChannels(); ++ch) { + internalBuffer_->getChannel(ch)->copyWithin(framesToProcess, 0, remainingFrames); } } + internalBufferIndex_ -= framesToProcess; - for (int i = 0; i < audioBus_->getNumberOfChannels(); ++i) { - dsp::multiplyByScalar( - audioBus_->getChannel(i)->getData(), - scaleFactor_, - audioBus_->getChannel(i)->getData(), - framesToProcess); + for (int i = 0; i < audioBuffer_->getNumberOfChannels(); ++i) { + audioBuffer_->getChannel(i)->scale(scaleFactor_); } - return audioBus_; + return audioBuffer_; } void ConvolverNode::calculateNormalizationScale() { int numberOfChannels = buffer_->getNumberOfChannels(); - int length = buffer_->getLength(); + auto length = buffer_->getSize(); float power = 0; - for (int channel = 0; channel < numberOfChannels; ++channel) { + for (size_t channel = 0; channel < numberOfChannels; ++channel) { float channelPower = 0; - auto channelData = buffer_->getChannelData(channel); + auto channelData = buffer_->getChannel(channel)->span(); for (int i = 0; i < length; ++i) { float sample = channelData[i]; channelPower += sample * sample; @@ -163,15 +157,15 @@ void ConvolverNode::calculateNormalizationScale() { scaleFactor_ *= gainCalibrationSampleRate_ / buffer_->getSampleRate(); } -void ConvolverNode::performConvolution(const std::shared_ptr &processingBus) { - if 
(processingBus->getNumberOfChannels() == 1) { +void ConvolverNode::performConvolution(const std::shared_ptr &processingBuffer) { + if (processingBuffer->getNumberOfChannels() == 1) { for (int i = 0; i < convolvers_.size(); ++i) { threadPool_->schedule([&, i] { convolvers_[i].process( - processingBus->getChannel(0)->getData(), intermediateBus_->getChannel(i)->getData()); + *processingBuffer->getChannel(0), *intermediateBuffer_->getChannel(i)); }); } - } else if (processingBus->getNumberOfChannels() == 2) { + } else if (processingBuffer->getNumberOfChannels() == 2) { std::vector inputChannelMap; std::vector outputChannelMap; if (convolvers_.size() == 2) { @@ -182,10 +176,10 @@ void ConvolverNode::performConvolution(const std::shared_ptr &processi outputChannelMap = {0, 3, 2, 1}; } for (int i = 0; i < convolvers_.size(); ++i) { - threadPool_->schedule([this, i, inputChannelMap, outputChannelMap, &processingBus] { + threadPool_->schedule([this, i, inputChannelMap, outputChannelMap, &processingBuffer] { convolvers_[i].process( - processingBus->getChannel(inputChannelMap[i])->getData(), - intermediateBus_->getChannel(outputChannelMap[i])->getData()); + *processingBuffer->getChannel(inputChannelMap[i]), + *intermediateBuffer_->getChannel(outputChannelMap[i])); }); } } diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/ConvolverNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/ConvolverNode.h index 5a705787e..0f3fcfdbb 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/ConvolverNode.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/ConvolverNode.h @@ -15,7 +15,7 @@ static constexpr double MIN_IR_POWER = 0.000125; namespace audioapi { -class AudioBus; +class AudioBuffer; class AudioBuffer; struct ConvolverOptions; @@ -31,13 +31,13 @@ class ConvolverNode : public AudioNode { void setBuffer(const std::shared_ptr &buffer); protected: - std::shared_ptr processNode( - const 
std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override; private: - std::shared_ptr processInputs( - const std::shared_ptr &outputBus, + std::shared_ptr processInputs( + const std::shared_ptr &outputBuffer, int framesToProcess, bool checkIsAlreadyProcessed) override; void onInputDisabled() override; @@ -47,18 +47,18 @@ class ConvolverNode : public AudioNode { bool normalize_; bool signalledToStop_; float scaleFactor_; - std::shared_ptr intermediateBus_; + std::shared_ptr intermediateBuffer_; // impulse response buffer std::shared_ptr buffer_; // buffer to hold internal processed data - std::shared_ptr internalBuffer_; + std::shared_ptr internalBuffer_; // vectors of convolvers, one per channel std::vector convolvers_; std::shared_ptr threadPool_; void calculateNormalizationScale(); - void performConvolution(const std::shared_ptr &processingBus); + void performConvolution(const std::shared_ptr &processingBuffer); }; } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/DelayNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/DelayNode.cpp index 15b53dbe8..2b54e4656 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/DelayNode.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/DelayNode.cpp @@ -3,16 +3,17 @@ #include #include #include -#include +#include #include namespace audioapi { -DelayNode::DelayNode(const std::shared_ptr& context, const DelayOptions &options) +DelayNode::DelayNode(const std::shared_ptr &context, const DelayOptions &options) : AudioNode(context, options), - delayTimeParam_(std::make_shared(options.delayTime, 0, options.maxDelayTime, context)), + delayTimeParam_( + std::make_shared(options.delayTime, 0, options.maxDelayTime, context)), delayBuffer_( - std::make_shared( + std::make_shared( static_cast( options.maxDelayTime * context->getSampleRate() 
+ 1), // +1 to enable delayTime equal to maxDelayTime @@ -38,11 +39,11 @@ void DelayNode::onInputDisabled() { } void DelayNode::delayBufferOperation( - const std::shared_ptr &processingBus, + const std::shared_ptr &processingBuffer, int framesToProcess, size_t &operationStartingIndex, DelayNode::BufferAction action) { - size_t processingBusStartIndex = 0; + size_t processingBufferStartIndex = 0; // handle buffer wrap around if (operationStartingIndex + framesToProcess > delayBuffer_->getSize()) { @@ -50,24 +51,24 @@ void DelayNode::delayBufferOperation( if (action == BufferAction::WRITE) { delayBuffer_->sum( - processingBus.get(), processingBusStartIndex, operationStartingIndex, framesToEnd); + *processingBuffer, processingBufferStartIndex, operationStartingIndex, framesToEnd); } else { // READ - processingBus->sum( - delayBuffer_.get(), operationStartingIndex, processingBusStartIndex, framesToEnd); + processingBuffer->sum( + *delayBuffer_, operationStartingIndex, processingBufferStartIndex, framesToEnd); } operationStartingIndex = 0; - processingBusStartIndex += framesToEnd; + processingBufferStartIndex += framesToEnd; framesToProcess -= framesToEnd; } if (action == BufferAction::WRITE) { delayBuffer_->sum( - processingBus.get(), processingBusStartIndex, operationStartingIndex, framesToProcess); - processingBus->zero(); + *processingBuffer, processingBufferStartIndex, operationStartingIndex, framesToProcess); + processingBuffer->zero(); } else { // READ - processingBus->sum( - delayBuffer_.get(), operationStartingIndex, processingBusStartIndex, framesToProcess); + processingBuffer->sum( + *delayBuffer_, operationStartingIndex, processingBufferStartIndex, framesToProcess); delayBuffer_->zero(operationStartingIndex, framesToProcess); } @@ -76,35 +77,38 @@ void DelayNode::delayBufferOperation( // delay buffer always has channelCount_ channels // processing is split into two parts -// 1. writing to delay buffer (mixing if needed) from processing bus -// 2. 
reading from delay buffer to processing bus (mixing if needed) with delay -std::shared_ptr DelayNode::processNode( - const std::shared_ptr &processingBus, +// 1. writing to delay buffer (mixing if needed) from processing buffer +// 2. reading from delay buffer to processing buffer (mixing if needed) with delay +std::shared_ptr DelayNode::processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) { // handling tail processing if (signalledToStop_) { if (remainingFrames_ <= 0) { disable(); signalledToStop_ = false; - return processingBus; + return processingBuffer; } - delayBufferOperation(processingBus, framesToProcess, readIndex_, DelayNode::BufferAction::READ); + delayBufferOperation( + processingBuffer, framesToProcess, readIndex_, DelayNode::BufferAction::READ); remainingFrames_ -= framesToProcess; - return processingBus; + return processingBuffer; } // normal processing std::shared_ptr context = context_.lock(); if (context == nullptr) - return processingBus; + return processingBuffer; auto delayTime = delayTimeParam_->processKRateParam(framesToProcess, context->getCurrentTime()); size_t writeIndex = static_cast(readIndex_ + delayTime * context->getSampleRate()) % delayBuffer_->getSize(); - delayBufferOperation(processingBus, framesToProcess, writeIndex, DelayNode::BufferAction::WRITE); - delayBufferOperation(processingBus, framesToProcess, readIndex_, DelayNode::BufferAction::READ); + delayBufferOperation( + processingBuffer, framesToProcess, writeIndex, DelayNode::BufferAction::WRITE); + delayBufferOperation( + processingBuffer, framesToProcess, readIndex_, DelayNode::BufferAction::READ); - return processingBus; + return processingBuffer; } } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/DelayNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/DelayNode.h index 0d280e0fd..eb6f74cd2 100644 --- 
a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/DelayNode.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/DelayNode.h @@ -8,7 +8,7 @@ namespace audioapi { -class AudioBus; +class AudioBuffer; struct DelayOptions; class DelayNode : public AudioNode { @@ -18,20 +18,20 @@ class DelayNode : public AudioNode { [[nodiscard]] std::shared_ptr getDelayTimeParam() const; protected: - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override; private: void onInputDisabled() override; enum class BufferAction { READ, WRITE }; void delayBufferOperation( - const std::shared_ptr &processingBus, + const std::shared_ptr &processingBuffer, int framesToProcess, size_t &operationStartingIndex, BufferAction action); std::shared_ptr delayTimeParam_; - std::shared_ptr delayBuffer_; + std::shared_ptr delayBuffer_; size_t readIndex_ = 0; bool signalledToStop_ = false; int remainingFrames_ = 0; diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/GainNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/GainNode.cpp index bb8865879..69bbf4c9c 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/GainNode.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/GainNode.cpp @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include namespace audioapi { @@ -23,23 +23,22 @@ std::shared_ptr GainNode::getGainParam() const { return gainParam_; } -std::shared_ptr GainNode::processNode( - const std::shared_ptr &processingBus, +std::shared_ptr GainNode::processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) { std::shared_ptr context = context_.lock(); if (context == nullptr) - return processingBus; + return processingBuffer; double time = context->getCurrentTime(); auto gainParamValues = 
gainParam_->processARateParam(framesToProcess, time); - for (int i = 0; i < processingBus->getNumberOfChannels(); i += 1) { - dsp::multiply( - processingBus->getChannel(i)->getData(), - gainParamValues->getChannel(0)->getData(), - processingBus->getChannel(i)->getData(), - framesToProcess); + auto gainValues = gainParamValues->getChannel(0); + + for (size_t i = 0; i < processingBuffer->getNumberOfChannels(); i++) { + auto channel = processingBuffer->getChannel(i); + channel->multiply(*gainValues, framesToProcess); } - return processingBus; + return processingBuffer; } } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/GainNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/GainNode.h index b224f8f55..86b18cc77 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/GainNode.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/GainNode.h @@ -7,7 +7,7 @@ namespace audioapi { -class AudioBus; +class AudioBuffer; struct GainOptions; class GainNode : public AudioNode { @@ -17,8 +17,8 @@ class GainNode : public AudioNode { [[nodiscard]] std::shared_ptr getGainParam() const; protected: - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override; private: diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/IIRFilterNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/IIRFilterNode.cpp index bf656d915..079c066b5 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/IIRFilterNode.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/IIRFilterNode.cpp @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include @@ -39,9 +39,7 @@ namespace audioapi { IIRFilterNode::IIRFilterNode( const std::shared_ptr &context, const 
IIRFilterOptions &options) - : AudioNode(context, options), - feedforward_(std::move(options.feedforward)), - feedback_(std::move(options.feedback)) { + : AudioNode(context, options), feedforward_(options.feedforward), feedback_(options.feedback) { int maxChannels = MAX_CHANNEL_COUNT; xBuffers_.resize(maxChannels); @@ -123,10 +121,10 @@ void IIRFilterNode::getFrequencyResponse( // TODO: tail -std::shared_ptr IIRFilterNode::processNode( - const std::shared_ptr &processingBus, +std::shared_ptr IIRFilterNode::processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) { - int numChannels = processingBus->getNumberOfChannels(); + int numChannels = processingBuffer->getNumberOfChannels(); size_t feedforwardLength = feedforward_.size(); size_t feedbackLength = feedback_.size(); @@ -135,37 +133,44 @@ std::shared_ptr IIRFilterNode::processNode( int mask = bufferLength - 1; for (int c = 0; c < numChannels; ++c) { - auto channelData = processingBus->getChannel(c)->getData(); + auto channel = processingBuffer->getChannel(c)->subSpan(framesToProcess); + auto &x = xBuffers_[c]; auto &y = yBuffers_[c]; size_t bufferIndex = bufferIndices[c]; - for (int n = 0; n < framesToProcess; ++n) { - float yn = feedforward_[0] * channelData[n]; + for (float &sample : channel) { + const float x_n = sample; + float y_n = feedforward_[0] * sample; for (int k = 1; k < minLength; ++k) { int m = (bufferIndex - k) & mask; - yn = std::fma(feedforward_[k], x[m], yn); - yn = std::fma(-feedback_[k], y[m], yn); + y_n = std::fma(feedforward_[k], x[m], y_n); + y_n = std::fma(-feedback_[k], y[m], y_n); } for (int k = minLength; k < feedforwardLength; ++k) { - yn = std::fma(feedforward_[k], x[(bufferIndex - k) & mask], yn); + y_n = std::fma(feedforward_[k], x[(bufferIndex - k) & mask], y_n); } for (int k = minLength; k < feedbackLength; ++k) { - yn = std::fma(-feedback_[k], y[(bufferIndex - k) & (bufferLength - 1)], yn); + y_n = std::fma(-feedback_[k], y[(bufferIndex - k) & (bufferLength - 
1)], y_n); + } + + // Avoid denormalized numbers + if (std::abs(y_n) < 1e-15f) { + y_n = 0.0f; } - channelData[n] = yn; + sample = y_n; - x[bufferIndex] = channelData[n]; - y[bufferIndex] = yn; + x[bufferIndex] = x_n; + y[bufferIndex] = y_n; bufferIndex = (bufferIndex + 1) & (bufferLength - 1); } bufferIndices[c] = bufferIndex; } - return processingBus; + return processingBuffer; } } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/IIRFilterNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/IIRFilterNode.h index 4ac80bf8b..28ebcef9d 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/IIRFilterNode.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/IIRFilterNode.h @@ -49,8 +49,8 @@ class IIRFilterNode : public AudioNode { size_t length); protected: - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override; private: diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/PeriodicWave.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/PeriodicWave.cpp index 07bbcd86e..423c21789 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/PeriodicWave.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/PeriodicWave.cpp @@ -47,7 +47,8 @@ PeriodicWave::PeriodicWave(float sampleRate, bool disableNormalization) lowestFundamentalFrequency_ = static_cast(nyquistFrequency) / static_cast(getMaxNumberOfPartials()); scale_ = static_cast(getPeriodicWaveSize()) / static_cast(sampleRate_); - bandLimitedTables_ = new float *[numberOfRanges_]; + bandLimitedTables_ = + std::make_unique(getPeriodicWaveSize(), numberOfRanges_, sampleRate_); fft_ = std::make_unique(getPeriodicWaveSize()); } @@ -69,14 +70,6 @@ PeriodicWave::PeriodicWave( createBandLimitedTables(complexData, 
length); } -PeriodicWave::~PeriodicWave() { - for (int i = 0; i < numberOfRanges_; i++) { - delete[] bandLimitedTables_[i]; - } - delete[] bandLimitedTables_; - bandLimitedTables_ = nullptr; -} - int PeriodicWave::getPeriodicWaveSize() const { if (sampleRate_ <= 24000) { return 2048; @@ -94,13 +87,10 @@ float PeriodicWave::getScale() const { } float PeriodicWave::getSample(float fundamentalFrequency, float phase, float phaseIncrement) { - float *lowerWaveData = nullptr; - float *higherWaveData = nullptr; + WaveTableSource source = getWaveDataForFundamentalFrequency(fundamentalFrequency); - auto interpolationFactor = - getWaveDataForFundamentalFrequency(fundamentalFrequency, lowerWaveData, higherWaveData); - - return doInterpolation(phase, phaseIncrement, interpolationFactor, lowerWaveData, higherWaveData); + return doInterpolation( + phase, phaseIncrement, source.interpolationFactor, *source.lower, *source.higher); } int PeriodicWave::getMaxNumberOfPartials() const { @@ -217,31 +207,24 @@ void PeriodicWave::createBandLimitedTables( // Zero out the DC and nquist components. complexFFTData[0] = {0.0f, 0.0f}; - bandLimitedTables_[rangeIndex] = new float[fftSize]; + auto channel = bandLimitedTables_->getChannel(rangeIndex); // Perform the inverse FFT to get the time domain representation of the // band-limited waveform. 
- fft_->doInverseFFT(complexFFTData, bandLimitedTables_[rangeIndex]); + fft_->doInverseFFT(complexFFTData, *channel); if (!disableNormalization_ && rangeIndex == 0) { - float maxValue = dsp::maximumMagnitude(bandLimitedTables_[rangeIndex], fftSize); + float maxValue = channel->getMaxAbsValue(); if (maxValue != 0) { normalizationFactor = 1.0f / maxValue; } } - dsp::multiplyByScalar( - bandLimitedTables_[rangeIndex], - normalizationFactor, - bandLimitedTables_[rangeIndex], - fftSize); + channel->scale(normalizationFactor); } } -float PeriodicWave::getWaveDataForFundamentalFrequency( - float fundamentalFrequency, - float *&lowerWaveData, - float *&higherWaveData) { +WaveTableSource PeriodicWave::getWaveDataForFundamentalFrequency(float fundamentalFrequency) const { // negative frequencies are allowed and will be treated as positive. fundamentalFrequency = std::fabs(fundamentalFrequency); @@ -260,19 +243,19 @@ float PeriodicWave::getWaveDataForFundamentalFrequency( lowerRangeIndex < numberOfRanges_ - 1 ? lowerRangeIndex + 1 : lowerRangeIndex; // get the wave data for the lower and higher range index. - lowerWaveData = bandLimitedTables_[lowerRangeIndex]; - higherWaveData = bandLimitedTables_[higherRangeIndex]; - // calculate the interpolation factor between the lower and higher range data. 
- return pitchRange - static_cast(lowerRangeIndex); + return { + bandLimitedTables_->getChannel(lowerRangeIndex), + bandLimitedTables_->getChannel(higherRangeIndex), + pitchRange - static_cast(lowerRangeIndex)}; } float PeriodicWave::doInterpolation( float phase, float phaseIncrement, float waveTableInterpolationFactor, - const float *lowerWaveData, - const float *higherWaveData) const { + const AudioArray &lowerWaveData, + const AudioArray &higherWaveData) const { float lowerWaveDataSample = 0; float higherWaveDataSample = 0; @@ -336,7 +319,6 @@ float PeriodicWave::doInterpolation( } } - return (1 - waveTableInterpolationFactor) * higherWaveDataSample + - waveTableInterpolationFactor * lowerWaveDataSample; + return std::lerp(higherWaveDataSample, lowerWaveDataSample, waveTableInterpolationFactor); } } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/PeriodicWave.h b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/PeriodicWave.h index e04b83dae..d39cb1e15 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/PeriodicWave.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/PeriodicWave.h @@ -30,6 +30,7 @@ #include #include +#include #include #include @@ -38,6 +39,13 @@ #include namespace audioapi { + +struct WaveTableSource { + const AudioArray *lower; + const AudioArray *higher; + float interpolationFactor; +}; + class PeriodicWave { public: explicit PeriodicWave(float sampleRate, OscillatorType type, bool disableNormalization); @@ -46,7 +54,6 @@ class PeriodicWave { const std::vector> &complexData, int length, bool disableNormalization); - ~PeriodicWave(); [[nodiscard]] int getPeriodicWaveSize() const; [[nodiscard]] float getScale() const; @@ -81,21 +88,18 @@ class PeriodicWave { // This function returns the interpolation factor between the lower and higher // range data and sets the lower and higher wave data for the given // fundamental frequency. 
- float getWaveDataForFundamentalFrequency( - float fundamentalFrequency, - float *&lowerWaveData, - float *&higherWaveData); + WaveTableSource getWaveDataForFundamentalFrequency(float fundamentalFrequency) const; // This function performs interpolation between the lower and higher range // data based on the interpolation factor and current buffer index. Type of // interpolation is determined by the phase increment. Returns the // interpolated sample. - float doInterpolation( + [[nodiscard]] float doInterpolation( float bufferIndex, float phaseIncrement, float waveTableInterpolationFactor, - const float *lowerWaveData, - const float *higherWaveData) const; + const AudioArray &lowerWaveData, + const AudioArray &higherWaveData) const; // determines the time resolution of the waveform. float sampleRate_; @@ -108,8 +112,7 @@ class PeriodicWave { // rate. float scale_; // array of band-limited waveforms. - float **bandLimitedTables_; - // + std::unique_ptr bandLimitedTables_; std::unique_ptr fft_; // if true, the waveTable is not normalized. 
bool disableNormalization_; diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/StereoPannerNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/StereoPannerNode.cpp index ae7c225e3..7b81797c5 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/StereoPannerNode.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/StereoPannerNode.cpp @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include // https://webaudio.github.io/web-audio-api/#stereopanner-algorithm @@ -22,62 +22,59 @@ std::shared_ptr StereoPannerNode::getPanParam() const { return panParam_; } -std::shared_ptr StereoPannerNode::processNode( - const std::shared_ptr &processingBus, +std::shared_ptr StereoPannerNode::processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) { std::shared_ptr context = context_.lock(); if (context == nullptr) - return processingBus; + return processingBuffer; double time = context->getCurrentTime(); double deltaTime = 1.0 / context->getSampleRate(); - auto *inputLeft = processingBus->getChannelByType(AudioBus::ChannelLeft); - auto panParamValues = - panParam_->processARateParam(framesToProcess, time)->getChannel(0)->getData(); + auto panParamValues = panParam_->processARateParam(framesToProcess, time)->getChannel(0)->span(); - auto *outputLeft = audioBus_->getChannelByType(AudioBus::ChannelLeft); - auto *outputRight = audioBus_->getChannelByType(AudioBus::ChannelRight); + auto outputLeft = audioBuffer_->getChannelByType(AudioBuffer::ChannelLeft)->span(); + auto outputRight = audioBuffer_->getChannelByType(AudioBuffer::ChannelRight)->span(); // Input is mono - if (processingBus->getNumberOfChannels() == 1) { - for (int i = 0; i < framesToProcess; i++) { - auto pan = std::clamp(panParamValues[i], -1.0f, 1.0f); - auto x = (pan + 1) / 2; - - auto gainL = static_cast(cos(x * PI / 2)); - auto gainR = static_cast(sin(x * PI / 2)); + if 
(processingBuffer->getNumberOfChannels() == 1) { + auto inputLeft = processingBuffer->getChannelByType(AudioBuffer::ChannelMono)->span(); - float input = (*inputLeft)[i]; + for (int i = 0; i < framesToProcess; i++) { + const auto pan = std::clamp(panParamValues[i], -1.0f, 1.0f); + const auto x = (pan + 1) / 2; + const auto angle = x * (PI / 2); + const float input = inputLeft[i]; - (*outputLeft)[i] = input * gainL; - (*outputRight)[i] = input * gainR; + outputLeft[i] = input * std::cos(angle); + outputRight[i] = input * std::sin(angle); time += deltaTime; } } else { // Input is stereo - auto *inputRight = processingBus->getChannelByType(AudioBus::ChannelRight); - for (int i = 0; i < framesToProcess; i++) { - auto pan = std::clamp(panParamValues[i], -1.0f, 1.0f); - auto x = (pan <= 0 ? pan + 1 : pan); - - auto gainL = static_cast(cos(x * PI / 2)); - auto gainR = static_cast(sin(x * PI / 2)); + auto inputLeft = processingBuffer->getChannelByType(AudioBuffer::ChannelLeft)->span(); + auto inputRight = processingBuffer->getChannelByType(AudioBuffer::ChannelRight)->span(); - float inputL = (*inputLeft)[i]; - float inputR = (*inputRight)[i]; + for (int i = 0; i < framesToProcess; i++) { + const auto pan = std::clamp(panParamValues[i], -1.0f, 1.0f); + const auto x = (pan <= 0 ? 
pan + 1 : pan); + const auto gainL = static_cast(cos(x * PI / 2)); + const auto gainR = static_cast(sin(x * PI / 2)); + const float inputL = inputLeft[i]; + const float inputR = inputRight[i]; if (pan <= 0) { - (*outputLeft)[i] = inputL + inputR * gainL; - (*outputRight)[i] = inputR * gainR; + outputLeft[i] = inputL + inputR * gainL; + outputRight[i] = inputR * gainR; } else { - (*outputLeft)[i] = inputL * gainL; - (*outputRight)[i] = inputR + inputL * gainR; + outputLeft[i] = inputL * gainL; + outputRight[i] = inputR + inputL * gainR; } time += deltaTime; } } - return audioBus_; + return audioBuffer_; } } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/StereoPannerNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/StereoPannerNode.h index 6f79ecb52..064a9e91b 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/StereoPannerNode.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/StereoPannerNode.h @@ -9,7 +9,7 @@ namespace audioapi { -class AudioBus; +class AudioBuffer; struct StereoPannerOptions; class StereoPannerNode : public AudioNode { @@ -21,8 +21,8 @@ class StereoPannerNode : public AudioNode { [[nodiscard]] std::shared_ptr getPanParam() const; protected: - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override; private: diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WaveShaperNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WaveShaperNode.cpp index 9824d763d..7ae64471c 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WaveShaperNode.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WaveShaperNode.cpp @@ -2,8 +2,8 @@ #include #include #include -#include -#include +#include +#include #include #include @@ -17,7 
+17,7 @@ WaveShaperNode::WaveShaperNode( : AudioNode(context, options), oversample_(options.oversample) { waveShapers_.reserve(6); - for (int i = 0; i < channelCount_; i++) { + for (size_t i = 0; i < channelCount_; i++) { waveShapers_.emplace_back(std::make_unique(nullptr)); } setCurve(options.curve); @@ -37,12 +37,12 @@ void WaveShaperNode::setOversample(OverSampleType type) { } } -std::shared_ptr WaveShaperNode::getCurve() const { +std::shared_ptr WaveShaperNode::getCurve() const { std::scoped_lock lock(mutex_); return curve_; } -void WaveShaperNode::setCurve(const std::shared_ptr &curve) { +void WaveShaperNode::setCurve(const std::shared_ptr &curve) { std::scoped_lock lock(mutex_); curve_ = curve; @@ -51,30 +51,30 @@ void WaveShaperNode::setCurve(const std::shared_ptr &curve) { } } -std::shared_ptr WaveShaperNode::processNode( - const std::shared_ptr &processingBus, +std::shared_ptr WaveShaperNode::processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) { if (!isInitialized_) { - return processingBus; + return processingBuffer; } std::unique_lock lock(mutex_, std::try_to_lock); if (!lock.owns_lock()) { - return processingBus; + return processingBuffer; } if (curve_ == nullptr) { - return processingBus; + return processingBuffer; } - for (int channel = 0; channel < processingBus->getNumberOfChannels(); channel++) { - auto channelData = processingBus->getSharedChannel(channel); + for (size_t channel = 0; channel < processingBuffer->getNumberOfChannels(); channel++) { + auto channelData = processingBuffer->getChannel(channel); - waveShapers_[channel]->process(channelData, framesToProcess); + waveShapers_[channel]->process(*channelData, framesToProcess); } - return processingBus; + return processingBuffer; } } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WaveShaperNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WaveShaperNode.h index d1620e02a..18a3f96a2 100644 --- 
a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WaveShaperNode.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WaveShaperNode.h @@ -14,8 +14,8 @@ namespace audioapi { -class AudioBus; -class AudioArray; +class AudioBuffer; +class AudioArrayBuffer; struct WaveShaperOptions; class WaveShaperNode : public AudioNode { @@ -25,19 +25,19 @@ class WaveShaperNode : public AudioNode { const WaveShaperOptions &options); [[nodiscard]] OverSampleType getOversample() const; - [[nodiscard]] std::shared_ptr getCurve() const; + [[nodiscard]] std::shared_ptr getCurve() const; void setOversample(OverSampleType); - void setCurve(const std::shared_ptr &curve); + void setCurve(const std::shared_ptr &curve); protected: - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override; private: std::atomic oversample_; - std::shared_ptr curve_{}; + std::shared_ptr curve_; mutable std::mutex mutex_; std::vector> waveShapers_{}; diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WorkletNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WorkletNode.cpp index 1a76ea72e..cac5c720a 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WorkletNode.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WorkletNode.cpp @@ -7,25 +7,26 @@ namespace audioapi { WorkletNode::WorkletNode( - std::shared_ptr context, + const std::shared_ptr &context, size_t bufferLength, size_t inputChannelCount, WorkletsRunner &&runtime) : AudioNode(context), workletRunner_(std::move(runtime)), - bus_(std::make_shared(bufferLength, inputChannelCount, context->getSampleRate())), + buffer_( + std::make_shared(bufferLength, inputChannelCount, context->getSampleRate())), bufferLength_(bufferLength), inputChannelCount_(inputChannelCount), curBuffIndex_(0) { isInitialized_ = true; } 
-std::shared_ptr WorkletNode::processNode( - const std::shared_ptr &processingBus, +std::shared_ptr WorkletNode::processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) { size_t processed = 0; size_t channelCount_ = - std::min(inputChannelCount_, static_cast(processingBus->getNumberOfChannels())); + std::min(inputChannelCount_, static_cast(processingBuffer->getNumberOfChannels())); while (processed < framesToProcess) { size_t framesToWorkletInvoke = bufferLength_ - curBuffIndex_; size_t needsToProcess = framesToProcess - processed; @@ -34,7 +35,7 @@ std::shared_ptr WorkletNode::processNode( /// here we copy /// to [curBuffIndex_, curBuffIndex_ + shouldProcess] /// from [processed, processed + shouldProcess] - bus_->copy(processingBus.get(), processed, curBuffIndex_, shouldProcess); + buffer_->copy(*processingBuffer, processed, curBuffIndex_, shouldProcess); processed += shouldProcess; curBuffIndex_ += shouldProcess; @@ -49,16 +50,15 @@ std::shared_ptr WorkletNode::processNode( /// Arguments preparation auto jsArray = jsi::Array(uiRuntimeRaw, channelCount_); for (size_t ch = 0; ch < channelCount_; ch++) { - auto audioArray = std::make_shared(bufferLength_); - audioArray->copy(bus_->getChannel(ch)); - auto sharedAudioArray = std::make_shared(audioArray); + auto sharedAudioArray = std::make_shared(bufferLength_); + sharedAudioArray->copy(*buffer_->getChannel(ch)); auto sharedAudioArraySize = sharedAudioArray->size(); auto arrayBuffer = jsi::ArrayBuffer(uiRuntimeRaw, std::move(sharedAudioArray)); arrayBuffer.setExternalMemoryPressure(uiRuntimeRaw, sharedAudioArraySize); jsArray.setValueAtIndex(uiRuntimeRaw, ch, std::move(arrayBuffer)); } - bus_->zero(); + buffer_->zero(); /// Call the worklet workletRunner_.callUnsafe( @@ -68,7 +68,7 @@ std::shared_ptr WorkletNode::processNode( }); } - return processingBus; + return processingBuffer; } } // namespace audioapi diff --git 
a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WorkletNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WorkletNode.h index a830acb7e..0523f99fd 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WorkletNode.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WorkletNode.h @@ -3,9 +3,9 @@ #include #include #include -#include #include -#include +#include +#include #include #include @@ -24,10 +24,10 @@ class WorkletNode : public AudioNode { : AudioNode(context) {} protected: - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override { - return processingBus; + return processingBuffer; } }; #else @@ -37,7 +37,7 @@ using namespace facebook; class WorkletNode : public AudioNode { public: explicit WorkletNode( - std::shared_ptr context, + const std::shared_ptr &context, size_t bufferLength, size_t inputChannelCount, WorkletsRunner &&workletRunner); @@ -45,13 +45,13 @@ class WorkletNode : public AudioNode { ~WorkletNode() override = default; protected: - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override; private: WorkletsRunner workletRunner_; - std::shared_ptr bus_; + std::shared_ptr buffer_; /// @brief Length of the byte buffer that will be passed to the AudioArrayBuffer size_t bufferLength_; diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WorkletProcessingNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WorkletProcessingNode.cpp index e385f7b89..f1bb4c642 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WorkletProcessingNode.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WorkletProcessingNode.cpp @@ -6,7 +6,7 @@ namespace audioapi { 
WorkletProcessingNode::WorkletProcessingNode( - std::shared_ptr context, + const std::shared_ptr &context, WorkletsRunner &&workletRunner) : AudioNode(context), workletRunner_(std::move(workletRunner)) { // Pre-allocate buffers for max 128 frames and 2 channels (stereo) @@ -15,29 +15,22 @@ WorkletProcessingNode::WorkletProcessingNode( outputBuffsHandles_.resize(maxChannelCount); for (size_t i = 0; i < maxChannelCount; ++i) { - auto inputAudioArray = std::make_shared(RENDER_QUANTUM_SIZE); - inputBuffsHandles_[i] = std::make_shared(inputAudioArray); - - auto outputAudioArray = std::make_shared(RENDER_QUANTUM_SIZE); - outputBuffsHandles_[i] = std::make_shared(outputAudioArray); + inputBuffsHandles_[i] = std::make_shared(RENDER_QUANTUM_SIZE); + outputBuffsHandles_[i] = std::make_shared(RENDER_QUANTUM_SIZE); } isInitialized_ = true; } -std::shared_ptr WorkletProcessingNode::processNode( - const std::shared_ptr &processingBus, +std::shared_ptr WorkletProcessingNode::processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) { size_t channelCount = std::min( static_cast(2), // Fixed to stereo for now - static_cast(processingBus->getNumberOfChannels())); + static_cast(processingBuffer->getNumberOfChannels())); // Copy input data to pre-allocated input buffers for (size_t ch = 0; ch < channelCount; ch++) { - auto channelData = processingBus->getChannel(ch)->getData(); - std::memcpy( - /* dest */ inputBuffsHandles_[ch]->data(), - /* src */ reinterpret_cast(channelData), - /* size */ framesToProcess * sizeof(float)); + inputBuffsHandles_[ch]->copy(*processingBuffer->getChannel(ch), 0, 0, framesToProcess); } // Execute the worklet @@ -70,23 +63,20 @@ std::shared_ptr WorkletProcessingNode::processNode( jsi::Value(rt, time)); }); - // Copy processed output data back to the processing bus or zero on failure + // Copy processed output data back to the processing buffer or zero on failure for (size_t ch = 0; ch < channelCount; ch++) { - auto channelData = 
processingBus->getChannel(ch)->getData(); + auto channelData = processingBuffer->getChannel(ch); if (result.has_value()) { // Copy processed output data - std::memcpy( - /* dest */ reinterpret_cast(channelData), - /* src */ outputBuffsHandles_[ch]->data(), - /* size */ framesToProcess * sizeof(float)); + channelData->copy(*outputBuffsHandles_[ch], 0, 0, framesToProcess); } else { // Zero the output on worklet execution failure - std::memset(channelData, 0, framesToProcess * sizeof(float)); + channelData->zero(0, framesToProcess); } } - return processingBus; + return processingBuffer; } } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WorkletProcessingNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WorkletProcessingNode.h index b302e81bc..42e8ec72b 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WorkletProcessingNode.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/effects/WorkletProcessingNode.h @@ -3,9 +3,9 @@ #include #include #include -#include #include -#include +#include +#include #include #include @@ -22,10 +22,10 @@ class WorkletProcessingNode : public AudioNode { : AudioNode(context) {} protected: - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( - const std::shared_ptr &processingBuffer, + int framesToProcess) override { - return processingBus; + return processingBuffer; } }; #else @@ -35,12 +35,12 @@ using namespace facebook; class WorkletProcessingNode : public AudioNode { public: explicit WorkletProcessingNode( - std::shared_ptr context, + const std::shared_ptr &context, WorkletsRunner &&workletRunner); protected: - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override; private: diff --git
a/packages/react-native-audio-api/common/cpp/audioapi/core/inputs/AudioRecorder.h b/packages/react-native-audio-api/common/cpp/audioapi/core/inputs/AudioRecorder.h index b0f1fb89c..c2b196765 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/inputs/AudioRecorder.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/inputs/AudioRecorder.h @@ -9,7 +9,7 @@ namespace audioapi { -class AudioBus; +class AudioBuffer; class AudioFileWriter; class CircularAudioArray; class RecorderAdapterNode; diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBuffer.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBuffer.cpp deleted file mode 100644 index 295ffb100..000000000 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBuffer.cpp +++ /dev/null @@ -1,61 +0,0 @@ -#include -#include -#include -#include - -#include -#include -#include - -namespace audioapi { - -AudioBuffer::AudioBuffer(int numberOfChannels, size_t length, float sampleRate) - : bus_(std::make_shared(length, numberOfChannels, sampleRate)) {} - -AudioBuffer::AudioBuffer(std::shared_ptr bus) { - bus_ = std::move(bus); -} - -size_t AudioBuffer::getLength() const { - return bus_->getSize(); -} - -int AudioBuffer::getNumberOfChannels() const { - return bus_->getNumberOfChannels(); -} - -float AudioBuffer::getSampleRate() const { - return bus_->getSampleRate(); -} - -double AudioBuffer::getDuration() const { - return static_cast(getLength()) / getSampleRate(); -} - -float *AudioBuffer::getChannelData(int channel) const { - return bus_->getChannel(channel)->getData(); -} - -void AudioBuffer::copyFromChannel( - float *destination, - size_t destinationLength, - int channelNumber, - size_t startInChannel) const { - memcpy( - destination, - bus_->getChannel(channelNumber)->getData() + startInChannel, - std::min(destinationLength, getLength() - startInChannel) * sizeof(float)); -} - -void AudioBuffer::copyToChannel( - 
const float *source, - size_t sourceLength, - int channelNumber, - size_t startInChannel) { - memcpy( - bus_->getChannel(channelNumber)->getData() + startInChannel, - source, - std::min(sourceLength, getLength() - startInChannel) * sizeof(float)); -} - -} // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBuffer.h b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBuffer.h deleted file mode 100644 index 4a8cfea6c..000000000 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBuffer.h +++ /dev/null @@ -1,44 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include - -namespace audioapi { - -class AudioBus; - -/// AudioBuffer is not thread-safe. -/// Due to that fact it should be copied when passing between threads. -class AudioBuffer { - public: - explicit AudioBuffer(int numberOfChannels, size_t length, float sampleRate); - explicit AudioBuffer(std::shared_ptr bus); - - [[nodiscard]] size_t getLength() const; - [[nodiscard]] float getSampleRate() const; - [[nodiscard]] double getDuration() const; - - [[nodiscard]] int getNumberOfChannels() const; - [[nodiscard]] float *getChannelData(int channel) const; - - void copyFromChannel( - float *destination, - size_t destinationLength, - int channelNumber, - size_t startInChannel) const; - void - copyToChannel(const float *source, size_t sourceLength, int channelNumber, size_t startInChannel); - - private: - friend class AudioBufferSourceNode; - friend class AudioBufferQueueSourceNode; - friend class AudioBufferHostObject; - - std::shared_ptr bus_; -}; - -} // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.cpp index b9fd1e1bd..f90b8195d 100644 --- 
a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.cpp @@ -5,7 +5,7 @@ #include #include #include -#include +#include #include #include #include @@ -24,8 +24,8 @@ AudioBufferBaseSourceNode::AudioBufferBaseSourceNode( playbackRateParam_ = std::make_shared( options.playbackRate, MOST_NEGATIVE_SINGLE_FLOAT, MOST_POSITIVE_SINGLE_FLOAT, context); - playbackRateBus_ = - std::make_shared(RENDER_QUANTUM_SIZE * 3, channelCount_, context->getSampleRate()); + playbackRateBuffer_ = std::make_shared( + RENDER_QUANTUM_SIZE * 3, channelCount_, context->getSampleRate()); stretch_ = std::make_shared>(); } @@ -99,14 +99,14 @@ void AudioBufferBaseSourceNode::sendOnPositionChangedEvent() { } void AudioBufferBaseSourceNode::processWithPitchCorrection( - const std::shared_ptr &processingBus, + const std::shared_ptr &processingBuffer, int framesToProcess) { size_t startOffset = 0; size_t offsetLength = 0; std::shared_ptr context = context_.lock(); if (context == nullptr) { - processingBus->zero(); + processingBuffer->zero(); return; } auto time = context->getCurrentTime(); @@ -115,12 +115,12 @@ void AudioBufferBaseSourceNode::processWithPitchCorrection( auto detune = std::clamp(detuneParam_->processKRateParam(framesToProcess, time) / 100.0f, -12.0f, 12.0f); - playbackRateBus_->zero(); + playbackRateBuffer_->zero(); auto framesNeededToStretch = static_cast(playbackRate * static_cast(framesToProcess)); updatePlaybackInfo( - playbackRateBus_, + playbackRateBuffer_, framesNeededToStretch, startOffset, offsetLength, @@ -128,14 +128,14 @@ void AudioBufferBaseSourceNode::processWithPitchCorrection( context->getCurrentSampleFrame()); if (playbackRate == 0.0f || (!isPlaying() && !isStopScheduled())) { - processingBus->zero(); + processingBuffer->zero(); return; } - processWithoutInterpolation(playbackRateBus_, startOffset, offsetLength, playbackRate); + 
processWithoutInterpolation(playbackRateBuffer_, startOffset, offsetLength, playbackRate); stretch_->process( - playbackRateBus_.get()[0], framesNeededToStretch, processingBus.get()[0], framesToProcess); + playbackRateBuffer_.get()[0], framesNeededToStretch, processingBuffer.get()[0], framesToProcess); if (detune != 0.0f) { stretch_->setTransposeSemitones(detune); @@ -145,20 +145,20 @@ void AudioBufferBaseSourceNode::processWithPitchCorrection( } void AudioBufferBaseSourceNode::processWithoutPitchCorrection( - const std::shared_ptr &processingBus, + const std::shared_ptr &processingBuffer, int framesToProcess) { size_t startOffset = 0; size_t offsetLength = 0; std::shared_ptr context = context_.lock(); if (context == nullptr) { - processingBus->zero(); + processingBuffer->zero(); return; } auto computedPlaybackRate = getComputedPlaybackRateValue(framesToProcess, context->getCurrentTime()); updatePlaybackInfo( - processingBus, + processingBuffer, framesToProcess, startOffset, offsetLength, @@ -166,14 +166,14 @@ void AudioBufferBaseSourceNode::processWithoutPitchCorrection( context->getCurrentSampleFrame()); if (computedPlaybackRate == 0.0f || (!isPlaying() && !isStopScheduled())) { - processingBus->zero(); + processingBuffer->zero(); return; } if (std::fabs(computedPlaybackRate) == 1.0) { - processWithoutInterpolation(processingBus, startOffset, offsetLength, computedPlaybackRate); + processWithoutInterpolation(processingBuffer, startOffset, offsetLength, computedPlaybackRate); } else { - processWithInterpolation(processingBus, startOffset, offsetLength, computedPlaybackRate); + processWithInterpolation(processingBuffer, startOffset, offsetLength, computedPlaybackRate); } sendOnPositionChangedEvent(); diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.h index d44a4c702..139594fdb 100644 --- 
a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferBaseSourceNode.h @@ -9,7 +9,7 @@ namespace audioapi { -class AudioBus; +class AudioBuffer; class AudioParam; struct BaseAudioBufferSourceOptions; @@ -36,7 +36,7 @@ class AudioBufferBaseSourceNode : public AudioScheduledSourceNode { // pitch correction std::shared_ptr> stretch_; - std::shared_ptr playbackRateBus_; + std::shared_ptr playbackRateBuffer_; // k-rate params std::shared_ptr detuneParam_; @@ -55,22 +55,22 @@ class AudioBufferBaseSourceNode : public AudioScheduledSourceNode { void sendOnPositionChangedEvent(); void processWithPitchCorrection( - const std::shared_ptr &processingBus, + const std::shared_ptr &processingBuffer, int framesToProcess); void processWithoutPitchCorrection( - const std::shared_ptr &processingBus, + const std::shared_ptr &processingBuffer, int framesToProcess); float getComputedPlaybackRateValue(int framesToProcess, double time); virtual void processWithoutInterpolation( - const std::shared_ptr &processingBus, + const std::shared_ptr &processingBuffer, size_t startOffset, size_t offsetLength, float playbackRate) = 0; virtual void processWithInterpolation( - const std::shared_ptr &processingBus, + const std::shared_ptr &processingBuffer, size_t startOffset, size_t offsetLength, float playbackRate) = 0; diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferQueueSourceNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferQueueSourceNode.cpp index d5d9268ae..dbee512b9 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferQueueSourceNode.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferQueueSourceNode.cpp @@ -4,10 +4,10 @@ #include #include #include -#include +#include #include #include -#include +#include #include #include @@ 
-32,7 +32,7 @@ AudioBufferQueueSourceNode::AudioBufferQueueSourceNode( int extraTailFrames = static_cast(stretch_->inputLatency() + stretch_->outputLatency()); - tailBuffer_ = std::make_shared(channelCount_, extraTailFrames, context->getSampleRate()); - tailBuffer_->bus_->zero(); + tailBuffer_ = std::make_shared(extraTailFrames, channelCount_, context->getSampleRate()); + tailBuffer_->zero(); } isInitialized_ = true; @@ -134,28 +134,28 @@ void AudioBufferQueueSourceNode::setOnBufferEndedCallbackId(uint64_t callbackId) } } -std::shared_ptr AudioBufferQueueSourceNode::processNode( - const std::shared_ptr &processingBus, +std::shared_ptr AudioBufferQueueSourceNode::processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) { if (auto locker = Locker::tryLock(getBufferLock())) { // no audio data to fill, zero the output and return. if (buffers_.empty()) { - processingBus->zero(); - return processingBus; + processingBuffer->zero(); + return processingBuffer; } if (!pitchCorrection_) { - processWithoutPitchCorrection(processingBus, framesToProcess); + processWithoutPitchCorrection(processingBuffer, framesToProcess); } else { - processWithPitchCorrection(processingBus, framesToProcess); + processWithPitchCorrection(processingBuffer, framesToProcess); } handleStopScheduled(); } else { - processingBus->zero(); + processingBuffer->zero(); } - return processingBus; + return processingBuffer; } double AudioBufferQueueSourceNode::getCurrentPosition() const { @@ -184,7 +184,7 @@ void AudioBufferQueueSourceNode::sendOnBufferEndedEvent(size_t bufferId, bool is */ void AudioBufferQueueSourceNode::processWithoutInterpolation( - const std::shared_ptr &processingBus, + const std::shared_ptr &processingBuffer, size_t startOffset, size_t offsetLength, float playbackRate) { @@ -198,22 +198,22 @@ void AudioBufferQueueSourceNode::processWithoutInterpolation( size_t framesLeft = offsetLength; while (framesLeft > 0) { - size_t framesToEnd = buffer->getLength() - readIndex; + size_t framesToEnd = buffer->getSize() - readIndex; size_t framesToCopy = 
std::min(framesToEnd, framesLeft); framesToCopy = framesToCopy > 0 ? framesToCopy : 0; assert(readIndex >= 0); assert(writeIndex >= 0); - assert(readIndex + framesToCopy <= buffer->getLength()); - assert(writeIndex + framesToCopy <= processingBus->getSize()); + assert(readIndex + framesToCopy <= buffer->getSize()); + assert(writeIndex + framesToCopy <= processingBuffer->getSize()); - processingBus->copy(buffer->bus_.get(), readIndex, writeIndex, framesToCopy); + processingBuffer->copy(*buffer, readIndex, writeIndex, framesToCopy); writeIndex += framesToCopy; readIndex += framesToCopy; framesLeft -= framesToCopy; - if (readIndex >= buffer->getLength()) { + if (readIndex >= buffer->getSize()) { playedBuffersDuration_ += buffer->getDuration(); buffers_.pop(); @@ -226,7 +226,7 @@ void AudioBufferQueueSourceNode::processWithoutInterpolation( buffers_.emplace(bufferId, tailBuffer_); addExtraTailFrames_ = false; } else { - processingBus->zero(writeIndex, framesLeft); + processingBuffer->zero(writeIndex, framesLeft); readIndex = 0; break; @@ -245,7 +245,7 @@ void AudioBufferQueueSourceNode::processWithoutInterpolation( } void AudioBufferQueueSourceNode::processWithInterpolation( - const std::shared_ptr &processingBus, + const std::shared_ptr &processingBuffer, size_t startOffset, size_t offsetLength, float playbackRate) { @@ -264,7 +264,7 @@ void AudioBufferQueueSourceNode::processWithInterpolation( bool crossBufferInterpolation = false; std::shared_ptr nextBuffer = nullptr; - if (nextReadIndex >= buffer->getLength()) { + if (nextReadIndex >= buffer->getSize()) { if (buffers_.size() > 1) { auto tempQueue = buffers_; tempQueue.pop(); @@ -276,12 +276,12 @@ void AudioBufferQueueSourceNode::processWithInterpolation( } } - for (int i = 0; i < processingBus->getNumberOfChannels(); i += 1) { - float *destination = processingBus->getChannel(i)->getData(); - const float *currentSource = buffer->bus_->getChannel(i)->getData(); + for (size_t i = 0; i < 
processingBuffer->getNumberOfChannels(); i += 1) { + const auto destination = processingBuffer->getChannel(i)->span(); + const auto currentSource = buffer->getChannel(i)->span(); if (crossBufferInterpolation) { - const float *nextSource = nextBuffer->bus_->getChannel(i)->getData(); + const auto nextSource = nextBuffer->getChannel(i)->span(); float currentSample = currentSource[readIndex]; float nextSample = nextSource[nextReadIndex]; destination[writeIndex] = currentSample + factor * (nextSample - currentSample); @@ -296,19 +296,19 @@ void AudioBufferQueueSourceNode::processWithInterpolation( vReadIndex_ += std::abs(playbackRate); framesLeft -= 1; - if (vReadIndex_ >= static_cast(buffer->getLength())) { + if (vReadIndex_ >= static_cast(buffer->getSize())) { playedBuffersDuration_ += buffer->getDuration(); buffers_.pop(); sendOnBufferEndedEvent(bufferId, buffers_.empty()); if (buffers_.empty()) { - processingBus->zero(writeIndex, framesLeft); + processingBuffer->zero(writeIndex, framesLeft); vReadIndex_ = 0.0; break; } - vReadIndex_ = vReadIndex_ - buffer->getLength(); + vReadIndex_ = vReadIndex_ - buffer->getSize(); data = buffers_.front(); bufferId = data.first; buffer = data.second; diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferQueueSourceNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferQueueSourceNode.h index 6e017018b..7ab344fef 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferQueueSourceNode.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferQueueSourceNode.h @@ -1,8 +1,8 @@ #pragma once -#include #include #include +#include #include #include @@ -12,7 +12,7 @@ namespace audioapi { -class AudioBus; +class AudioBuffer; class AudioParam; struct BaseAudioBufferSourceOptions; @@ -37,8 +37,8 @@ class AudioBufferQueueSourceNode : public AudioBufferBaseSourceNode { void setOnBufferEndedCallbackId(uint64_t callbackId); 
protected: - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override; double getCurrentPosition() const override; @@ -59,13 +59,13 @@ class AudioBufferQueueSourceNode : public AudioBufferBaseSourceNode { std::atomic onBufferEndedCallbackId_ = 0; // 0 means no callback void processWithoutInterpolation( - const std::shared_ptr &processingBus, + const std::shared_ptr &processingBuffer, size_t startOffset, size_t offsetLength, float playbackRate) override; void processWithInterpolation( - const std::shared_ptr &processingBus, + const std::shared_ptr &processingBuffer, size_t startOffset, size_t offsetLength, float playbackRate) override; diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferSourceNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferSourceNode.cpp index 517955198..a2a9ba4c0 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferSourceNode.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferSourceNode.cpp @@ -4,10 +4,10 @@ #include #include #include -#include +#include #include #include -#include +#include #include #include @@ -22,7 +22,7 @@ AudioBufferSourceNode::AudioBufferSourceNode( loopStart_(options.loopStart), loopEnd_(options.loopEnd) { buffer_ = std::shared_ptr(options.buffer); - alignedBus_ = std::shared_ptr(nullptr); + alignedBuffer_ = std::shared_ptr(nullptr); isInitialized_ = true; } @@ -31,7 +31,7 @@ AudioBufferSourceNode::~AudioBufferSourceNode() { Locker locker(getBufferLock()); buffer_.reset(); - alignedBus_.reset(); + alignedBuffer_.reset(); } bool AudioBufferSourceNode::getLoop() const { @@ -81,7 +81,7 @@ void AudioBufferSourceNode::setBuffer(const std::shared_ptr &buffer if (buffer == nullptr || context == nullptr) { buffer_ = std::shared_ptr(nullptr); - alignedBus_ = 
std::shared_ptr(nullptr); + alignedBuffer_ = std::shared_ptr(nullptr); loopEnd_ = 0; return; } @@ -89,24 +89,24 @@ void AudioBufferSourceNode::setBuffer(const std::shared_ptr &buffer buffer_ = buffer; channelCount_ = buffer_->getNumberOfChannels(); - stretch_->presetDefault(channelCount_, buffer_->getSampleRate()); + stretch_->presetDefault(static_cast(channelCount_), buffer_->getSampleRate()); if (pitchCorrection_) { int extraTailFrames = static_cast((getInputLatency() + getOutputLatency()) * context->getSampleRate()); - size_t totalSize = buffer_->getLength() + extraTailFrames; + size_t totalSize = buffer_->getSize() + extraTailFrames; - alignedBus_ = std::make_shared(totalSize, channelCount_, buffer_->getSampleRate()); - alignedBus_->copy(buffer_->bus_.get(), 0, 0, buffer_->getLength()); + alignedBuffer_ = std::make_shared(totalSize, channelCount_, buffer_->getSampleRate()); + alignedBuffer_->copy(*buffer_, 0, 0, buffer_->getSize()); - alignedBus_->zero(buffer_->getLength(), extraTailFrames); + alignedBuffer_->zero(buffer_->getSize(), extraTailFrames); } else { - alignedBus_ = std::make_shared(*buffer_->bus_); + alignedBuffer_ = std::make_shared(*buffer_); } - audioBus_ = - std::make_shared(RENDER_QUANTUM_SIZE, channelCount_, context->getSampleRate()); - playbackRateBus_ = - std::make_shared(RENDER_QUANTUM_SIZE * 3, channelCount_, context->getSampleRate()); + audioBuffer_ = + std::make_shared(RENDER_QUANTUM_SIZE, channelCount_, context->getSampleRate()); + playbackRateBuffer_ = std::make_shared( + RENDER_QUANTUM_SIZE * 3, channelCount_, context->getSampleRate()); loopEnd_ = buffer_->getDuration(); } @@ -118,23 +118,23 @@ void AudioBufferSourceNode::start(double when, double offset, double duration) { AudioScheduledSourceNode::stop(when + duration); } - if (!alignedBus_) { + if (!alignedBuffer_) { return; } offset = - std::min(offset, static_cast(alignedBus_->getSize()) / alignedBus_->getSampleRate()); + std::min(offset, static_cast(alignedBuffer_->getSize()) / 
alignedBuffer_->getSampleRate()); if (loop_) { offset = std::min(offset, loopEnd_); } - vReadIndex_ = static_cast(alignedBus_->getSampleRate() * offset); + vReadIndex_ = static_cast(alignedBuffer_->getSampleRate() * offset); } void AudioBufferSourceNode::disable() { AudioScheduledSourceNode::disable(); - alignedBus_.reset(); + alignedBuffer_.reset(); } void AudioBufferSourceNode::setOnLoopEndedCallbackId(uint64_t callbackId) { @@ -145,28 +145,28 @@ void AudioBufferSourceNode::setOnLoopEndedCallbackId(uint64_t callbackId) { } } -std::shared_ptr AudioBufferSourceNode::processNode( - const std::shared_ptr &processingBus, +std::shared_ptr AudioBufferSourceNode::processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) { if (auto locker = Locker::tryLock(getBufferLock())) { // No audio data to fill, zero the output and return. - if (!alignedBus_) { - processingBus->zero(); - return processingBus; + if (!alignedBuffer_) { + processingBuffer->zero(); + return processingBuffer; } if (!pitchCorrection_) { - processWithoutPitchCorrection(processingBus, framesToProcess); + processWithoutPitchCorrection(processingBuffer, framesToProcess); } else { - processWithPitchCorrection(processingBus, framesToProcess); + processWithPitchCorrection(processingBuffer, framesToProcess); } handleStopScheduled(); } else { - processingBus->zero(); + processingBuffer->zero(); } - return processingBus; + return processingBuffer; } double AudioBufferSourceNode::getCurrentPosition() const { @@ -186,7 +186,7 @@ void AudioBufferSourceNode::sendOnLoopEndedEvent() { */ void AudioBufferSourceNode::processWithoutInterpolation( - const std::shared_ptr &processingBus, + const std::shared_ptr &processingBuffer, size_t startOffset, size_t offsetLength, float playbackRate) { @@ -201,7 +201,7 @@ void AudioBufferSourceNode::processWithoutInterpolation( frameStart = static_cast(getVirtualStartFrame(context->getSampleRate())); frameEnd = 
static_cast(getVirtualEndFrame(context->getSampleRate())); } else { - processingBus->zero(); + processingBuffer->zero(); return; } size_t frameDelta = frameEnd - frameStart; @@ -223,18 +223,16 @@ void AudioBufferSourceNode::processWithoutInterpolation( assert(readIndex >= 0); assert(writeIndex >= 0); - assert(readIndex + framesToCopy <= alignedBus_->getSize()); - assert(writeIndex + framesToCopy <= processingBus->getSize()); + assert(readIndex + framesToCopy <= alignedBuffer_->getSize()); + assert(writeIndex + framesToCopy <= processingBuffer->getSize()); // Direction is forward, we can normally copy the data if (direction == 1) { - processingBus->copy(alignedBus_.get(), readIndex, writeIndex, framesToCopy); + processingBuffer->copy(*alignedBuffer_, readIndex, writeIndex, framesToCopy); } else { - for (int i = 0; i < framesToCopy; i += 1) { - for (int j = 0; j < processingBus->getNumberOfChannels(); j += 1) { - (*processingBus->getChannel(j))[writeIndex + i] = - (*alignedBus_->getChannel(j))[readIndex - i]; - } + for (size_t ch = 0; ch < processingBuffer->getNumberOfChannels(); ch += 1) { + processingBuffer->getChannel(ch)->copyReverse( + *alignedBuffer_->getChannel(ch), readIndex, writeIndex, framesToCopy); } } @@ -248,7 +246,7 @@ void AudioBufferSourceNode::processWithoutInterpolation( readIndex -= direction * frameDelta; if (!loop_) { - processingBus->zero(writeIndex, framesLeft); + processingBuffer->zero(writeIndex, framesLeft); playbackState_ = PlaybackState::STOP_SCHEDULED; break; } @@ -262,7 +260,7 @@ void AudioBufferSourceNode::processWithoutInterpolation( } void AudioBufferSourceNode::processWithInterpolation( - const std::shared_ptr &processingBus, + const std::shared_ptr &processingBuffer, size_t startOffset, size_t offsetLength, float playbackRate) { @@ -276,7 +274,7 @@ void AudioBufferSourceNode::processWithInterpolation( vFrameStart = getVirtualStartFrame(context->getSampleRate()); vFrameEnd = getVirtualEndFrame(context->getSampleRate()); } else { - 
processingBus->zero(); + processingBuffer->zero(); return; } auto vFrameDelta = vFrameEnd - vFrameStart; @@ -300,9 +298,9 @@ void AudioBufferSourceNode::processWithInterpolation( nextReadIndex = loop_ ? frameStart : readIndex; } - for (int i = 0; i < processingBus->getNumberOfChannels(); i += 1) { - float *destination = processingBus->getChannel(i)->getData(); - const float *source = alignedBus_->getChannel(i)->getData(); + for (size_t i = 0; i < processingBuffer->getNumberOfChannels(); i++) { + auto destination = processingBuffer->getChannel(i)->span(); + const auto source = alignedBuffer_->getChannel(i)->span(); destination[writeIndex] = dsp::linearInterpolate(source, readIndex, nextReadIndex, factor); } @@ -315,7 +313,7 @@ void AudioBufferSourceNode::processWithInterpolation( vReadIndex_ -= static_cast(direction) * vFrameDelta; if (!loop_) { - processingBus->zero(writeIndex, framesLeft); + processingBuffer->zero(writeIndex, framesLeft); playbackState_ = PlaybackState::STOP_SCHEDULED; break; } @@ -331,7 +329,7 @@ double AudioBufferSourceNode::getVirtualStartFrame(float sampleRate) const { } double AudioBufferSourceNode::getVirtualEndFrame(float sampleRate) { - auto inputBufferLength = static_cast(alignedBus_->getSize()); + auto inputBufferLength = static_cast(alignedBuffer_->getSize()); auto loopEndFrame = loopEnd_ * sampleRate; return loop_ && loopEndFrame > 0 && loopStart_ < loopEnd_ diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferSourceNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferSourceNode.h index 806d513c9..5a28a2c95 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferSourceNode.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioBufferSourceNode.h @@ -1,8 +1,8 @@ #pragma once -#include #include #include +#include #include #include @@ -11,7 +11,7 @@ namespace audioapi { -class AudioBus; +class AudioBuffer; class 
AudioParam; struct AudioBufferSourceOptions; @@ -41,8 +41,8 @@ class AudioBufferSourceNode : public AudioBufferBaseSourceNode { void setOnLoopEndedCallbackId(uint64_t callbackId); protected: - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override; double getCurrentPosition() const override; @@ -55,19 +55,19 @@ class AudioBufferSourceNode : public AudioBufferBaseSourceNode { // User provided buffer std::shared_ptr buffer_; - std::shared_ptr alignedBus_; + std::shared_ptr alignedBuffer_; std::atomic onLoopEndedCallbackId_ = 0; // 0 means no callback void sendOnLoopEndedEvent(); void processWithoutInterpolation( - const std::shared_ptr &processingBus, + const std::shared_ptr &processingBuffer, size_t startOffset, size_t offsetLength, float playbackRate) override; void processWithInterpolation( - const std::shared_ptr &processingBus, + const std::shared_ptr &processingBuffer, size_t startOffset, size_t offsetLength, float playbackRate) override; diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.cpp index 10d557e0c..a2e048436 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.cpp @@ -1,10 +1,10 @@ #include #include #include -#include +#include #include #include -#include +#include #if !RN_AUDIO_API_TEST #include @@ -71,7 +71,7 @@ void AudioScheduledSourceNode::setOnEndedCallbackId(const uint64_t callbackId) { } void AudioScheduledSourceNode::updatePlaybackInfo( - const std::shared_ptr &processingBus, + const std::shared_ptr &processingBuffer, int framesToProcess, size_t &startOffset, size_t &nonSilentFramesToProcess, @@ -118,10 +118,10 @@ void 
AudioScheduledSourceNode::updatePlaybackInfo( // stop will happen in the same render quantum if (stopFrame <= lastFrame && stopFrame >= firstFrame) { playbackState_ = PlaybackState::STOP_SCHEDULED; - processingBus->zero(stopFrame - firstFrame, lastFrame - stopFrame); + processingBuffer->zero(stopFrame - firstFrame, lastFrame - stopFrame); } - processingBus->zero(0, startOffset); + processingBuffer->zero(0, startOffset); return; } @@ -137,7 +137,7 @@ void AudioScheduledSourceNode::updatePlaybackInfo( assert(startOffset <= framesToProcess); assert(nonSilentFramesToProcess <= framesToProcess); - processingBus->zero(stopFrame - firstFrame, lastFrame - stopFrame); + processingBuffer->zero(stopFrame - firstFrame, lastFrame - stopFrame); return; } @@ -167,7 +167,8 @@ void AudioScheduledSourceNode::disable() { auto onEndedCallbackId = onEndedCallbackId_.load(std::memory_order_acquire); if (onEndedCallbackId != 0) { - audioEventHandlerRegistry_->invokeHandlerWithEventBody(AudioEvent::ENDED, onEndedCallbackId, {}); + audioEventHandlerRegistry_->invokeHandlerWithEventBody( + AudioEvent::ENDED, onEndedCallbackId, {}); } } diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.h index e39735a12..118f5dc49 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/AudioScheduledSourceNode.h @@ -54,7 +54,7 @@ class AudioScheduledSourceNode : public AudioNode { std::shared_ptr audioEventHandlerRegistry_; void updatePlaybackInfo( - const std::shared_ptr &processingBus, + const std::shared_ptr &processingBuffer, int framesToProcess, size_t &startOffset, size_t &nonSilentFramesToProcess, diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/ConstantSourceNode.cpp 
b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/ConstantSourceNode.cpp index ec2b97ed3..2516d1b4d 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/ConstantSourceNode.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/ConstantSourceNode.cpp @@ -1,9 +1,9 @@ #include #include #include -#include +#include #include -#include +#include #include namespace audioapi { @@ -20,20 +20,20 @@ std::shared_ptr ConstantSourceNode::getOffsetParam() const { return offsetParam_; } -std::shared_ptr ConstantSourceNode::processNode( - const std::shared_ptr &processingBus, +std::shared_ptr ConstantSourceNode::processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) { size_t startOffset = 0; size_t offsetLength = 0; std::shared_ptr context = context_.lock(); if (context == nullptr) { - processingBus->zero(); - return processingBus; + processingBuffer->zero(); + return processingBuffer; } updatePlaybackInfo( - processingBus, + processingBuffer, framesToProcess, startOffset, offsetLength, @@ -41,25 +41,22 @@ std::shared_ptr ConstantSourceNode::processNode( context->getCurrentSampleFrame()); if (!isPlaying() && !isStopScheduled()) { - processingBus->zero(); - return processingBus; + processingBuffer->zero(); + return processingBuffer; } - auto offsetBus = offsetParam_->processARateParam(framesToProcess, context->getCurrentTime()); - auto offsetChannelData = offsetBus->getChannel(0)->getData(); - for (int channel = 0; channel < processingBus->getNumberOfChannels(); ++channel) { - auto outputChannelData = processingBus->getChannel(channel)->getData(); + auto offsetChannel = + offsetParam_->processARateParam(framesToProcess, context->getCurrentTime())->getChannel(0); - std::copy( - offsetChannelData + startOffset, - offsetChannelData + startOffset + offsetLength, - outputChannelData + startOffset); + for (size_t channel = 0; channel < processingBuffer->getNumberOfChannels(); ++channel) { + 
processingBuffer->getChannel(channel)->copy( + *offsetChannel, startOffset, startOffset, offsetLength); } if (isStopScheduled()) { handleStopScheduled(); } - return processingBus; + return processingBuffer; } } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/ConstantSourceNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/ConstantSourceNode.h index 8b83ad762..214fba989 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/ConstantSourceNode.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/ConstantSourceNode.h @@ -9,7 +9,7 @@ namespace audioapi { -class AudioBus; +class AudioBuffer; struct ConstantSourceOptions; class ConstantSourceNode : public AudioScheduledSourceNode { @@ -21,8 +21,8 @@ class ConstantSourceNode : public AudioScheduledSourceNode { [[nodiscard]] std::shared_ptr getOffsetParam() const; protected: - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override; private: diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/OscillatorNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/OscillatorNode.cpp index 828218ae3..62ef6bd8a 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/OscillatorNode.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/OscillatorNode.cpp @@ -1,9 +1,9 @@ #include #include #include -#include +#include #include -#include +#include #include #include @@ -27,7 +27,7 @@ OscillatorNode::OscillatorNode( periodicWave_ = context->getBasicWaveForm(type_); } - audioBus_ = std::make_shared(RENDER_QUANTUM_SIZE, 1, context->getSampleRate()); + audioBuffer_ = std::make_shared(RENDER_QUANTUM_SIZE, 1, context->getSampleRate()); isInitialized_ = true; } @@ -56,20 +56,20 @@ void OscillatorNode::setPeriodicWave(const 
std::shared_ptr &period type_ = OscillatorType::CUSTOM; } -std::shared_ptr OscillatorNode::processNode( - const std::shared_ptr &processingBus, +std::shared_ptr OscillatorNode::processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) { size_t startOffset = 0; size_t offsetLength = 0; std::shared_ptr context = context_.lock(); if (context == nullptr) { - processingBus->zero(); - return processingBus; + processingBuffer->zero(); + return processingBuffer; } updatePlaybackInfo( - processingBus, + processingBuffer, framesToProcess, startOffset, offsetLength, @@ -77,34 +77,50 @@ std::shared_ptr OscillatorNode::processNode( context->getCurrentSampleFrame()); if (!isPlaying() && !isStopScheduled()) { - processingBus->zero(); - return processingBus; + processingBuffer->zero(); + return processingBuffer; } auto time = context->getCurrentTime() + static_cast(startOffset) * 1.0 / context->getSampleRate(); - auto detuneParamValues = detuneParam_->processARateParam(framesToProcess, time); - auto frequencyParamValues = frequencyParam_->processARateParam(framesToProcess, time); + auto detuneSpan = detuneParam_->processARateParam(framesToProcess, time)->getChannel(0)->span(); + auto freqSpan = frequencyParam_->processARateParam(framesToProcess, time)->getChannel(0)->span(); - for (size_t i = startOffset; i < offsetLength; i += 1) { - auto detuneRatio = std::pow(2.0f, detuneParamValues->getChannel(0)->getData()[i] / 1200.0f); - auto detunedFrequency = frequencyParamValues->getChannel(0)->getData()[i] * detuneRatio; - auto phaseIncrement = detunedFrequency * periodicWave_->getScale(); + const auto tableSize = static_cast(periodicWave_->getPeriodicWaveSize()); + const auto tableScale = periodicWave_->getScale(); + const auto numChannels = processingBuffer->getNumberOfChannels(); - float sample = periodicWave_->getSample(detunedFrequency, phase_, phaseIncrement); + auto finalPhase = phase_; - for (int j = 0; j < processingBus->getNumberOfChannels(); j += 1) { - 
(*processingBus->getChannel(j))[i] = sample; + for (size_t ch = 0; ch < numChannels; ch += 1) { + auto channelSpan = processingBuffer->getChannel(ch)->span(); + float currentPhase = phase_; + + for (size_t i = startOffset; i < offsetLength; i += 1) { + auto detuneRatio = detuneSpan[i] == 0 ? 1.0f : std::pow(2.0f, detuneSpan[i] / 1200.0f); + auto detunedFrequency = freqSpan[i] * detuneRatio; + auto phaseIncrement = detunedFrequency * tableScale; + + channelSpan[i] = periodicWave_->getSample(detunedFrequency, currentPhase, phaseIncrement); + + currentPhase += phaseIncrement; + + if (currentPhase >= tableSize) { + currentPhase -= tableSize; + } else if (currentPhase < 0.0f) { + currentPhase += tableSize; + } } - phase_ += phaseIncrement; - phase_ -= floor(phase_ / static_cast(periodicWave_->getPeriodicWaveSize())) * - static_cast(periodicWave_->getPeriodicWaveSize()); + if (ch == 0) { + finalPhase = currentPhase; + } } + phase_ = finalPhase; handleStopScheduled(); - return processingBus; + return processingBuffer; } } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/OscillatorNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/OscillatorNode.h index cfd0908a2..c76fe956f 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/OscillatorNode.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/OscillatorNode.h @@ -12,7 +12,7 @@ namespace audioapi { -class AudioBus; +class AudioBuffer; struct OscillatorOptions; class OscillatorNode : public AudioScheduledSourceNode { @@ -28,8 +28,8 @@ class OscillatorNode : public AudioScheduledSourceNode { void setPeriodicWave(const std::shared_ptr &periodicWave); protected: - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override; private: diff --git 
a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/RecorderAdapterNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/RecorderAdapterNode.cpp index 94c5539d4..fe8acfff4 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/RecorderAdapterNode.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/RecorderAdapterNode.cpp @@ -3,7 +3,7 @@ #include #include #include -#include +#include #include #include @@ -32,8 +32,8 @@ void RecorderAdapterNode::init(size_t bufferSize, int channelCount) { // This assumes that the sample rate is the same in audio context and recorder. // (recorder is not enforcing any sample rate on the system*). This means that only - // channel mixing might be required. To do so, we create an output bus with - // the desired channel count and will take advantage of the AudioBus sum method. + // channel mixing might be required. To do so, we create an output buffer with + // the desired channel count and will take advantage of the AudioBuffer sum method. // // * any allocations required by the recorder (including this method) are during recording start // or after, which means that audio context has already setup the system in 99% of sane cases. @@ -41,36 +41,36 @@ void RecorderAdapterNode::init(size_t bufferSize, int channelCount) { // we would need to add sample rate conversion as well or other weird bullshit like resampling // context output and not enforcing anything on the system output/input configuration. 
// A lot of words for a couple of lines of implementation :shrug: - adapterOutputBus_ = - std::make_shared(RENDER_QUANTUM_SIZE, channelCount_, context->getSampleRate()); + adapterOutputBuffer_ = + std::make_shared(RENDER_QUANTUM_SIZE, channelCount_, context->getSampleRate()); isInitialized_ = true; } void RecorderAdapterNode::cleanup() { isInitialized_ = false; buff_.clear(); - adapterOutputBus_.reset(); + adapterOutputBuffer_.reset(); } -std::shared_ptr RecorderAdapterNode::processNode( - const std::shared_ptr &processingBus, +std::shared_ptr RecorderAdapterNode::processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) { if (!isInitialized_) { - processingBus->zero(); - return processingBus; + processingBuffer->zero(); + return processingBuffer; } readFrames(framesToProcess); - processingBus->sum(adapterOutputBus_.get(), ChannelInterpretation::SPEAKERS); - return processingBus; + processingBuffer->sum(*adapterOutputBuffer_, ChannelInterpretation::SPEAKERS); + return processingBuffer; } void RecorderAdapterNode::readFrames(const size_t framesToRead) { - adapterOutputBus_->zero(); + adapterOutputBuffer_->zero(); for (size_t channel = 0; channel < channelCount_; ++channel) { - buff_[channel]->read(adapterOutputBus_->getChannel(channel)->getData(), framesToRead); + buff_[channel]->read(*adapterOutputBuffer_->getChannel(channel), framesToRead); } } diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/RecorderAdapterNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/RecorderAdapterNode.h index fdf80e11c..0e17b264d 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/RecorderAdapterNode.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/RecorderAdapterNode.h @@ -10,7 +10,7 @@ namespace audioapi { -class AudioBus; +class AudioBuffer; /// @brief RecorderAdapterNode is an AudioNode which adapts push Recorder into pull graph. 
/// It uses RingBuffer to store audio data and AudioParam to provide audio data in pull mode. @@ -28,18 +28,18 @@ class RecorderAdapterNode : public AudioNode { void init(size_t bufferSize, int channelCount); void cleanup(); - int channelCount_; - // TODO: CircularOverflowableAudioBus + int channelCount_{}; + // TODO: CircularOverflowableAudioBuffer std::vector> buff_; protected: - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override; - std::shared_ptr adapterOutputBus_; + std::shared_ptr adapterOutputBuffer_; private: - /// @brief Read audio frames from the recorder's internal circular buffer into output buss. + /// @brief Read audio frames from the recorder's internal circular buffer into output buffers. /// @note If `framesToRead` is greater than the number of available frames, it will fill empty space with silence. /// @param framesToRead Number of frames to read. void readFrames(size_t framesToRead); diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/StreamerNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/StreamerNode.cpp index adca9b0c8..bf8c60729 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/StreamerNode.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/StreamerNode.cpp @@ -12,9 +12,8 @@ #include #include #include -#include #include -#include +#include #include #include #include @@ -35,7 +34,8 @@ StreamerNode::StreamerNode( frame_(nullptr), swrCtx_(nullptr), resampledData_(nullptr), - bufferedBus_(nullptr), + bufferedAudioBuffer_(nullptr), + bufferedAudioBufferSize_(0), audio_stream_index_(-1), maxResampledSamples_(0), processedSamples_(0) {} @@ -88,8 +88,8 @@ bool StreamerNode::initialize(const std::string &input_url) { } channelCount_ = codecpar_->ch_layout.nb_channels; - audioBus_ = - std::make_shared(RENDER_QUANTUM_SIZE, 
channelCount_, context->getSampleRate()); + audioBuffer_ = + std::make_shared(RENDER_QUANTUM_SIZE, channelCount_, context->getSampleRate()); auto [sender, receiver] = channels::spsc::channel< StreamingData, @@ -106,19 +106,19 @@ bool StreamerNode::initialize(const std::string &input_url) { #endif // RN_AUDIO_API_FFMPEG_DISABLED } -std::shared_ptr StreamerNode::processNode( - const std::shared_ptr &processingBus, +std::shared_ptr StreamerNode::processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) { #if !RN_AUDIO_API_FFMPEG_DISABLED size_t startOffset = 0; size_t offsetLength = 0; std::shared_ptr context = context_.lock(); if (context == nullptr) { - processingBus->zero(); - return processingBus; + processingBuffer->zero(); + return processingBuffer; } updatePlaybackInfo( - processingBus, + processingBuffer, framesToProcess, startOffset, offsetLength, @@ -127,45 +127,35 @@ std::shared_ptr StreamerNode::processNode( isNodeFinished_.store(isFinished(), std::memory_order_release); if (!isPlaying() && !isStopScheduled()) { - processingBus->zero(); - return processingBus; + processingBuffer->zero(); + return processingBuffer; } - int bufferRemaining = bufferedBusSize_ - processedSamples_; + int bufferRemaining = bufferedAudioBufferSize_ - processedSamples_; int alreadyProcessed = 0; if (bufferRemaining < framesToProcess) { - if (bufferedBus_ != nullptr) { - for (int ch = 0; ch < processingBus->getNumberOfChannels(); ch++) { - memcpy( - processingBus->getChannel(ch)->getData(), - bufferedBus_->getChannel(ch)->getData() + processedSamples_, - bufferRemaining * sizeof(float)); - } + if (bufferedAudioBuffer_ != nullptr) { + processingBuffer->copy(*bufferedAudioBuffer_, processedSamples_, 0, bufferRemaining); framesToProcess -= bufferRemaining; alreadyProcessed += bufferRemaining; } StreamingData data; auto res = receiver_.try_receive(data); if (res == channels::spsc::ResponseStatus::SUCCESS) { - bufferedBus_ = std::make_shared(std::move(data.bus)); - 
bufferedBusSize_ = data.size; + bufferedAudioBuffer_ = std::make_shared(std::move(data.buffer)); + bufferedAudioBufferSize_ = data.size; processedSamples_ = 0; } else { - bufferedBus_ = nullptr; + bufferedAudioBuffer_ = nullptr; } } - if (bufferedBus_ != nullptr) { - for (int ch = 0; ch < processingBus->getNumberOfChannels(); ch++) { - memcpy( - processingBus->getChannel(ch)->getData() + alreadyProcessed, - bufferedBus_->getChannel(ch)->getData() + processedSamples_, - framesToProcess * sizeof(float)); - } + if (bufferedAudioBuffer_ != nullptr) { + processingBuffer->copy(*bufferedAudioBuffer_, processedSamples_, alreadyProcessed, framesToProcess); processedSamples_ += framesToProcess; } #endif // RN_AUDIO_API_FFMPEG_DISABLED - return processingBus; + return processingBuffer; } #if !RN_AUDIO_API_FFMPEG_DISABLED @@ -230,7 +220,7 @@ void StreamerNode::streamAudio() { bool StreamerNode::processFrameWithResampler( AVFrame *frame, - std::shared_ptr context) { + const std::shared_ptr &context) { // Check if we need to reallocate the resampled buffer int out_samples = swr_get_out_samples(swrCtx_, frame->nb_samples); if (out_samples > maxResampledSamples_) { @@ -267,16 +257,16 @@ bool StreamerNode::processFrameWithResampler( if (this->isFinished()) { return true; } - auto bus = AudioBus( - static_cast(converted_samples), - codecCtx_->ch_layout.nb_channels, - context->getSampleRate()); - for (int ch = 0; ch < codecCtx_->ch_layout.nb_channels; ch++) { + + AudioBuffer buffer = + AudioBuffer(converted_samples, codecCtx_->ch_layout.nb_channels, context->getSampleRate()); + + for (size_t ch = 0; ch < codecCtx_->ch_layout.nb_channels; ch++) { auto *src = reinterpret_cast(resampledData_[ch]); - float *dst = bus.getChannel(ch)->getData(); - memcpy(dst, src, converted_samples * sizeof(float)); + buffer.getChannel(ch)->copy(src, 0, 0, converted_samples); } - StreamingData data{std::move(bus), static_cast(converted_samples)}; + + StreamingData data{std::move(buffer), 
static_cast(converted_samples)}; sender_.send(std::move(data)); return true; } diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/StreamerNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/StreamerNode.h index 8f8fee70e..45de420b2 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/StreamerNode.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/StreamerNode.h @@ -11,7 +11,7 @@ #pragma once #include -#include +#include #if !RN_AUDIO_API_FFMPEG_DISABLED extern "C" { @@ -40,17 +40,17 @@ inline constexpr auto VERBOSE = false; inline constexpr auto CHANNEL_CAPACITY = 32; struct StreamingData { - audioapi::AudioBus bus; + audioapi::AudioBuffer buffer; size_t size; StreamingData() = default; - StreamingData(audioapi::AudioBus b, size_t s) : bus(b), size(s) {} - StreamingData(const StreamingData &data) : bus(data.bus), size(data.size) {} - StreamingData(StreamingData &&data) noexcept : bus(std::move(data.bus)), size(data.size) {} + StreamingData(audioapi::AudioBuffer b, size_t s) : buffer(b), size(s) {} + StreamingData(const StreamingData &data) : buffer(data.buffer), size(data.size) {} + StreamingData(StreamingData &&data) noexcept : buffer(std::move(data.buffer)), size(data.size) {} StreamingData &operator=(const StreamingData &data) { if (this == &data) { return *this; } - bus = data.bus; + buffer = data.buffer; size = data.size; return *this; } @@ -58,7 +58,7 @@ struct StreamingData { namespace audioapi { -class AudioBus; +class AudioBuffer; struct StreamerOptions; class StreamerNode : public AudioScheduledSourceNode { @@ -78,8 +78,8 @@ class StreamerNode : public AudioScheduledSourceNode { } protected: - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override; private: @@ -95,9 +95,9 @@ class StreamerNode : public AudioScheduledSourceNode { SwrContext 
*swrCtx_; uint8_t **resampledData_; // weird ffmpeg way of using raw byte pointers for resampled data - std::shared_ptr bufferedBus_; // audio bus for buffering hls frames - size_t bufferedBusSize_; // size of currently buffered bus - int audio_stream_index_; // index of the audio stream channel in the input + std::shared_ptr bufferedAudioBuffer_; // audio buffer for buffering hls frames + size_t bufferedAudioBufferSize_; // size of currently buffered buffer + int audio_stream_index_; // index of the audio stream channel in the input int maxResampledSamples_; size_t processedSamples_; @@ -125,12 +125,12 @@ class StreamerNode : public AudioScheduledSourceNode { * @param frame The AVFrame to resample * @return true if successful, false otherwise */ - bool processFrameWithResampler(AVFrame *frame, std::shared_ptr context); + bool processFrameWithResampler(AVFrame *frame, const std::shared_ptr &context); /** * @brief Thread function to continuously read and process audio frames * @details This function runs in a separate thread to avoid blocking the main audio processing thread - * @note It will read frames from the input stream, resample them, and store them in the buffered bus + * @note It will read frames from the input stream, resample them, and store them in the buffered buffer * @note The thread will stop when streamFlag is set to false */ void streamAudio(); diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/WorkletSourceNode.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/WorkletSourceNode.cpp index a243742a2..09d9450a8 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/WorkletSourceNode.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/WorkletSourceNode.cpp @@ -6,7 +6,7 @@ namespace audioapi { WorkletSourceNode::WorkletSourceNode( - std::shared_ptr context, + const std::shared_ptr &context, WorkletsRunner &&workletRunner) : AudioScheduledSourceNode(context), 
workletRunner_(std::move(workletRunner)) { isInitialized_ = true; @@ -15,17 +15,16 @@ WorkletSourceNode::WorkletSourceNode( size_t outputChannelCount = this->getChannelCount(); outputBuffsHandles_.resize(outputChannelCount); for (size_t i = 0; i < outputChannelCount; ++i) { - auto audioArray = std::make_shared(RENDER_QUANTUM_SIZE); - outputBuffsHandles_[i] = std::make_shared(audioArray); + outputBuffsHandles_[i] = std::make_shared(RENDER_QUANTUM_SIZE); } } -std::shared_ptr WorkletSourceNode::processNode( - const std::shared_ptr &processingBus, +std::shared_ptr WorkletSourceNode::processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) { if (isUnscheduled() || isFinished() || !isEnabled()) { - processingBus->zero(); - return processingBus; + processingBuffer->zero(); + return processingBuffer; } size_t startOffset = 0; @@ -33,20 +32,27 @@ std::shared_ptr WorkletSourceNode::processNode( std::shared_ptr context = context_.lock(); if (context == nullptr) { - processingBus->zero(); - return processingBus; + processingBuffer->zero(); + return processingBuffer; } - updatePlaybackInfo(processingBus, framesToProcess, startOffset, nonSilentFramesToProcess, context->getSampleRate(), context->getCurrentSampleFrame()); + updatePlaybackInfo( + processingBuffer, + framesToProcess, + startOffset, + nonSilentFramesToProcess, + context->getSampleRate(), + context->getCurrentSampleFrame()); if (nonSilentFramesToProcess == 0) { - processingBus->zero(); - return processingBus; + processingBuffer->zero(); + return processingBuffer; } - size_t outputChannelCount = processingBus->getNumberOfChannels(); + size_t outputChannelCount = processingBuffer->getNumberOfChannels(); auto result = workletRunner_.executeOnRuntimeSync( - [this, nonSilentFramesToProcess, startOffset, time = context->getCurrentTime()](jsi::Runtime &rt) { + [this, nonSilentFramesToProcess, startOffset, time = context->getCurrentTime()]( + jsi::Runtime &rt) { auto jsiArray = jsi::Array(rt, 
this->outputBuffsHandles_.size()); for (size_t i = 0; i < this->outputBuffsHandles_.size(); ++i) { auto arrayBuffer = jsi::ArrayBuffer(rt, this->outputBuffsHandles_[i]); @@ -66,22 +72,19 @@ std::shared_ptr WorkletSourceNode::processNode( // If the worklet execution failed, zero the output // It might happen if the runtime is not available if (!result.has_value()) { - processingBus->zero(); - return processingBus; + processingBuffer->zero(); + return processingBuffer; } - // Copy the processed data back to the AudioBus + // Copy the processed data back to the AudioBuffer for (size_t i = 0; i < outputChannelCount; ++i) { - float *channelData = processingBus->getChannel(i)->getData(); - memcpy( - channelData + startOffset, - outputBuffsHandles_[i]->data(), - nonSilentFramesToProcess * sizeof(float)); + processingBuffer->getChannel(i)->copy( + *outputBuffsHandles_[i], 0, startOffset, nonSilentFramesToProcess); } handleStopScheduled(); - return processingBus; + return processingBuffer; } } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/WorkletSourceNode.h b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/WorkletSourceNode.h index ce7e01515..9596f6822 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/sources/WorkletSourceNode.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/sources/WorkletSourceNode.h @@ -3,9 +3,9 @@ #include #include #include -#include #include -#include +#include +#include #include #include @@ -22,10 +22,10 @@ class WorkletSourceNode : public AudioScheduledSourceNode { : AudioScheduledSourceNode(context) {} protected: - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override { - return processingBus; + return processingBuffer; } }; #else @@ -33,12 +33,12 @@ class WorkletSourceNode : public AudioScheduledSourceNode { class 
WorkletSourceNode : public AudioScheduledSourceNode { public: explicit WorkletSourceNode( - std::shared_ptr context, + const std::shared_ptr &context, WorkletsRunner &&workletRunner); protected: - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override; private: diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioGraphManager.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioGraphManager.cpp index eb02e3b6b..f5bc89fcb 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioGraphManager.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioGraphManager.cpp @@ -1,22 +1,22 @@ #include #include -#include #include #include #include #include #include +#include #include #include #include namespace audioapi { -AudioGraphManager::Event::Event(Event &&other) { +AudioGraphManager::Event::Event(Event &&other) noexcept { *this = std::move(other); } -AudioGraphManager::Event &AudioGraphManager::Event::operator=(Event &&other) { +AudioGraphManager::Event &AudioGraphManager::Event::operator=(Event &&other) noexcept { if (this != &other) { // Clean up current resources this->~Event(); diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioGraphManager.h b/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioGraphManager.h index a9496ae27..401885498 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioGraphManager.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioGraphManager.h @@ -55,8 +55,8 @@ class AudioGraphManager { EventPayloadType payloadType; EventPayload payload; - Event(Event &&other); - Event &operator=(Event &&other); + Event(Event &&other) noexcept; + Event &operator=(Event &&other) noexcept; Event() : type(ConnectionType::CONNECT), 
payloadType(EventPayloadType::NODES), payload() {} ~Event(); }; diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioParamEventQueue.h b/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioParamEventQueue.h index e69514210..37abe6ef8 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioParamEventQueue.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioParamEventQueue.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include namespace audioapi { diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioRecorderCallback.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioRecorderCallback.cpp index b7d467140..08f3e1c11 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioRecorderCallback.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioRecorderCallback.cpp @@ -4,7 +4,7 @@ #include #include #include -#include +#include #include #include @@ -33,10 +33,10 @@ AudioRecorderCallback::AudioRecorderCallback( callbackId_(callbackId), audioEventHandlerRegistry_(audioEventHandlerRegistry) { ringBufferSize_ = std::max(bufferLength * 2, static_cast(8192)); - circularBus_.resize(channelCount_); + circularBuffer_.resize(channelCount_); for (size_t i = 0; i < channelCount_; ++i) { - circularBus_[i] = std::make_shared(ringBufferSize_); + circularBuffer_[i] = std::make_shared(ringBufferSize_); } isInitialized_.store(true, std::memory_order_release); @@ -49,34 +49,35 @@ AudioRecorderCallback::~AudioRecorderCallback() { /// @brief Emits audio data from the circular buffer when enough frames are available. /// @param flush If true, emits all available data regardless of buffer length. void AudioRecorderCallback::emitAudioData(bool flush) { - size_t sizeLimit = flush ? circularBus_[0]->getNumberOfAvailableFrames() : bufferLength_; + size_t sizeLimit = flush ? 
circularBuffer_[0]->getNumberOfAvailableFrames() : bufferLength_; if (sizeLimit == 0) { return; } - while (circularBus_[0]->getNumberOfAvailableFrames() >= sizeLimit) { - auto bus = std::make_shared(sizeLimit, channelCount_, sampleRate_); + while (circularBuffer_[0]->getNumberOfAvailableFrames() >= sizeLimit) { + auto buffer = std::make_shared(sizeLimit, channelCount_, sampleRate_); for (int i = 0; i < channelCount_; ++i) { - auto *outputChannel = bus->getChannel(i)->getData(); - circularBus_[i]->pop_front(outputChannel, sizeLimit); + circularBuffer_[i]->pop_front(*buffer->getChannel(i), sizeLimit); } - invokeCallback(bus, static_cast(sizeLimit)); + invokeCallback(buffer, static_cast(sizeLimit)); } } -void AudioRecorderCallback::invokeCallback(const std::shared_ptr &bus, int numFrames) { - auto audioBuffer = std::make_shared(bus); - auto audioBufferHostObject = std::make_shared(audioBuffer); +void AudioRecorderCallback::invokeCallback( + const std::shared_ptr &buffer, + int numFrames) { + auto audioBufferHostObject = std::make_shared(buffer); std::unordered_map eventPayload = {}; eventPayload.insert({"buffer", audioBufferHostObject}); eventPayload.insert({"numFrames", numFrames}); if (audioEventHandlerRegistry_) { - audioEventHandlerRegistry_->invokeHandlerWithEventBody(AudioEvent::AUDIO_READY, callbackId_, eventPayload); + audioEventHandlerRegistry_->invokeHandlerWithEventBody( + AudioEvent::AUDIO_READY, callbackId_, eventPayload); } } @@ -98,7 +99,8 @@ void AudioRecorderCallback::invokeOnErrorCallback(const std::string &message) { } std::unordered_map eventPayload = {{"message", message}}; - audioEventHandlerRegistry_->invokeHandlerWithEventBody(AudioEvent::RECORDER_ERROR, callbackId, eventPayload); + audioEventHandlerRegistry_->invokeHandlerWithEventBody( + AudioEvent::RECORDER_ERROR, callbackId, eventPayload); } } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioRecorderCallback.h 
b/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioRecorderCallback.h index 0f4b18ec8..aa37ec754 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioRecorderCallback.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioRecorderCallback.h @@ -10,7 +10,7 @@ namespace audioapi { -class AudioBus; +class AudioBuffer; class AudioArray; class CircularAudioArray; class AudioEventHandlerRegistry; @@ -28,7 +28,7 @@ class AudioRecorderCallback { virtual void cleanup() = 0; void emitAudioData(bool flush = false); - void invokeCallback(const std::shared_ptr &bus, int numFrames); + void invokeCallback(const std::shared_ptr &buffer, int numFrames); void setOnErrorCallback(uint64_t callbackId); void clearOnErrorCallback(); @@ -47,8 +47,8 @@ class AudioRecorderCallback { std::shared_ptr audioEventHandlerRegistry_; - // TODO: CircularAudioBus - std::vector> circularBus_; + // TODO: CircularAudioBuffer + std::vector> circularBuffer_; static constexpr auto RECORDER_CALLBACK_SPSC_OVERFLOW_STRATEGY = channels::spsc::OverflowStrategy::OVERWRITE_ON_FULL; static constexpr auto RECORDER_CALLBACK_SPSC_WAIT_STRATEGY = diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioStretcher.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioStretcher.cpp index 3c84ebc50..9e4ff8cdd 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioStretcher.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioStretcher.cpp @@ -1,9 +1,8 @@ -#include #include #include #include #include -#include +#include #include #include #include @@ -12,12 +11,12 @@ namespace audioapi { std::vector AudioStretcher::castToInt16Buffer(AudioBuffer &buffer) { const size_t numChannels = buffer.getNumberOfChannels(); - const size_t numFrames = buffer.getLength(); + const size_t numFrames = buffer.getSize(); std::vector int16Buffer(numFrames * numChannels); for (size_t 
ch = 0; ch < numChannels; ++ch) { - const float *channelData = buffer.getChannelData(ch); + auto channelData = buffer.getChannel(ch)->span(); for (size_t i = 0; i < numFrames; ++i) { int16Buffer[i * numChannels + ch] = floatToInt16(channelData[i]); } @@ -31,7 +30,7 @@ std::shared_ptr AudioStretcher::changePlaybackSpeed( float playbackSpeed) { const float sampleRate = buffer.getSampleRate(); const size_t outputChannels = buffer.getNumberOfChannels(); - const size_t numFrames = buffer.getLength(); + const size_t numFrames = buffer.getSize(); if (playbackSpeed == 1.0f) { return std::make_shared(buffer); @@ -60,16 +59,16 @@ std::shared_ptr AudioStretcher::changePlaybackSpeed( stretchedBuffer.resize(outputFrames * outputChannels); stretch_deinit(stretcher); - auto audioBus = std::make_shared(outputFrames, outputChannels, sampleRate); + auto audioBuffer = std::make_shared(outputFrames, outputChannels, sampleRate); - for (int ch = 0; ch < outputChannels; ++ch) { - auto channelData = audioBus->getChannel(ch)->getData(); + for (size_t ch = 0; ch < outputChannels; ++ch) { + auto channelData = audioBuffer->getChannel(ch)->span(); for (int i = 0; i < outputFrames; ++i) { channelData[i] = int16ToFloat(stretchedBuffer[i * outputChannels + ch]); } } - return std::make_shared(audioBus); + return audioBuffer; } } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioStretcher.h b/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioStretcher.h index 2fbff057e..4a8e47482 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioStretcher.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/utils/AudioStretcher.h @@ -5,7 +5,6 @@ namespace audioapi { -class AudioBus; class AudioBuffer; class AudioStretcher { diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/ParamChangeEvent.cpp b/packages/react-native-audio-api/common/cpp/audioapi/core/utils/ParamChangeEvent.cpp 
deleted file mode 100644 index 32612d66c..000000000 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/ParamChangeEvent.cpp +++ /dev/null @@ -1,20 +0,0 @@ -#include -#include - -namespace audioapi { - -ParamChangeEvent::ParamChangeEvent( - double startTime, - double endTime, - float startValue, - float endValue, - std::function &&calculateValue, - ParamChangeEventType type) - : startTime_(startTime), - endTime_(endTime), - calculateValue_(std::move(calculateValue)), - startValue_(startValue), - endValue_(endValue), - type_(type) {} - -} // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/ParamChangeEvent.h b/packages/react-native-audio-api/common/cpp/audioapi/core/utils/ParamChangeEvent.hpp similarity index 84% rename from packages/react-native-audio-api/common/cpp/audioapi/core/utils/ParamChangeEvent.h rename to packages/react-native-audio-api/common/cpp/audioapi/core/utils/ParamChangeEvent.hpp index afe93f0f1..506109959 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/core/utils/ParamChangeEvent.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/core/utils/ParamChangeEvent.hpp @@ -17,18 +17,25 @@ class ParamChangeEvent { float startValue, float endValue, std::function &&calculateValue, - ParamChangeEventType type); + ParamChangeEventType type) + : startTime_(startTime), + endTime_(endTime), + calculateValue_(std::move(calculateValue)), + startValue_(startValue), + endValue_(endValue), + type_(type) {} ParamChangeEvent(const ParamChangeEvent &other) = delete; ParamChangeEvent &operator=(const ParamChangeEvent &other) = delete; - explicit ParamChangeEvent(ParamChangeEvent &&other) noexcept + ParamChangeEvent(ParamChangeEvent &&other) noexcept : startTime_(other.startTime_), endTime_(other.endTime_), calculateValue_(std::move(other.calculateValue_)), startValue_(other.startValue_), endValue_(other.endValue_), type_(other.type_) {} + ParamChangeEvent &operator=(ParamChangeEvent 
&&other) noexcept { if (this != &other) { startTime_ = other.startTime_; @@ -72,11 +79,11 @@ class ParamChangeEvent { } private: - double startTime_; - double endTime_; + double startTime_ = 0.0; + double endTime_ = 0.0; std::function calculateValue_; - float startValue_; - float endValue_; + float startValue_ = 0.0f; + float endValue_ = 0.0f; ParamChangeEventType type_; }; diff --git a/packages/react-native-audio-api/common/cpp/audioapi/dsp/AudioUtils.cpp b/packages/react-native-audio-api/common/cpp/audioapi/dsp/AudioUtils.cpp deleted file mode 100644 index cdfc33d63..000000000 --- a/packages/react-native-audio-api/common/cpp/audioapi/dsp/AudioUtils.cpp +++ /dev/null @@ -1,27 +0,0 @@ -#include - -namespace audioapi::dsp { -size_t timeToSampleFrame(double time, float sampleRate) { - return static_cast(time * sampleRate); -} - -double sampleFrameToTime(int sampleFrame, float sampleRate) { - return static_cast(sampleFrame) / sampleRate; -} - -float linearInterpolate(const float *source, size_t firstIndex, size_t secondIndex, float factor) { - if (firstIndex == secondIndex && firstIndex >= 1) { - return source[firstIndex] + factor * (source[firstIndex] - source[firstIndex - 1]); - } - - return source[firstIndex] + factor * (source[secondIndex] - source[firstIndex]); -} - -float linearToDecibels(float value) { - return 20 * log10f(value); -} - -float decibelsToLinear(float value) { - return pow(10, value / 20); -} -} // namespace audioapi::dsp \ No newline at end of file diff --git a/packages/react-native-audio-api/common/cpp/audioapi/dsp/AudioUtils.h b/packages/react-native-audio-api/common/cpp/audioapi/dsp/AudioUtils.h deleted file mode 100644 index f636021a9..000000000 --- a/packages/react-native-audio-api/common/cpp/audioapi/dsp/AudioUtils.h +++ /dev/null @@ -1,16 +0,0 @@ -#pragma once - -#include -#include -#include - -namespace audioapi::dsp { - -size_t timeToSampleFrame(double time, float sampleRate); -double sampleFrameToTime(int sampleFrame, float sampleRate); 
- -float linearInterpolate(const float *source, size_t firstIndex, size_t secondIndex, float factor); - -float linearToDecibels(float value); -float decibelsToLinear(float value); -} // namespace audioapi::dsp diff --git a/packages/react-native-audio-api/common/cpp/audioapi/dsp/AudioUtils.hpp b/packages/react-native-audio-api/common/cpp/audioapi/dsp/AudioUtils.hpp new file mode 100644 index 000000000..3cdcaa348 --- /dev/null +++ b/packages/react-native-audio-api/common/cpp/audioapi/dsp/AudioUtils.hpp @@ -0,0 +1,39 @@ +#pragma once + +#include +#include +#include +#include + +namespace audioapi::dsp { + +[[nodiscard]] inline size_t timeToSampleFrame(double time, float sampleRate) { + return static_cast(time * sampleRate); +} + +[[nodiscard]] inline double sampleFrameToTime(int sampleFrame, float sampleRate) { + return static_cast(sampleFrame) / sampleRate; +} + +[[nodiscard]] inline float linearInterpolate( + std::span source, + size_t firstIndex, + size_t secondIndex, + float factor) { + + if (firstIndex == secondIndex && firstIndex >= 1) { + return source[firstIndex] + factor * (source[firstIndex] - source[firstIndex - 1]); + } + + return std::lerp(source[firstIndex], source[secondIndex], factor); +} + +[[nodiscard]] inline float linearToDecibels(float value) { + return 20.0f * log10f(value); +} + +[[nodiscard]] inline float decibelsToLinear(float value) { + return static_cast(pow(10, value / 20)); +} + +} // namespace audioapi::dsp diff --git a/packages/react-native-audio-api/common/cpp/audioapi/dsp/Convolver.cpp b/packages/react-native-audio-api/common/cpp/audioapi/dsp/Convolver.cpp index 812cbe34c..d10426919 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/dsp/Convolver.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/dsp/Convolver.cpp @@ -5,10 +5,10 @@ #include #endif -#include #include #include #include +#include #include #include #include @@ -17,17 +17,16 @@ namespace audioapi { Convolver::Convolver() - : _blockSize(0), + : 
_trueSegmentCount(0), + _blockSize(0), _segSize(0), _segCount(0), _fftComplexSize(0), _segments(), _segmentsIR(), - _fftBuffer(0), _fft(nullptr), _preMultiplied(), - _current(0), - _inputBuffer(0) {} + _current(0) {} void Convolver::reset() { _blockSize = 0; @@ -39,8 +38,12 @@ void Convolver::reset() { _segments.clear(); _segmentsIR.clear(); _preMultiplied.clear(); - _fftBuffer.zero(); - _inputBuffer.zero(); + if (_fftBuffer != nullptr) { + _fftBuffer->zero(); + } + if (_inputBuffer != nullptr) { + _inputBuffer->zero(); + } } bool Convolver::init(size_t blockSize, const audioapi::AudioArray &ir, size_t irLen) { @@ -71,7 +74,7 @@ bool Convolver::init(size_t blockSize, const audioapi::AudioArray &ir, size_t ir // complex-conjugate symmetricity _fftComplexSize = _segSize / 2 + 1; _fft = std::make_shared(static_cast(_segSize)); - _fftBuffer.resize(_segSize); + _fftBuffer = std::make_unique(_segSize); // segments preparation for (int i = 0; i < _segCount; ++i) { @@ -86,18 +89,18 @@ bool Convolver::init(size_t blockSize, const audioapi::AudioArray &ir, size_t ir const size_t samplesToCopy = std::min(_blockSize, remainingSamples); if (samplesToCopy > 0) { - memcpy(_fftBuffer.getData(), ir.getData() + i * _blockSize, samplesToCopy * sizeof(float)); + _fftBuffer->copy(ir, i * _blockSize, 0, samplesToCopy); } // Each sub filter is zero-padded to length 2B and transformed using a // 2B-point real-to-complex FFT. 
- memset(_fftBuffer.getData() + _blockSize, 0, _blockSize * sizeof(float)); - _fft->doFFT(_fftBuffer.getData(), segment); + _fftBuffer->zero(_blockSize, _blockSize); + _fft->doFFT(*_fftBuffer, segment); segment.at(0).imag(0.0f); // ensure DC component is real _segmentsIR.push_back(segment); } _preMultiplied = aligned_vec_complex(_fftComplexSize); - _inputBuffer.resize(_segSize); + _inputBuffer = std::make_unique(_segSize); _current = 0; return true; @@ -165,12 +168,12 @@ void pairwise_complex_multiply_fast( #endif } -void Convolver::process(float *data, float *outputData) { +void Convolver::process(const AudioArray &input, AudioArray &output) { // The input buffer acts as a 2B-point sliding window of the input signal. // With each new input block, the right half of the input buffer is shifted // to the left and the new block is stored in the right half. - memmove(_inputBuffer.getData(), _inputBuffer.getData() + _blockSize, _blockSize * sizeof(float)); - memcpy(_inputBuffer.getData() + _blockSize, data, _blockSize * sizeof(float)); + _inputBuffer->copyWithin(_blockSize, 0, _blockSize); + _inputBuffer->copy(input, 0, _blockSize, _blockSize); // All contents (DFT spectra) in the FDL are shifted up by one slot. _current = (_current > 0) ? _current - 1 : _segCount - 1; @@ -178,7 +181,7 @@ void Convolver::process(float *data, float *outputData) { // resulting in B+1 complex-conjugate symmetric DFT coefficients. The // result is stored in the first FDL slot. // _current marks first FDL slot, which is the current input block. - _fft->doFFT(_inputBuffer.getData(), _segments[_current]); + _fft->doFFT(*_inputBuffer, _segments[_current]); _segments[_current][0].imag(0.0f); // ensure DC component is real // The P sub filter spectra are pairwisely multiplied with the input spectra @@ -194,8 +197,8 @@ void Convolver::process(float *data, float *outputData) { // Of the accumulated spectral convolutions, an 2B-point complex-to-real // IFFT is computed. 
From the resulting 2B samples, the left half is // discarded and the right half is returned as the next output block. - _fft->doInverseFFT(_preMultiplied, _fftBuffer.getData()); + _fft->doInverseFFT(_preMultiplied, *_fftBuffer); - memcpy(outputData, _fftBuffer.getData() + _blockSize, _blockSize * sizeof(float)); + output.copy(*_fftBuffer, _blockSize, 0, _blockSize); } } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/dsp/Convolver.h b/packages/react-native-audio-api/common/cpp/audioapi/dsp/Convolver.h index a356bf840..8d7b966cb 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/dsp/Convolver.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/dsp/Convolver.h @@ -19,9 +19,9 @@ class Convolver { public: Convolver(); bool init(size_t blockSize, const AudioArray &ir, size_t irLen); - void process(float *inputData, float *outputData); + void process(const AudioArray &input, AudioArray &output); void reset(); - inline size_t getSegCount() const { + [[nodiscard]] inline size_t getSegCount() const { return _trueSegmentCount; } @@ -33,11 +33,11 @@ class Convolver { size_t _fftComplexSize; std::vector _segments; std::vector _segmentsIR; - AudioArray _fftBuffer; + std::unique_ptr _fftBuffer; std::shared_ptr _fft; aligned_vec_complex _preMultiplied; size_t _current; - AudioArray _inputBuffer; + std::unique_ptr _inputBuffer; friend void pairwise_complex_multiply_fast( const aligned_vec_complex &ir, diff --git a/packages/react-native-audio-api/common/cpp/audioapi/dsp/FFT.h b/packages/react-native-audio-api/common/cpp/audioapi/dsp/FFT.h index db9caf47c..c7308699f 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/dsp/FFT.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/dsp/FFT.h @@ -2,6 +2,7 @@ #include #include +#include #include #include @@ -17,9 +18,9 @@ class FFT { ~FFT(); template - void doFFT(float *in, std::vector, Allocator> &out) { + void doFFT(const AudioArray &in, std::vector, 
Allocator> &out) { pffft_transform_ordered( - pffftSetup_, in, reinterpret_cast(&out[0]), work_, PFFFT_FORWARD); + pffftSetup_, in.begin(), reinterpret_cast(&out[0]), work_, PFFFT_FORWARD); // this is a possible place for bugs and mistakes // due to pffft implementation and how it stores results // keep this information in mind @@ -28,11 +29,11 @@ class FFT { } template - void doInverseFFT(std::vector, Allocator> &in, float *out) { + void doInverseFFT(std::vector, Allocator> &in, AudioArray &out) { pffft_transform_ordered( - pffftSetup_, reinterpret_cast(&in[0]), out, work_, PFFFT_BACKWARD); + pffftSetup_, reinterpret_cast(&in[0]), out.begin(), work_, PFFFT_BACKWARD); - dsp::multiplyByScalar(out, 1.0f / static_cast(size_), out, size_); + out.scale(1.0f / static_cast(size_)); } private: diff --git a/packages/react-native-audio-api/common/cpp/audioapi/dsp/Resampler.cpp b/packages/react-native-audio-api/common/cpp/audioapi/dsp/Resampler.cpp index c90f9bc9d..8dc85dcb8 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/dsp/Resampler.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/dsp/Resampler.cpp @@ -44,7 +44,7 @@ namespace audioapi { Resampler::Resampler(int maxBlockSize, int kernelSize): kernelSize_(kernelSize), kernel_(std::make_shared(kernelSize)), - stateBuffer_(std::make_shared(2 * maxBlockSize)) { + stateBuffer_(std::make_unique(2 * maxBlockSize)) { stateBuffer_->zero(); } @@ -58,35 +58,6 @@ float Resampler::computeBlackmanWindow(double x) const { return static_cast(a0 - a1 * std::cos(2.0 * PI * n) + a2 * std::cos(4.0 * PI * n)); } -float Resampler::computeConvolution(const float *stateStart, const float *kernelStart) const { - float sum = 0.0f; - int k = 0; - -#ifdef __ARM_NEON - float32x4_t vSum = vdupq_n_f32(0.0f); - - // process 4 samples at a time - for (; k <= kernelSize_ - 4; k += 4) { - float32x4_t vState = vld1q_f32(stateStart + k); - float32x4_t vKernel = vld1q_f32(kernelStart + k); - - // fused multiply-add: vSum += vState 
* vKernel - vSum = vmlaq_f32(vSum, vState, vKernel); - } - - // horizontal reduction: Sum the 4 lanes of vSum into a single float - sum += vgetq_lane_f32(vSum, 0); - sum += vgetq_lane_f32(vSum, 1); - sum += vgetq_lane_f32(vSum, 2); - sum += vgetq_lane_f32(vSum, 3); -#endif - for (; k < kernelSize_; ++k) { - sum += stateStart[k] * kernelStart[k]; - } - - return sum; -} - void Resampler::reset() { if (stateBuffer_) { stateBuffer_->zero(); @@ -98,7 +69,6 @@ UpSampler::UpSampler(int maxBlockSize, int kernelSize) : Resampler(maxBlockSize, } void UpSampler::initializeKernel() { - auto kData = kernel_->getData(); int halfSize = kernelSize_ / 2; double subSampleOffset = -0.5; @@ -111,39 +81,33 @@ void UpSampler::initializeKernel() { double sinc = (std::abs(x) < 1e-9) ? 1.0 : std::sin(x * PI) / (x * PI); // apply window in order smooth out the edges, because sinc extends to infinity in both directions - kData[i] = static_cast(sinc * computeBlackmanWindow(i - subSampleOffset)); + (*kernel_)[i] = static_cast(sinc * computeBlackmanWindow(i - subSampleOffset)); } // reverse kernel to match convolution implementation - std::reverse(kData, kData + kernelSize_); + kernel_->reverse(); } int UpSampler::process( - const std::shared_ptr &input, - const std::shared_ptr &output, + AudioArray& input, + AudioArray& output, int framesToProcess) { - - const float *inputData = input->getData(); - float *outputData = output->getData(); - float *state = stateBuffer_->getData(); - const float *kernel = kernel_->getData(); - // copy new input [ HISTORY | NEW DATA ] - std::memcpy(state + kernelSize_, inputData, framesToProcess * sizeof(float)); + stateBuffer_->copy(input, 0, kernelSize_, framesToProcess); int halfKernel = kernelSize_ / 2; for (int i = 0; i < framesToProcess; ++i) { // direct copy for even samples with half kernel latency compensation - outputData[2 * i] = state[kernelSize_ + i - halfKernel]; + output[2 * i] = (*stateBuffer_)[kernelSize_ + i - halfKernel]; // convolution for odd 
samples // symmetric Linear Phase filter has latency of half kernel size - outputData[2 * i + 1] = computeConvolution(&state[i + 1], kernel); + output[2 * i + 1] = stateBuffer_->computeConvolution(*kernel_, i + 1); } // move new data to history [ NEW DATA | EMPTY ] - std::memmove(state, state + framesToProcess, kernelSize_ * sizeof(float)); + stateBuffer_->copyWithin(framesToProcess, 0, kernelSize_); return framesToProcess * 2; } @@ -153,7 +117,6 @@ DownSampler::DownSampler(int maxBlockSize, int kernelSize) : Resampler(maxBlockS } void DownSampler::initializeKernel() { - auto kData = kernel_->getData(); int halfSize = kernelSize_ / 2; for (int i = 0; i < kernelSize_; ++i) { @@ -166,34 +129,29 @@ void DownSampler::initializeKernel() { sinc *= 0.5; // apply window in order smooth out the edges, because sinc extends to infinity in both directions - kData[i] = static_cast(sinc * computeBlackmanWindow(i)); + (*kernel_)[i] = static_cast(sinc * computeBlackmanWindow(i)); } // reverse kernel to match convolution implementation - std::reverse(kData, kData + kernelSize_); + kernel_->reverse(); } int DownSampler::process( - const std::shared_ptr &input, - const std::shared_ptr &output, + AudioArray& input, + AudioArray& output, int framesToProcess) { - const float *inputData = input->getData(); - float *outputData = output->getData(); - float *state = stateBuffer_->getData(); - const float *kernel = kernel_->getData(); - // copy new input [ HISTORY | NEW DATA ] - std::memcpy(state + kernelSize_, inputData, framesToProcess * sizeof(float)); + stateBuffer_->copy(input, 0, kernelSize_, framesToProcess); auto outputCount = framesToProcess / 2; - for (int i = 0; i < outputCount; ++i) { + for (size_t i = 0; i < outputCount; ++i) { // convolution for downsampled samples - outputData[i] = computeConvolution(&state[2 * i + 1], kernel); + output[i] = stateBuffer_->computeConvolution(*kernel_, 2 * i + 1); } // move new data to history [ NEW DATA | EMPTY ] - std::memmove(state, state + 
framesToProcess, kernelSize_ * sizeof(float)); + stateBuffer_->copyWithin(framesToProcess, 0, kernelSize_); return outputCount; } diff --git a/packages/react-native-audio-api/common/cpp/audioapi/dsp/Resampler.h b/packages/react-native-audio-api/common/cpp/audioapi/dsp/Resampler.h index 4a99f9f45..08b748f0c 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/dsp/Resampler.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/dsp/Resampler.h @@ -16,22 +16,18 @@ class Resampler { Resampler(int maxBlockSize, int kernelSize); virtual ~Resampler() = default; - virtual int process( - const std::shared_ptr &input, - const std::shared_ptr &output, - int framesToProcess) = 0; + virtual int process(AudioArray &input, AudioArray &output, int framesToProcess) = 0; void reset(); protected: [[nodiscard]] float computeBlackmanWindow(double x) const; - float computeConvolution(const float *stateStart, const float *kernelStart) const; virtual void initializeKernel() = 0; int kernelSize_; std::shared_ptr kernel_; // [ HISTORY | NEW DATA ] - std::shared_ptr stateBuffer_; + std::unique_ptr stateBuffer_; }; class UpSampler : public Resampler { @@ -39,10 +35,7 @@ class UpSampler : public Resampler { UpSampler(int maxBlockSize, int kernelSize); // N -> 2N - int process( - const std::shared_ptr &input, - const std::shared_ptr &output, - int framesToProcess) override; + int process(AudioArray &input, AudioArray &output, int framesToProcess) override; protected: void initializeKernel() final; @@ -53,10 +46,7 @@ class DownSampler : public Resampler { DownSampler(int maxBlockSize, int kernelSize); // N -> N / 2 - int process( - const std::shared_ptr &input, - const std::shared_ptr &output, - int framesToProcess) override; + int process(AudioArray &input, AudioArray &output, int framesToProcess) override; protected: void initializeKernel() final; diff --git a/packages/react-native-audio-api/common/cpp/audioapi/dsp/VectorMath.cpp 
b/packages/react-native-audio-api/common/cpp/audioapi/dsp/VectorMath.cpp index 4e0ffd3fa..d965bafc6 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/dsp/VectorMath.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/dsp/VectorMath.cpp @@ -23,7 +23,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include +#include #include #include @@ -97,6 +97,32 @@ void multiplyByScalarThenAddToOutput( vDSP_vsma(inputVector, 1, &scalar, outputVector, 1, outputVector, 1, numberOfElementsToProcess); } +float computeConvolution(const float *state, const float *kernel, size_t kernelSize) { + float result = 0.0f; + vDSP_conv(state, 1, kernel, 1, &result, 1, 1, kernelSize); + return result; +} + +void deinterleaveStereo( + const float * __restrict inputInterleaved, + float * __restrict outputLeft, + float * __restrict outputRight, + size_t numberOfFrames) { + float zero = 0.0f; + vDSP_vsadd(inputInterleaved, 2, &zero, outputLeft, 1, numberOfFrames); + vDSP_vsadd(inputInterleaved + 1, 2, &zero, outputRight, 1, numberOfFrames); +} + +void interleaveStereo( + const float* __restrict inputLeft, + const float* __restrict inputRight, + float* __restrict outputInterleaved, + size_t numberOfFrames) { + float zero = 0.0f; + vDSP_vsadd(inputLeft, 1, &zero, outputInterleaved, 2, numberOfFrames); + vDSP_vsadd(inputRight, 1, &zero, outputInterleaved + 1, 2, numberOfFrames); +} + #else #if defined(HAVE_X86_SSE2) @@ -654,15 +680,141 @@ void multiplyByScalarThenAddToOutput( } } -#endif +float computeConvolution(const float *state, const float *kernel, size_t kernelSize) { + float sum = 0.0f; + int k = 0; -void linearToDecibels( - const float *inputVector, - float *outputVector, - size_t numberOfElementsToProcess) { - for (int i = 0; i < numberOfElementsToProcess; i++) { - outputVector[i] = dsp::linearToDecibels(inputVector[i]); +#ifdef HAVE_ARM_NEON_INTRINSICS + float32x4_t vSum = vdupq_n_f32(0.0f); + + // process 4 samples at a time + for (; k 
<= kernelSize - 4; k += 4) { + float32x4_t vState = vld1q_f32(state + k); + float32x4_t vKernel = vld1q_f32(kernel + k); + + // fused multiply-add: vSum += vState * vKernel + vSum = vmlaq_f32(vSum, vState, vKernel); } + + // horizontal reduction: Sum the 4 lanes of vSum into a single float + sum += vgetq_lane_f32(vSum, 0); + sum += vgetq_lane_f32(vSum, 1); + sum += vgetq_lane_f32(vSum, 2); + sum += vgetq_lane_f32(vSum, 3); +#endif + for (; k < kernelSize; ++k) { + sum += state[k] * kernel[k]; + } + + return sum; +} + +void deinterleaveStereo( + const float * __restrict inputInterleaved, + float * __restrict outputLeft, + float * __restrict outputRight, + size_t numberOfFrames) { + + size_t n = numberOfFrames; + +#if defined(HAVE_ARM_NEON_INTRINSICS) + // process 4 frames (8 samples) at a time using NEON + size_t group = n / 4; + while (group--) { + // vld2q_f32 deinterleaves L and R into separate registers in one hardware op + float32x4x2_t v = vld2q_f32(inputInterleaved); + vst1q_f32(outputLeft, v.val[0]); + vst1q_f32(outputRight, v.val[1]); + + inputInterleaved += 8; + outputLeft += 4; + outputRight += 4; + } + n %= 4; +#elif defined(HAVE_X86_SSE2) + // process 4 frames (8 samples) at a time using SSE + size_t group = n / 4; + while (group--) { + // load two 128-bit registers (8 floats total) + __m128 s0 = _mm_loadu_ps(inputInterleaved); + __m128 s1 = _mm_loadu_ps(inputInterleaved + 4); + + // use shuffle to group the Left samples and Right samples + // mask 0x88 (2,0,2,0) picks indices 0 and 2 from both s0 and s1 + // mask 0xDD (3,1,3,1) picks indices 1 and 3 from both s0 and s1 + __m128 left_v = _mm_shuffle_ps(s0, s1, _MM_SHUFFLE(2, 0, 2, 0)); + __m128 right_v = _mm_shuffle_ps(s0, s1, _MM_SHUFFLE(3, 1, 3, 1)); + + _mm_storeu_ps(outputLeft, left_v); + _mm_storeu_ps(outputRight, right_v); + + inputInterleaved += 8; + outputLeft += 4; + outputRight += 4; + } + n %= 4; +#endif + + while (n--) { + *outputLeft++ = *inputInterleaved++; + *outputRight++ = 
*inputInterleaved++; + } +} + +void interleaveStereo( + const float * __restrict inputLeft, + const float * __restrict inputRight, + float * __restrict outputInterleaved, + size_t numberOfFrames) { + + size_t n = numberOfFrames; + +#if defined(HAVE_ARM_NEON_INTRINSICS) + // process 4 frames (8 samples) at a time + size_t group = n / 4; + while (group--) { + // load contiguous planar data + float32x4_t vL = vld1q_f32(inputLeft); + float32x4_t vR = vld1q_f32(inputRight); + + // vst2q_f32 takes two registers and interleaves them during the store: + float32x4x2_t vOut = { vL, vR }; + vst2q_f32(outputInterleaved, vOut); + + inputLeft += 4; + inputRight += 4; + outputInterleaved += 8; + } + n %= 4; +#elif defined(HAVE_X86_SSE2) + // process 4 frames (8 samples) at a time + size_t group = n / 4; + while (group--) { + __m128 vL = _mm_loadu_ps(inputLeft); + __m128 vR = _mm_loadu_ps(inputRight); + + // unpack low: Interleaves first two elements of each register + __m128 vLow = _mm_unpacklo_ps(vL, vR); + + // unpack high: Interleaves last two elements of each register + __m128 vHigh = _mm_unpackhi_ps(vL, vR); + + _mm_storeu_ps(outputInterleaved, vLow); + _mm_storeu_ps(outputInterleaved + 4, vHigh); + + inputLeft += 4; + inputRight += 4; + outputInterleaved += 8; + } + n %= 4; +#endif + + while (n--) { + *outputInterleaved++ = *inputLeft++; + *outputInterleaved++ = *inputRight++; + } } +#endif + } // namespace audioapi::dsp diff --git a/packages/react-native-audio-api/common/cpp/audioapi/dsp/VectorMath.h b/packages/react-native-audio-api/common/cpp/audioapi/dsp/VectorMath.h index 0451ee125..cbbfedabb 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/dsp/VectorMath.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/dsp/VectorMath.h @@ -69,9 +69,18 @@ void multiply( // Finds the maximum magnitude of a float vector. 
float maximumMagnitude(const float *inputVector, size_t numberOfElementsToProcess); -void linearToDecibels( - const float *inputVector, - float *outputVector, - size_t numberOfElementsToProcess); +float computeConvolution(const float *state, const float *kernel, size_t kernelSize); + +void interleaveStereo( + const float *inputLeft, + const float *inputRight, + float *outputInterleaved, + size_t numberOfFrames); + +void deinterleaveStereo( + const float *inputInterleaved, + float *outputLeft, + float *outputRight, + size_t numberOfFrames); } // namespace audioapi::dsp diff --git a/packages/react-native-audio-api/common/cpp/audioapi/dsp/WaveShaper.cpp b/packages/react-native-audio-api/common/cpp/audioapi/dsp/WaveShaper.cpp index 9ee3c7207..8f616d2aa 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/dsp/WaveShaper.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/dsp/WaveShaper.cpp @@ -2,7 +2,7 @@ #include #include #include -#include +#include #include #include @@ -46,7 +46,7 @@ void WaveShaper::setOversample(OverSampleType type) { } } -void WaveShaper::process(const std::shared_ptr &channelData, int framesToProcess) { +void WaveShaper::process(AudioArray &channelData, int framesToProcess) { if (curve_ == nullptr) { return; } @@ -66,40 +66,40 @@ void WaveShaper::process(const std::shared_ptr &channelData, int fra } // based on https://webaudio.github.io/web-audio-api/#WaveShaperNode -void WaveShaper::processNone(const std::shared_ptr &channelData, int framesToProcess) { - auto curveArray = curve_->getData(); +void WaveShaper::processNone(AudioArray &channelData, int framesToProcess) { auto curveSize = curve_->getSize(); - auto data = channelData->getData(); - for (int i = 0; i < framesToProcess; i++) { - float v = (static_cast(curveSize) - 1) * 0.5f * (data[i] + 1.0f); + float v = (static_cast(curveSize) - 1) * 0.5f * (channelData[i] + 1.0f); if (v <= 0) { - data[i] = curveArray[0]; + channelData[i] = (*curve_)[0]; } else if (v >= 
static_cast(curveSize) - 1) { - data[i] = curveArray[curveSize - 1]; + channelData[i] = (*curve_)[curveSize - 1]; } else { auto k = std::floor(v); auto f = v - k; auto kIndex = static_cast(k); - data[i] = (1 - f) * curveArray[kIndex] + f * curveArray[kIndex + 1]; + channelData[i] = (1 - f) * (*curve_)[kIndex] + f * (*curve_)[kIndex + 1]; } } } -void WaveShaper::process2x(const std::shared_ptr &channelData, int framesToProcess) { - auto outputFrames = upSampler_->process(channelData, tempBuffer2x_, framesToProcess); - processNone(tempBuffer2x_, outputFrames); - downSampler_->process(tempBuffer2x_, channelData, outputFrames); +void WaveShaper::process2x(AudioArray &channelData, int framesToProcess) { + auto outputFrames = upSampler_->process(channelData, *tempBuffer2x_, framesToProcess); + processNone(*tempBuffer2x_, outputFrames); + downSampler_->process(*tempBuffer2x_, channelData, outputFrames); } -void WaveShaper::process4x(const std::shared_ptr &channelData, int framesToProcess) { - auto upSamplerOutputFrames = upSampler_->process(channelData, tempBuffer2x_, framesToProcess); - auto upSampler2OutputFrames = upSampler2_->process(tempBuffer2x_, tempBuffer4x_, upSamplerOutputFrames); - processNone(tempBuffer4x_, upSampler2OutputFrames); - auto downSampler2OutputFrames = downSampler2_->process(tempBuffer4x_, tempBuffer2x_, upSampler2OutputFrames); - downSampler_->process(tempBuffer2x_, channelData, downSampler2OutputFrames); +void WaveShaper::process4x( + AudioArray &channelData, int framesToProcess) { + auto upSamplerOutputFrames = upSampler_->process(channelData, *tempBuffer2x_, framesToProcess); + auto upSampler2OutputFrames = + upSampler2_->process(*tempBuffer2x_, *tempBuffer4x_, upSamplerOutputFrames); + processNone(*tempBuffer4x_, upSampler2OutputFrames); + auto downSampler2OutputFrames = + downSampler2_->process(*tempBuffer4x_, *tempBuffer2x_, upSampler2OutputFrames); + downSampler_->process(*tempBuffer2x_, channelData, downSampler2OutputFrames); } } // 
namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/dsp/WaveShaper.h b/packages/react-native-audio-api/common/cpp/audioapi/dsp/WaveShaper.h index fd5a04b44..9db0bffd5 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/dsp/WaveShaper.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/dsp/WaveShaper.h @@ -11,14 +11,14 @@ namespace audioapi { -class AudioBus; +class AudioBuffer; class AudioArray; class WaveShaper { public: explicit WaveShaper(const std::shared_ptr &curve); - void process(const std::shared_ptr &channelData, int framesToProcess); + void process(AudioArray &channelData, int framesToProcess); void setCurve(const std::shared_ptr &curve); void setOversample(OverSampleType type); @@ -38,9 +38,9 @@ class WaveShaper { std::shared_ptr tempBuffer2x_; std::shared_ptr tempBuffer4x_; - void processNone(const std::shared_ptr &channelData, int framesToProcess); - void process2x(const std::shared_ptr &channelData, int framesToProcess); - void process4x(const std::shared_ptr &channelData, int framesToProcess); + void processNone(AudioArray &channelData, int framesToProcess); + void process2x(AudioArray &channelData, int framesToProcess); + void process4x(AudioArray &channelData, int framesToProcess); }; } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/dsp/Windows.cpp b/packages/react-native-audio-api/common/cpp/audioapi/dsp/Windows.cpp deleted file mode 100644 index 79f7948ec..000000000 --- a/packages/react-native-audio-api/common/cpp/audioapi/dsp/Windows.cpp +++ /dev/null @@ -1,75 +0,0 @@ -#include -#include -#include - -namespace audioapi::dsp { - -void WindowFunction::forcePerfectReconstruction(float *data, int windowLength, int interval) { - for (int i = 0; i < interval; ++i) { - float sum2 = 0; - - for (int index = i; index < windowLength; index += interval) { - sum2 += data[index] * data[index]; - } - - float factor = 1 / std::sqrt(sum2); - - for (int index = i; index < 
windowLength; index += interval) { - data[index] *= factor; - } - } -} - -void Hann::apply(float *data, int size) const { - for (int i = 0; i < size; ++i) { - auto x = static_cast(i) / static_cast(size - 1); - auto window = 0.5f - 0.5f * cos(2 * PI * x); - data[i] = window * amplitude_; - } -} - -void Blackman::apply(float *data, int size) const { - for (int i = 0; i < size; ++i) { - auto x = static_cast(i) / static_cast(size - 1); - auto window = 0.42f - 0.5f * cos(2 * PI * x) + 0.08f * cos(4 * PI * x); - data[i] = window * amplitude_; - } -} - -void Kaiser::apply(float *data, int size) const { - for (int i = 0; i < size; ++i) { - auto r = static_cast(2 * i + 1) / static_cast(size) - 1.0f; - auto arg = std::sqrt(1 - r * r); - data[i] = bessel0(beta_ * arg) * invB0_ * amplitude_; - } -} - -float Kaiser::bandwidthToBeta(float bandwidth, bool heuristicOptimal) { - if (heuristicOptimal) { // Heuristic based on numerical search - return bandwidth + 8.0f / (bandwidth + 3.0f) * (bandwidth + 3.0f) + - 0.25f * std::max(3.0f - bandwidth, 0.0f); - } - - bandwidth = std::max(bandwidth, 2.0f); - auto alpha = std::sqrt(bandwidth * bandwidth * 0.25f - 1.0f); - return alpha * PI; -} - -void ApproximateConfinedGaussian::apply(float *data, int size) const { - auto offsetScale = getGaussian(1.0f) / (getGaussian(3.0f) + getGaussian(-1.0f)); - auto norm = 1 / (getGaussian(1.0f) - 2 * offsetScale * getGaussian(2.0f)); - for (int i = 0; i < size; ++i) { - auto r = static_cast(2 * i + 1) / static_cast(size) - 1.0f; - data[i] = norm * (getGaussian(r) - offsetScale * (getGaussian(r - 2) + getGaussian(r + 2))); - } -} - -float ApproximateConfinedGaussian::bandwidthToSigma(float bandwidth) { - return 0.3f / std::sqrt(bandwidth); -} - -float ApproximateConfinedGaussian::getGaussian(float x) const { - return std::exp(-x * x * gaussianFactor_); -} - -} // namespace audioapi::dsp diff --git a/packages/react-native-audio-api/common/cpp/audioapi/dsp/Windows.h 
b/packages/react-native-audio-api/common/cpp/audioapi/dsp/Windows.h deleted file mode 100644 index 8108e18a3..000000000 --- a/packages/react-native-audio-api/common/cpp/audioapi/dsp/Windows.h +++ /dev/null @@ -1,96 +0,0 @@ -#pragma once - -#include - -namespace audioapi::dsp { - -// https://en.wikipedia.org/wiki/Window_function -// https://personalpages.hs-kempten.de/~vollratj/InEl/pdf/Window%20function%20-%20Wikipedia.pdf -class WindowFunction { - public: - explicit WindowFunction(float amplitude = 1.0f) : amplitude_(amplitude) {} - - virtual void apply(float *data, int size) const = 0; - // forces STFT perfect-reconstruction (WOLA) on an existing window, for a given STFT interval. - static void forcePerfectReconstruction(float *data, int windowLength, int interval); - - protected: - // 1/L = amplitude - float amplitude_; -}; - -//https://en.wikipedia.org/wiki/Hann_function -// https://www.sciencedirect.com/topics/engineering/hanning-window -// https://docs.scipy.org/doc//scipy-1.2.3/reference/generated/scipy.signal.windows.hann.html#scipy.signal.windows.hann -class Hann : public WindowFunction { - public: - explicit Hann(float amplitude = 1.0f) : WindowFunction(amplitude) {} - - void apply(float *data, int size) const override; -}; - -// https://www.sciencedirect.com/topics/engineering/blackman-window -// https://docs.scipy.org/doc//scipy-1.2.3/reference/generated/scipy.signal.windows.blackman.html#scipy.signal.windows.blackman -class Blackman : public WindowFunction { - public: - explicit Blackman(float amplitude = 1.0f) : WindowFunction(amplitude) {} - - void apply(float *data, int size) const override; -}; - -// https://en.wikipedia.org/wiki/Kaiser_window -class Kaiser : public WindowFunction { - public: - explicit Kaiser(float beta, float amplitude = 1.0f) - : WindowFunction(amplitude), beta_(beta), invB0_(1.0f / bessel0(beta)) {} - - static Kaiser - withBandwidth(float bandwidth, bool heuristicOptimal = false, float amplitude = 1.0f) { - return 
Kaiser(bandwidthToBeta(bandwidth, heuristicOptimal), amplitude); - } - - void apply(float *data, int size) const override; - - private: - // beta = pi * alpha - // invB0 = 1 / I0(beta) - float beta_; - float invB0_; - - // https://en.wikipedia.org/wiki/Bessel_function#Modified_Bessel_functions:_I%CE%B1,_K%CE%B1 - static inline float bessel0(float x) { - const double significanceLimit = 1e-4; - auto result = 0.0f; - auto term = 1.0f; - auto m = 1.0f; - while (term > significanceLimit) { - result += term; - ++m; - term *= (x * x) / (4 * m * m); - } - - return result; - } - static float bandwidthToBeta(float bandwidth, bool heuristicOptimal = false); -}; - -// https://www.recordingblogs.com/wiki/gaussian-window -class ApproximateConfinedGaussian : public WindowFunction { - public: - explicit ApproximateConfinedGaussian(float sigma, float amplitude = 1.0f) - : WindowFunction(amplitude), gaussianFactor_(0.0625f / (sigma * sigma)) {} - - static ApproximateConfinedGaussian withBandwidth(float bandwidth, float amplitude = 1.0f) { - return ApproximateConfinedGaussian(bandwidthToSigma(bandwidth), amplitude); - } - - void apply(float *data, int size) const override; - - private: - float gaussianFactor_; - - static float bandwidthToSigma(float bandwidth); - - [[nodiscard]] float getGaussian(float x) const; -}; -} // namespace audioapi::dsp diff --git a/packages/react-native-audio-api/common/cpp/audioapi/dsp/Windows.hpp b/packages/react-native-audio-api/common/cpp/audioapi/dsp/Windows.hpp new file mode 100644 index 000000000..7c66985dc --- /dev/null +++ b/packages/react-native-audio-api/common/cpp/audioapi/dsp/Windows.hpp @@ -0,0 +1,196 @@ +#pragma once + +#include +#include +#include + +namespace audioapi::dsp { + +// https://en.wikipedia.org/wiki/Window_function +// https://personalpages.hs-kempten.de/~vollratj/InEl/pdf/Window%20function%20-%20Wikipedia.pdf +class WindowFunction { + public: + explicit WindowFunction(float amplitude = 1.0f) : amplitude_(amplitude) {} + + 
virtual void apply(std::span data) const noexcept = 0; + // forces STFT perfect-reconstruction (WOLA) on an existing window, for a given STFT interval. + static void forcePerfectReconstruction(std::span data, int interval) { + int windowLength = static_cast(data.size()); + + for (int i = 0; i < interval; ++i) { + float sum2 = 0; + + for (int index = i; index < windowLength; index += interval) { + sum2 += data[index] * data[index]; + } + + float factor = 1 / std::sqrt(sum2); + + for (int index = i; index < windowLength; index += interval) { + data[index] *= factor; + } + } + } + + protected: + // 1/L = amplitude + float amplitude_; +}; + +//https://en.wikipedia.org/wiki/Hann_function +// https://www.sciencedirect.com/topics/engineering/hanning-window +// https://docs.scipy.org/doc//scipy-1.2.3/reference/generated/scipy.signal.windows.hann.html#scipy.signal.windows.hann +class Hann : public WindowFunction { + public: + explicit Hann(float amplitude = 1.0f) : WindowFunction(amplitude) {} + + void apply(std::span data) const noexcept override { + const size_t size = data.size(); + if (size < 2) { + return; + } + + const float invSizeMinusOne = 1.0f / static_cast(size - 1); + const float constantPart = 2.0f * std::numbers::pi_v * invSizeMinusOne; + + for (size_t i = 0; i < size; ++i) { + float window = 0.5f * (1.0f - std::cos(constantPart * i)); + data[i] = window * amplitude_; + } + } +}; + +// https://www.sciencedirect.com/topics/engineering/blackman-window +// https://docs.scipy.org/doc//scipy-1.2.3/reference/generated/scipy.signal.windows.blackman.html#scipy.signal.windows.blackman +class Blackman : public WindowFunction { + public: + explicit Blackman(float amplitude = 1.0f) : WindowFunction(amplitude) {} + + void apply(std::span data) const noexcept override { + const size_t size = data.size(); + if (size < 2) { + return; + } + + const float invSizeMinusOne = 1.0f / static_cast(size - 1); + const float alpha = 2.0f * std::numbers::pi_v * invSizeMinusOne; + + for 
(size_t i = 0; i < size; ++i) { + const float phase = alpha * i; + // 4*PI*x is just 2 * (2*PI*x) + const float window = 0.42f - 0.50f * std::cos(phase) + 0.08f * std::cos(2.0f * phase); + data[i] = window * amplitude_; + } + } +}; + +// https://en.wikipedia.org/wiki/Kaiser_window +class Kaiser : public WindowFunction { + public: + explicit Kaiser(float beta, float amplitude = 1.0f) + : WindowFunction(amplitude), beta_(beta), invB0_(1.0f / bessel0(beta)) {} + + static Kaiser + withBandwidth(float bandwidth, bool heuristicOptimal = false, float amplitude = 1.0f) { + return Kaiser(bandwidthToBeta(bandwidth, heuristicOptimal), amplitude); + } + + void apply(std::span data) const noexcept override { + const size_t size = data.size(); + if (size == 0) { + return; + } + + const float invSize = 1.0f / static_cast(size); + const float commonScale = invB0_ * amplitude_; + + for (size_t i = 0; i < size; ++i) { + // Optimized 'r' calculation: (2i+1)/size - 1 + const float r = (static_cast(2 * i + 1) * invSize) - 1.0f; + const float arg = std::sqrt(std::max(0.0f, 1.0f - r * r)); + + data[i] = bessel0(beta_ * arg) * commonScale; + } + } + + private: + // beta = pi * alpha + // invB0 = 1 / I0(beta) + float beta_; + float invB0_; + + // https://en.wikipedia.org/wiki/Bessel_function#Modified_Bessel_functions:_I%CE%B1,_K%CE%B1 + static inline float bessel0(float x) { + const double significanceLimit = 1e-4; + auto result = 0.0f; + auto term = 1.0f; + auto m = 1.0f; + while (term > significanceLimit) { + result += term; + ++m; + term *= (x * x) / (4 * m * m); + } + + return result; + } + inline static float bandwidthToBeta(float bandwidth, bool heuristicOptimal = false) { + if (heuristicOptimal) { // Heuristic based on numerical search + return bandwidth + 8.0f / (bandwidth + 3.0f) * (bandwidth + 3.0f) + + 0.25f * std::max(3.0f - bandwidth, 0.0f); + } + + bandwidth = std::max(bandwidth, 2.0f); + auto alpha = std::sqrt(bandwidth * bandwidth * 0.25f - 1.0f); + return alpha * PI; + } 
+}; + +// https://www.recordingblogs.com/wiki/gaussian-window +class ApproximateConfinedGaussian : public WindowFunction { + public: + explicit ApproximateConfinedGaussian(float sigma, float amplitude = 1.0f) + : WindowFunction(amplitude), gaussianFactor_(0.0625f / (sigma * sigma)) {} + + static ApproximateConfinedGaussian withBandwidth(float bandwidth, float amplitude = 1.0f) { + return ApproximateConfinedGaussian(bandwidthToSigma(bandwidth), amplitude); + } + + void apply(std::span data) const noexcept override { + const size_t size = data.size(); + if (size == 0) + return; + + const float g1 = getGaussian(1.0f); + const float g3 = getGaussian(3.0f); + const float g_1 = getGaussian(-1.0f); + const float g2 = getGaussian(2.0f); + + const float offsetScale = g1 / (g3 + g_1); + const float norm = 1.0f / (g1 - 2.0f * offsetScale * g2); + + const float invSize = 1.0f / static_cast(size); + const float totalAmplitude = norm * amplitude_; + + for (size_t i = 0; i < size; ++i) { + const float r = (static_cast(2 * i + 1) * invSize) - 1.0f; + + const float gR = getGaussian(r); + const float gRMinus2 = getGaussian(r - 2.0f); + const float gRPlus2 = getGaussian(r + 2.0f); + + data[i] = totalAmplitude * (gR - offsetScale * (gRMinus2 + gRPlus2)); + } + } + + private: + float gaussianFactor_; + + inline static float bandwidthToSigma(float bandwidth) noexcept { + return 0.3f / std::sqrt(bandwidth); + } + + [[nodiscard]] inline float getGaussian(float x) const noexcept { + return std::exp(-x * x * gaussianFactor_); + } +}; +} // namespace audioapi::dsp diff --git a/packages/react-native-audio-api/common/cpp/audioapi/jsi/AudioArrayBuffer.cpp b/packages/react-native-audio-api/common/cpp/audioapi/jsi/AudioArrayBuffer.cpp deleted file mode 100644 index 59bf02153..000000000 --- a/packages/react-native-audio-api/common/cpp/audioapi/jsi/AudioArrayBuffer.cpp +++ /dev/null @@ -1,13 +0,0 @@ -#include - -namespace audioapi { - -size_t AudioArrayBuffer::size() const { - return 
audioArray_->getSize() * sizeof(float); -} - -uint8_t *AudioArrayBuffer::data() { - return reinterpret_cast(audioArray_->getData()); -} - -} // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/jsi/AudioArrayBuffer.h b/packages/react-native-audio-api/common/cpp/audioapi/jsi/AudioArrayBuffer.h deleted file mode 100644 index 6469ae13a..000000000 --- a/packages/react-native-audio-api/common/cpp/audioapi/jsi/AudioArrayBuffer.h +++ /dev/null @@ -1,34 +0,0 @@ -#pragma once - -#include -#include - -#include -#include - -namespace audioapi { - -using namespace facebook; - -class AudioArrayBuffer : public jsi::MutableBuffer { - public: - explicit AudioArrayBuffer(const std::shared_ptr &audioArray) - : audioArray_(audioArray) {} - ~AudioArrayBuffer() override = default; - - AudioArrayBuffer(AudioArrayBuffer &&other) noexcept : audioArray_(std::move(other.audioArray_)) { - other.audioArray_ = nullptr; - } - - AudioArrayBuffer(const AudioArrayBuffer &) = delete; - AudioArrayBuffer &operator=(const AudioArrayBuffer &) = delete; - AudioArrayBuffer &operator=(AudioArrayBuffer &&other) = delete; - - [[nodiscard]] size_t size() const override; - uint8_t *data() override; - - private: - std::shared_ptr audioArray_; -}; - -} // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.cpp b/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.cpp index 32c10d691..59aabb408 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.cpp @@ -8,12 +8,11 @@ * FFmpeg, you must comply with the terms of the LGPL for FFmpeg itself. 
*/ -#include #if !RN_AUDIO_API_FFMPEG_DISABLED #include #endif // RN_AUDIO_API_FFMPEG_DISABLED #include -#include +#include #include namespace audioapi::ffmpegdecoder { @@ -25,8 +24,7 @@ int read_packet(void *opaque, uint8_t *buf, int buf_size) { return AVERROR_EOF; } - int bytes_to_read = - std::min(buf_size, static_cast(ctx->size - ctx->pos)); + int bytes_to_read = std::min(buf_size, static_cast(ctx->size - ctx->pos)); memcpy(buf, ctx->data + ctx->pos, bytes_to_read); ctx->pos += bytes_to_read; @@ -91,13 +89,9 @@ void convertFrameToBuffer( if (converted_samples > 0) { const size_t current_size = buffer.size(); - const size_t new_samples = - static_cast(converted_samples) * output_channel_count; + const size_t new_samples = static_cast(converted_samples) * output_channel_count; buffer.resize(current_size + new_samples); - memcpy( - buffer.data() + current_size, - resampled_data[0], - new_samples * sizeof(float)); + memcpy(buffer.data() + current_size, resampled_data[0], new_samples * sizeof(float)); framesRead += converted_samples; } } @@ -206,8 +200,7 @@ inline int findAudioStreamIndex(AVFormatContext *fmt_ctx) { bool setupDecoderContext( AVFormatContext *fmt_ctx, int &audio_stream_index, - std::unique_ptr> - &codec_ctx) { + std::unique_ptr> &codec_ctx) { audio_stream_index = findAudioStreamIndex(fmt_ctx); if (audio_stream_index == -1) { return false; @@ -241,37 +234,30 @@ std::shared_ptr decodeAudioFrames( int audio_stream_index, int sample_rate) { size_t framesRead = 0; - int output_sample_rate = - (sample_rate > 0) ? sample_rate : codec_ctx->sample_rate; + int output_sample_rate = (sample_rate > 0) ? 
sample_rate : codec_ctx->sample_rate; int output_channel_count = codec_ctx->ch_layout.nb_channels; std::vector decoded_buffer = readAllPcmFrames( - fmt_ctx, - codec_ctx, - output_sample_rate, - output_channel_count, - audio_stream_index, - framesRead); + fmt_ctx, codec_ctx, output_sample_rate, output_channel_count, audio_stream_index, framesRead); if (framesRead == 0 || decoded_buffer.empty()) { return nullptr; } auto outputFrames = decoded_buffer.size() / output_channel_count; - auto audioBus = std::make_shared( - outputFrames, output_channel_count, output_sample_rate); + auto audioBuffer = + std::make_shared(outputFrames, output_channel_count, output_sample_rate); - for (int ch = 0; ch < output_channel_count; ++ch) { - auto channelData = audioBus->getChannel(ch)->getData(); + for (size_t ch = 0; ch < output_channel_count; ++ch) { + auto channelData = audioBuffer->getChannel(ch)->span(); for (int i = 0; i < outputFrames; ++i) { channelData[i] = decoded_buffer[i * output_channel_count + ch]; } } - return std::make_shared(audioBus); + return audioBuffer; } -std::shared_ptr -decodeWithMemoryBlock(const void *data, size_t size, int sample_rate) { +std::shared_ptr decodeWithMemoryBlock(const void *data, size_t size, int sample_rate) { if (data == nullptr || size == 0) { return nullptr; } @@ -284,17 +270,9 @@ decodeWithMemoryBlock(const void *data, size_t size, int sample_rate) { return nullptr; } - auto avio_ctx = - std::unique_ptr>( - avio_alloc_context( - io_buffer, - buffer_size, - 0, - &io_ctx, - read_packet, - nullptr, - seek_packet), - [](AVIOContext *ctx) { avio_context_free(&ctx); }); + auto avio_ctx = std::unique_ptr>( + avio_alloc_context(io_buffer, buffer_size, 0, &io_ctx, read_packet, nullptr, seek_packet), + [](AVIOContext *ctx) { avio_context_free(&ctx); }); if (avio_ctx == nullptr) { return nullptr; } @@ -310,29 +288,24 @@ decodeWithMemoryBlock(const void *data, size_t size, int sample_rate) { return nullptr; } - auto fmt_ctx = - std::unique_ptr( - 
raw_fmt_ctx, &avformat_free_context); + auto fmt_ctx = std::unique_ptr( + raw_fmt_ctx, &avformat_free_context); if (avformat_find_stream_info(fmt_ctx.get(), nullptr) < 0) { return nullptr; } - auto codec_ctx = - std::unique_ptr>( - nullptr, [](AVCodecContext *ctx) { avcodec_free_context(&ctx); }); + auto codec_ctx = std::unique_ptr>( + nullptr, [](AVCodecContext *ctx) { avcodec_free_context(&ctx); }); int audio_stream_index = -1; if (!setupDecoderContext(fmt_ctx.get(), audio_stream_index, codec_ctx)) { return nullptr; } - return decodeAudioFrames( - fmt_ctx.get(), codec_ctx.get(), audio_stream_index, sample_rate); + return decodeAudioFrames(fmt_ctx.get(), codec_ctx.get(), audio_stream_index, sample_rate); } -std::shared_ptr decodeWithFilePath( - const std::string &path, - int sample_rate) { +std::shared_ptr decodeWithFilePath(const std::string &path, int sample_rate) { if (path.empty()) { return nullptr; } @@ -341,25 +314,21 @@ std::shared_ptr decodeWithFilePath( if (avformat_open_input(&raw_fmt_ctx, path.c_str(), nullptr, nullptr) < 0) return nullptr; - auto fmt_ctx = - std::unique_ptr>( - raw_fmt_ctx, - [](AVFormatContext *ctx) { avformat_close_input(&ctx); }); + auto fmt_ctx = std::unique_ptr>( + raw_fmt_ctx, [](AVFormatContext *ctx) { avformat_close_input(&ctx); }); if (avformat_find_stream_info(fmt_ctx.get(), nullptr) < 0) { return nullptr; } - auto codec_ctx = - std::unique_ptr>( - nullptr, [](AVCodecContext *ctx) { avcodec_free_context(&ctx); }); + auto codec_ctx = std::unique_ptr>( + nullptr, [](AVCodecContext *ctx) { avcodec_free_context(&ctx); }); int audio_stream_index = -1; if (!setupDecoderContext(fmt_ctx.get(), audio_stream_index, codec_ctx)) { return nullptr; } - return decodeAudioFrames( - fmt_ctx.get(), codec_ctx.get(), audio_stream_index, sample_rate); + return decodeAudioFrames(fmt_ctx.get(), codec_ctx.get(), audio_stream_index, sample_rate); } } // namespace audioapi::ffmpegdecoder diff --git 
a/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.h b/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.h index f00047d7f..63b813f01 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/libs/ffmpeg/FFmpegDecoding.h @@ -8,7 +8,7 @@ * FFmpeg, you must comply with the terms of the LGPL for FFmpeg itself. */ -#include +#include #include #include #include @@ -57,19 +57,15 @@ void convertFrameToBuffer( bool setupDecoderContext( AVFormatContext *fmt_ctx, int &audio_stream_index, - std::unique_ptr - &codec_ctx); + std::unique_ptr &codec_ctx); std::shared_ptr decodeAudioFrames( AVFormatContext *fmt_ctx, AVCodecContext *codec_ctx, int audio_stream_index, int sample_rate); -std::shared_ptr -decodeWithMemoryBlock(const void *data, size_t size, int sample_rate); +std::shared_ptr decodeWithMemoryBlock(const void *data, size_t size, int sample_rate); -std::shared_ptr decodeWithFilePath( - const std::string &path, - int sample_rate); +std::shared_ptr decodeWithFilePath(const std::string &path, int sample_rate); } // namespace audioapi::ffmpegdecoder diff --git a/packages/react-native-audio-api/common/cpp/audioapi/libs/signalsmith-stretch/fft-pffft.h b/packages/react-native-audio-api/common/cpp/audioapi/libs/signalsmith-stretch/fft-pffft.h index 1c687a8c9..8206de121 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/libs/signalsmith-stretch/fft-pffft.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/libs/signalsmith-stretch/fft-pffft.h @@ -1,6 +1,8 @@ #ifndef SIGNALSMITH_LINEAR_PLATFORM_FFT_PFFFT_H #define SIGNALSMITH_LINEAR_PLATFORM_FFT_PFFFT_H +#include + #if defined(__has_include) && !__has_include("pffft/pffft.h") # include #else diff --git a/packages/react-native-audio-api/common/cpp/audioapi/types/NodeOptions.h b/packages/react-native-audio-api/common/cpp/audioapi/types/NodeOptions.h index 
632ae86f0..c3316aeba 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/types/NodeOptions.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/types/NodeOptions.h @@ -6,13 +6,14 @@ #include #include -#include #include #include #include #include #include #include +#include +#include namespace audioapi { struct AudioNodeOptions { @@ -55,7 +56,7 @@ struct StereoPannerOptions : AudioNodeOptions { struct ConvolverOptions : AudioNodeOptions { bool disableNormalization = false; - std::shared_ptr bus; + std::shared_ptr buffer; ConvolverOptions() { requiresTailProcessing = true; @@ -138,7 +139,7 @@ struct IIRFilterOptions : AudioNodeOptions { }; struct WaveShaperOptions : AudioNodeOptions { - std::shared_ptr curve; + std::shared_ptr curve; OverSampleType oversample = OverSampleType::OVERSAMPLE_NONE; WaveShaperOptions() { diff --git a/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioArray.cpp b/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioArray.cpp index 0e321ed80..cf935dba0 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioArray.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioArray.cpp @@ -1,121 +1,202 @@ #include #include + #include +#include +#include namespace audioapi { -AudioArray::AudioArray(size_t size) : data_(nullptr), size_(size) { - resize(size); -} - -AudioArray::AudioArray(const AudioArray &other) : data_(nullptr), size_(0) { - resize(other.size_); - - copy(&other); +AudioArray::AudioArray(size_t size) : size_(size) { + if (size_ > 0) { + data_ = std::make_unique(size_); + zero(); + } } AudioArray::AudioArray(const float *data, size_t size) : size_(size) { - data_ = new float[size_]; - memcpy(data_, data, size_ * sizeof(float)); + if (size_ > 0) { + data_ = std::make_unique(size_); + copy(data, 0, 0, size_); + } } -AudioArray::~AudioArray() { - if (data_) { - delete[] data_; - data_ = nullptr; +AudioArray::AudioArray(const AudioArray &other) : 
size_(other.size_) { + if (size_ > 0 && other.data_) { + data_ = std::make_unique(size_); + copy(other); } } -size_t AudioArray::getSize() const { - return size_; +AudioArray::AudioArray(audioapi::AudioArray &&other) noexcept + : data_(std::move(other.data_)), size_(std::exchange(other.size_, 0)) {} + +AudioArray &AudioArray::operator=(const audioapi::AudioArray &other) { + if (this != &other) { + if (size_ != other.size_) { + size_ = other.size_; + data_ = (size_ > 0) ? std::make_unique(size_) : nullptr; + } + + if (size_ > 0 && data_) { + copy(other); + } + } + + return *this; } -float *AudioArray::getData() const { - return data_; +AudioArray &AudioArray::operator=(audioapi::AudioArray &&other) noexcept { + if (this != &other) { + data_ = std::move(other.data_); + size_ = std::exchange(other.size_, 0); + } + + return *this; } -float &AudioArray::operator[](size_t index) { - return data_[index]; +void AudioArray::zero() noexcept { + zero(0, size_); } -const float &AudioArray::operator[](size_t index) const { - return data_[index]; +void AudioArray::zero(size_t start, size_t length) noexcept { + memset(data_.get() + start, 0, length * sizeof(float)); } -void AudioArray::normalize() { - float maxAbsValue = getMaxAbsValue(); +void AudioArray::sum(const AudioArray &source, float gain) { + sum(source, 0, 0, size_, gain); +} - if (maxAbsValue == 0.0f || maxAbsValue == 1.0f) { - return; +void AudioArray::sum( + const AudioArray &source, + size_t sourceStart, + size_t destinationStart, + size_t length, + float gain) { + if (size_ - destinationStart < length || source.size_ - sourceStart < length) [[unlikely]] { + throw std::out_of_range("Not enough data to sum two vectors."); } - dsp::multiplyByScalar(data_, 1.0f / maxAbsValue, data_, size_); + // Using restrict to inform the compiler that the source and destination do not overlap + float *__restrict dest = data_.get() + destinationStart; + const float *__restrict src = source.data_.get() + sourceStart; + + 
dsp::multiplyByScalarThenAddToOutput(src, gain, dest, length); } -void AudioArray::resize(size_t size) { - if (size == size_) { - if (!data_) { - data_ = new float[size]; - } +void AudioArray::multiply(const AudioArray &source) { + multiply(source, size_); +} - zero(0, size); - return; +void AudioArray::multiply(const audioapi::AudioArray &source, size_t length) { + if (size_ < length || source.size_ < length) [[unlikely]] { + throw std::out_of_range("Not enough data to perform vector multiplication."); } - delete[] data_; - size_ = size; - data_ = new float[size_]; + float *__restrict dest = data_.get(); + const float *__restrict src = source.data_.get(); - zero(0, size_); + dsp::multiply(src, dest, dest, length); } -void AudioArray::scale(float value) { - dsp::multiplyByScalar(data_, value, data_, size_); +void AudioArray::copy(const AudioArray &source) { + copy(source, 0, 0, size_); } -float AudioArray::getMaxAbsValue() const { - return dsp::maximumMagnitude(data_, size_); -} +void AudioArray::copy( + const AudioArray &source, + size_t sourceStart, + size_t destinationStart, + size_t length) { + if (source.size_ - sourceStart < length) [[unlikely]] { + throw std::out_of_range("Not enough data to copy from source."); + } -void AudioArray::zero() { - zero(0, size_); + copy(source.data_.get(), sourceStart, destinationStart, length); } -void AudioArray::zero(size_t start, size_t length) { - memset(data_ + start, 0, length * sizeof(float)); -} +void AudioArray::copy( + const float *source, + size_t sourceStart, + size_t destinationStart, + size_t length) { + if (size_ - destinationStart < length) [[unlikely]] { + throw std::out_of_range("Not enough space to copy to destination."); + } -void AudioArray::sum(const AudioArray *source) { - sum(source, 0, 0, size_); + memcpy(data_.get() + destinationStart, source + sourceStart, length * sizeof(float)); } -void AudioArray::sum(const AudioArray *source, size_t start, size_t length) { - sum(source, start, start, length); 
+void AudioArray::copyReverse( + const audioapi::AudioArray &source, + size_t sourceStart, + size_t destinationStart, + size_t length) { + if (size_ - destinationStart < length || source.size_ - sourceStart < length) [[unlikely]] { + throw std::out_of_range("Not enough space to copy to destination or from source."); + } + + auto dstView = this->subSpan(length, destinationStart); + auto srcView = source.span(); + const float *__restrict srcPtr = &srcView[sourceStart]; + + for (size_t i = 0; i < length; ++i) { + dstView[i] = srcPtr[-static_cast(i)]; + } } -void AudioArray::sum( - const AudioArray *source, +void AudioArray::copyTo( + float *destination, size_t sourceStart, size_t destinationStart, - size_t length) { - dsp::add( - data_ + destinationStart, source->getData() + sourceStart, data_ + destinationStart, length); + size_t length) const { + if (size_ - sourceStart < length) [[unlikely]] { + throw std::out_of_range("Not enough data to copy from source."); + } + + memcpy(destination + destinationStart, data_.get() + sourceStart, length * sizeof(float)); } -void AudioArray::copy(const AudioArray *source) { - copy(source, 0, size_); +void AudioArray::copyWithin(size_t sourceStart, size_t destinationStart, size_t length) { + if (size_ - sourceStart < length || size_ - destinationStart < length) [[unlikely]] { + throw std::out_of_range("Not enough space for moving data or data to move."); + } + + memmove(data_.get() + destinationStart, data_.get() + sourceStart, length * sizeof(float)); } -void AudioArray::copy(const AudioArray *source, size_t start, size_t length) { - copy(source, start, start, length); +void AudioArray::reverse() { + if (size_ <= 1) { + return; + } + + std::reverse(begin(), end()); } -void AudioArray::copy( - const AudioArray *source, - size_t sourceStart, - size_t destinationStart, - size_t length) { - memcpy(data_ + destinationStart, source->getData() + sourceStart, length * sizeof(float)); +void AudioArray::normalize() { + float maxAbsValue = 
getMaxAbsValue(); + + if (maxAbsValue == 0.0f || maxAbsValue == 1.0f) { + return; + } + + dsp::multiplyByScalar(data_.get(), 1.0f / maxAbsValue, data_.get(), size_); +} + +void AudioArray::scale(float value) { + dsp::multiplyByScalar(data_.get(), value, data_.get(), size_); +} + +float AudioArray::getMaxAbsValue() const { + return dsp::maximumMagnitude(data_.get(), size_); +} + +float AudioArray::computeConvolution(const audioapi::AudioArray &kernel, size_t startIndex) const { + if (kernel.size_ > size_ - startIndex) [[unlikely]] { + throw std::out_of_range("Kernel size exceeds available data for convolution."); + } + + return dsp::computeConvolution(data_.get() + startIndex, kernel.data_.get(), kernel.size_); +} } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioArray.h b/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioArray.h index 9291f22ff..68b5f2881 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioArray.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioArray.h @@ -4,46 +4,156 @@ #include #include #include +#include namespace audioapi { +/// @brief AudioArray is a simple wrapper around a float array for audio data manipulation. +/// It provides various utility functions for audio processing. +/// @note AudioArray manages its own memory and provides copy and move semantics. +/// @note Not thread-safe. class AudioArray { public: explicit AudioArray(size_t size); - AudioArray(const AudioArray &other); - /// @brief Construct AudioArray from raw float data - /// @param data Pointer to the float data - /// @param size Number of float samples + /// @brief Constructs an AudioArray from existing data.
/// @note The data is copied, so it does not take ownership of the pointer AudioArray(const float *data, size_t size); - ~AudioArray(); + ~AudioArray() = default; - [[nodiscard]] size_t getSize() const; - [[nodiscard]] float *getData() const; + AudioArray(const AudioArray &other); + AudioArray(AudioArray &&other) noexcept; + AudioArray &operator=(const AudioArray &other); + AudioArray &operator=(AudioArray &&other) noexcept; - float &operator[](size_t index); - const float &operator[](size_t index) const; + [[nodiscard]] size_t getSize() const noexcept { + return size_; + } - void normalize(); - void resize(size_t size); - void scale(float value); - [[nodiscard]] float getMaxAbsValue() const; + float &operator[](size_t index) noexcept { + return data_[index]; + } + const float &operator[](size_t index) const noexcept { + return data_[index]; + } + + [[nodiscard]] float *begin() noexcept { + return data_.get(); + } + [[nodiscard]] float *end() noexcept { + return data_.get() + size_; + } + + [[nodiscard]] const float *begin() const noexcept { + return data_.get(); + } + [[nodiscard]] const float *end() const noexcept { + return data_.get() + size_; + } + + [[nodiscard]] std::span span() noexcept { + return {data_.get(), size_}; + } + + [[nodiscard]] std::span span() const noexcept { + return {data_.get(), size_}; + } + + [[nodiscard]] std::span subSpan(size_t length, size_t offset = 0) { + if (offset + length > size_) { + throw std::out_of_range("AudioArray::subSpan - offset + length exceeds array size"); + } + + return {data_.get() + offset, length}; + } + + void zero() noexcept; + void zero(size_t start, size_t length) noexcept; - void zero(); - void zero(size_t start, size_t length); + /// @brief Sums the source AudioArray into this AudioArray with an optional gain. + /// @param source The source AudioArray to sum from. + /// @param gain The gain to apply to the source before summing. Default is 1.0f. 
+ /// @note Assumes that source and this are located in two distinct, non-overlapping memory locations. + void sum(const AudioArray &source, float gain = 1.0f); - void sum(const AudioArray *source); - void sum(const AudioArray *source, size_t start, size_t length); - void sum(const AudioArray *source, size_t sourceStart, size_t destinationStart, size_t length); + /// @brief Sums the source AudioArray into this AudioArray with an optional gain. + /// @param source The source AudioArray to sum from. + /// @param sourceStart The starting index in the source AudioArray. + /// @param destinationStart The starting index in this AudioArray. + /// @param length The number of samples to sum. + /// @param gain The gain to apply to the source before summing. Default is 1.0f. + /// @note Assumes that source and this are located in two distinct, non-overlapping memory locations. + void sum( + const AudioArray &source, + size_t sourceStart, + size_t destinationStart, + size_t length, + float gain = 1.0f); - void copy(const AudioArray *source); - void copy(const AudioArray *source, size_t start, size_t length); - void copy(const AudioArray *source, size_t sourceStart, size_t destinationStart, size_t length); + /// @brief Multiplies this AudioArray by the source AudioArray element-wise. + /// @param source The source AudioArray to multiply with. + /// @note Assumes that source and this are located in two distinct, non-overlapping memory locations. + void multiply(const AudioArray &source); + + /// @brief Multiplies this AudioArray by the source AudioArray element-wise. + /// @param source The source AudioArray to multiply with. + /// @param length The number of samples to multiply. + /// @note Assumes that source and this are located in two distinct, non-overlapping memory locations. + void multiply(const AudioArray &source, size_t length); + + /// @brief Copies source AudioArray into this AudioArray + /// @param source The source AudioArray to copy. 
+ /// @note Assumes that source and this are located in two distinct, non-overlapping memory locations. + void copy(const AudioArray &source); + + /// @brief Copies source AudioArray into this AudioArray + /// @param source The source AudioArray to copy. + /// @param sourceStart The starting index in the source AudioArray. + /// @param destinationStart The starting index in this AudioArray. + /// @param length The number of samples to copy. + /// @note Assumes that source and this are located in two distinct, non-overlapping memory locations. + void copy(const AudioArray &source, size_t sourceStart, size_t destinationStart, size_t length); + + /// @brief Copies data from a raw float pointer into this AudioArray. + /// @param source The source float pointer to copy from. + /// @param sourceStart The starting index in the source float pointer. + /// @param destinationStart The starting index in this AudioArray. + /// @param length The number of samples to copy. + /// @note Assumes that source and this are located in two distinct, non-overlapping memory locations. + void copy(const float *source, size_t sourceStart, size_t destinationStart, size_t length); + + /// @brief Copies data from the source AudioArray in reverse order into this AudioArray. + /// @param source The source AudioArray to copy from. + /// @param sourceStart The starting index in the source AudioArray. + /// @param destinationStart The starting index in this AudioArray. + /// @param length The number of samples to copy. + /// @note Assumes that source and this are located in two distinct, non-overlapping memory locations. + void + copyReverse(const AudioArray &source, size_t sourceStart, size_t destinationStart, size_t length); + + /// @brief Copies data to a raw float pointer from this AudioArray. + /// @param destination The destination float pointer to copy to. + /// @param sourceStart The starting index in the this AudioArray. 
+ /// @param destinationStart The starting index in the destination float pointer. + /// @param length The number of samples to copy. + /// @note Assumes that destination and this are located in two distinct, non-overlapping memory locations. + void copyTo(float *destination, size_t sourceStart, size_t destinationStart, size_t length) const; + + /// @brief Copies a sub-section of the array to another location within itself. + /// @param sourceStart The index where the data to be copied begins. + /// @param destinationStart The index where the data should be placed. + /// @param length The number of samples to copy. + void copyWithin(size_t sourceStart, size_t destinationStart, size_t length); + + void reverse(); + void normalize(); + void scale(float value); + [[nodiscard]] float getMaxAbsValue() const; + [[nodiscard]] float computeConvolution(const AudioArray &kernel, size_t startIndex = 0) const; protected: - float *data_; - size_t size_; + std::unique_ptr data_ = nullptr; + size_t size_ = 0; }; } // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioArrayBuffer.hpp b/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioArrayBuffer.hpp new file mode 100644 index 000000000..33632bbc1 --- /dev/null +++ b/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioArrayBuffer.hpp @@ -0,0 +1,39 @@ +#pragma once + +#include +#include +#include + +#if !RN_AUDIO_API_TEST +#include +using JsiBuffer = facebook::jsi::MutableBuffer; +#else +// Dummy class to inherit from nothing if testing +struct JsiBuffer {}; +#endif + +namespace audioapi { + +class AudioArrayBuffer : public JsiBuffer, public AudioArray { + public: + explicit AudioArrayBuffer(size_t size) : AudioArray(size) {}; + AudioArrayBuffer(const float *data, size_t size) : AudioArray(data, size) {}; + +#if !RN_AUDIO_API_TEST + [[nodiscard]] size_t size() const override { + return size_ * sizeof(float); + } + uint8_t *data() override { + return 
reinterpret_cast(data_.get()); + } +#else + [[nodiscard]] size_t size() const { + return size_ * sizeof(float); + } + uint8_t *data() { + return reinterpret_cast(data_.get()); + } +#endif +}; + +} // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioBuffer.cpp b/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioBuffer.cpp new file mode 100644 index 000000000..587afc168 --- /dev/null +++ b/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioBuffer.cpp @@ -0,0 +1,493 @@ +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace audioapi { + +const float SQRT_HALF = sqrtf(0.5f); +constexpr int BLOCK_SIZE = 64; + +AudioBuffer::AudioBuffer(size_t size, int numberOfChannels, float sampleRate) + : numberOfChannels_(numberOfChannels), sampleRate_(sampleRate), size_(size) { + channels_.reserve(numberOfChannels_); + for (size_t i = 0; i < numberOfChannels_; ++i) { + channels_.emplace_back(std::make_shared(size_)); + } +} + +AudioBuffer::AudioBuffer(const AudioBuffer &other) + : numberOfChannels_(other.numberOfChannels_), + sampleRate_(other.sampleRate_), + size_(other.size_) { + channels_.reserve(numberOfChannels_); + for (const auto& channel : other.channels_) { + channels_.emplace_back(std::make_shared(*channel)); + } +} + +AudioBuffer::AudioBuffer(audioapi::AudioBuffer &&other) noexcept + : channels_(std::move(other.channels_)), + numberOfChannels_(std::exchange(other.numberOfChannels_, 0)), + sampleRate_(std::exchange(other.sampleRate_, 0.0f)), + size_(std::exchange(other.size_, 0)) {} + +AudioBuffer &AudioBuffer::operator=(const AudioBuffer &other) { + if (this != &other) { + sampleRate_ = other.sampleRate_; + + if (numberOfChannels_ != other.numberOfChannels_) { + numberOfChannels_ = other.numberOfChannels_; + size_ = other.size_; + channels_.clear(); + channels_.reserve(numberOfChannels_); + + for (const auto& channel : other.channels_) { + 
channels_.emplace_back(std::make_shared(*channel)); + } + + return *this; + } + + if (size_ != other.size_) { + size_ = other.size_; + } + + for (size_t i = 0; i < numberOfChannels_; ++i) { + *channels_[i] = *other.channels_[i]; + } + } + + return *this; +} + +AudioBuffer &AudioBuffer::operator=(audioapi::AudioBuffer &&other) noexcept { + if (this != &other) { + channels_ = std::move(other.channels_); + + numberOfChannels_ = std::exchange(other.numberOfChannels_, 0); + sampleRate_ = std::exchange(other.sampleRate_, 0.0f); + size_ = std::exchange(other.size_, 0); + } + return *this; +} + +AudioArray *AudioBuffer::getChannel(size_t index) const { + return channels_[index].get(); +} + +AudioArray *AudioBuffer::getChannelByType(int channelType) const { + auto it = kChannelLayouts.find(getNumberOfChannels()); + if (it == kChannelLayouts.end()) { + return nullptr; + } + const auto& channelOrder = it->second; + for (size_t i = 0; i < channelOrder.size(); ++i) { + if (channelOrder[i] == channelType) { + return getChannel(i); + } + } + + return nullptr; +} + +std::shared_ptr AudioBuffer::getSharedChannel(size_t index) const { + return channels_[index]; +} + +void AudioBuffer::zero() { + zero(0, getSize()); +} + +void AudioBuffer::zero(size_t start, size_t length) { + for (auto it = channels_.begin(); it != channels_.end(); it += 1) { + it->get()->zero(start, length); + } +} + +void AudioBuffer::sum(const AudioBuffer &source, ChannelInterpretation interpretation) { + sum(source, 0, 0, getSize(), interpretation); +} + +void AudioBuffer::sum( + const AudioBuffer &source, + size_t sourceStart, + size_t destinationStart, + size_t length, + ChannelInterpretation interpretation) { + if (&source == this) { + return; + } + + auto numberOfSourceChannels = source.getNumberOfChannels(); + auto numberOfChannels = getNumberOfChannels(); + + if (interpretation == ChannelInterpretation::DISCRETE) { + discreteSum(source, sourceStart, destinationStart, length); + return; + } + + // Source 
channel count is smaller than current buffer, we need to up-mix. + if (numberOfSourceChannels < numberOfChannels) { + sumByUpMixing(source, sourceStart, destinationStart, length); + return; + } + + // Source channel count is larger than current buffer, we need to down-mix. + if (numberOfSourceChannels > numberOfChannels) { + sumByDownMixing(source, sourceStart, destinationStart, length); + return; + } + + // Source and destination channel counts are the same. Just sum the channels. + for (size_t i = 0; i < getNumberOfChannels(); ++i) { + channels_[i]->sum(*source.channels_[i], sourceStart, destinationStart, length); + } +} + +void AudioBuffer::copy(const AudioBuffer &source) { + copy(source, 0, 0, getSize()); +} + +void AudioBuffer::copy( + const AudioBuffer &source, + size_t sourceStart, + size_t destinationStart, + size_t length) { + if (&source == this) { + return; + } + + if (source.getNumberOfChannels() == getNumberOfChannels()) { + for (size_t i = 0; i < getNumberOfChannels(); ++i) { + channels_[i]->copy(*source.channels_[i], sourceStart, destinationStart, length); + } + + return; + } + + // zero + sum is equivalent to copy, but takes care of up/down-mixing. 
+ zero(destinationStart, length); + sum(source, sourceStart, destinationStart, length); +} + +void AudioBuffer::deinterleaveFrom(const float *source, size_t frames) { + if (frames == 0) { + return; + } + + if (numberOfChannels_ == 1) { + channels_[0]->copy(source, 0, 0, frames); + return; + } + + if (numberOfChannels_ == 2) { + dsp::deinterleaveStereo(source, channels_[0]->begin(), channels_[1]->begin(), frames); + return; + } + + float *channelsPtrs[MAX_CHANNEL_COUNT]; + for (size_t i = 0; i < numberOfChannels_; ++i) { + channelsPtrs[i] = channels_[i]->begin(); + } + + for (size_t blockStart = 0; blockStart < frames; blockStart += BLOCK_SIZE) { + size_t blockEnd = std::min(blockStart + BLOCK_SIZE, frames); + for (size_t i = blockStart; i < blockEnd; ++i) { + const float *frameSource = source + (i * numberOfChannels_); + for (size_t ch = 0; ch < numberOfChannels_; ++ch) { + channelsPtrs[ch][i] = frameSource[ch]; + } + } + } +} + +void AudioBuffer::interleaveTo(float *destination, size_t frames) const { + if (frames == 0) { + return; + } + + if (numberOfChannels_ == 1) { + channels_[0]->copyTo(destination, 0, 0, frames); + return; + } + + if (numberOfChannels_ == 2) { + dsp::interleaveStereo(channels_[0]->begin(), channels_[1]->begin(), destination, frames); + return; + } + + float *channelsPtrs[MAX_CHANNEL_COUNT]; + for (size_t i = 0; i < numberOfChannels_; ++i) { + channelsPtrs[i] = channels_[i]->begin(); + } + + for (size_t blockStart = 0; blockStart < frames; blockStart += BLOCK_SIZE) { + size_t blockEnd = std::min(blockStart + BLOCK_SIZE, frames); + for (size_t i = blockStart; i < blockEnd; ++i) { + float *frameDest = destination + (i * numberOfChannels_); + for (size_t ch = 0; ch < numberOfChannels_; ++ch) { + frameDest[ch] = channelsPtrs[ch][i]; + } + } + } +} + +void AudioBuffer::normalize() { + float maxAbsValue = this->maxAbsValue(); + + if (maxAbsValue == 0.0f || maxAbsValue == 1.0f) { + return; + } + + float scale = 1.0f / maxAbsValue; + 
this->scale(scale); +} + +void AudioBuffer::scale(float value) { + for (auto &channel : channels_) { + channel->scale(value); + } +} + +float AudioBuffer::maxAbsValue() const { + float maxAbsValue = 1.0f; + + for (const auto &channel : channels_) { + float channelMaxAbsValue = channel->getMaxAbsValue(); + maxAbsValue = std::max(maxAbsValue, channelMaxAbsValue); + } + + return maxAbsValue; +} + +/** + * Internal tooling - channel initialization + */ + +void AudioBuffer::discreteSum( + const AudioBuffer &source, + size_t sourceStart, + size_t destinationStart, + size_t length) const { + auto numberOfChannels = std::min(getNumberOfChannels(), source.getNumberOfChannels()); + + // In case of source > destination, we "down-mix" and drop the extra channels. + // In case of source < destination, we "up-mix" as many channels as we have, + // leaving the remaining channels untouched. + for (size_t i = 0; i < numberOfChannels; i++) { + channels_[i]->sum(*source.channels_[i], sourceStart, destinationStart, length); + } +} + +void AudioBuffer::sumByUpMixing( + const AudioBuffer &source, + size_t sourceStart, + size_t destinationStart, + size_t length) { + auto numberOfSourceChannels = source.getNumberOfChannels(); + auto numberOfChannels = getNumberOfChannels(); + + // Mono to stereo (1 -> 2, 4) + if (numberOfSourceChannels == 1 && (numberOfChannels == 2 || numberOfChannels == 4)) { + AudioArray *sourceChannel = source.getChannelByType(ChannelMono); + + getChannelByType(ChannelLeft)->sum(*sourceChannel, sourceStart, destinationStart, length); + getChannelByType(ChannelRight)->sum(*sourceChannel, sourceStart, destinationStart, length); + return; + } + + // Mono to 5.1 (1 -> 6) + if (numberOfSourceChannels == 1 && numberOfChannels == 6) { + AudioArray *sourceChannel = source.getChannel(0); + + getChannelByType(ChannelCenter)->sum(*sourceChannel, sourceStart, destinationStart, length); + return; + } + + // Stereo 2 to stereo 4 or 5.1 (2 -> 4, 6) + if (numberOfSourceChannels == 2 
&& (numberOfChannels == 4 || numberOfChannels == 6)) { + getChannelByType(ChannelLeft) + ->sum(*source.getChannelByType(ChannelLeft), sourceStart, destinationStart, length); + getChannelByType(ChannelRight) + ->sum(*source.getChannelByType(ChannelRight), sourceStart, destinationStart, length); + return; + } + + // Stereo 4 to 5.1 (4 -> 6) + if (numberOfSourceChannels == 4 && numberOfChannels == 6) { + getChannelByType(ChannelLeft) + ->sum(*source.getChannelByType(ChannelLeft), sourceStart, destinationStart, length); + getChannelByType(ChannelRight) + ->sum(*source.getChannelByType(ChannelRight), sourceStart, destinationStart, length); + getChannelByType(ChannelSurroundLeft) + ->sum(*source.getChannelByType(ChannelSurroundLeft), sourceStart, destinationStart, length); + getChannelByType(ChannelSurroundRight) + ->sum( + *source.getChannelByType(ChannelSurroundRight), sourceStart, destinationStart, length); + return; + } + + discreteSum(source, sourceStart, destinationStart, length); +} + +void AudioBuffer::sumByDownMixing( + const AudioBuffer &source, + size_t sourceStart, + size_t destinationStart, + size_t length) { + auto numberOfSourceChannels = source.getNumberOfChannels(); + auto numberOfChannels = getNumberOfChannels(); + + // Stereo to mono (2 -> 1): output += 0.5 * (input.left + input.right). 
+ if (numberOfSourceChannels == 2 && numberOfChannels == 1) { + auto destinationData = getChannelByType(ChannelMono); + + destinationData->sum( + *source.getChannelByType(ChannelLeft), sourceStart, destinationStart, length, 0.5f); + destinationData->sum( + *source.getChannelByType(ChannelRight), sourceStart, destinationStart, length, 0.5f); + return; + } + + // Stereo 4 to mono (4 -> 1): + // output += 0.25 * (input.left + input.right + input.surroundLeft + + // input.surroundRight) + if (numberOfSourceChannels == 4 && numberOfChannels == 1) { + auto destinationData = getChannelByType(ChannelMono); + + destinationData->sum( + *source.getChannelByType(ChannelLeft), sourceStart, destinationStart, length, 0.25f); + destinationData->sum( + *source.getChannelByType(ChannelRight), sourceStart, destinationStart, length, 0.25f); + destinationData->sum( + *source.getChannelByType(ChannelSurroundLeft), + sourceStart, + destinationStart, + length, + 0.25f); + destinationData->sum( + *source.getChannelByType(ChannelSurroundRight), + sourceStart, + destinationStart, + length, + 0.25f); + return; + } + + // 5.1 to mono (6 -> 1): + // output += sqrt(1/2) * (input.left + input.right) + input.center + 0.5 * + // (input.surroundLeft + input.surroundRight) + if (numberOfSourceChannels == 6 && numberOfChannels == 1) { + auto destinationData = getChannelByType(ChannelMono); + + destinationData->sum( + *source.getChannelByType(ChannelLeft), sourceStart, destinationStart, length, SQRT_HALF); + destinationData->sum( + *source.getChannelByType(ChannelRight), sourceStart, destinationStart, length, SQRT_HALF); + destinationData->sum( + *source.getChannelByType(ChannelCenter), sourceStart, destinationStart, length); + destinationData->sum( + *source.getChannelByType(ChannelSurroundLeft), sourceStart, destinationStart, length, 0.5f); + destinationData->sum( + *source.getChannelByType(ChannelSurroundRight), + sourceStart, + destinationStart, + length, + 0.5f); + return; + } + + // Stereo 4 to 
stereo 2 (4 -> 2): + // output.left += 0.5 * (input.left + input.surroundLeft) + // output.right += 0.5 * (input.right + input.surroundRight) + if (numberOfSourceChannels == 4 && numberOfChannels == 2) { + auto destinationLeft = getChannelByType(ChannelLeft); + auto destinationRight = getChannelByType(ChannelRight); + + destinationLeft->sum( + *source.getChannelByType(ChannelLeft), sourceStart, destinationStart, length, 0.5f); + destinationLeft->sum( + *source.getChannelByType(ChannelSurroundLeft), sourceStart, destinationStart, length, 0.5f); + + destinationRight->sum( + *source.getChannelByType(ChannelRight), sourceStart, destinationStart, length, 0.5f); + destinationRight->sum( + *source.getChannelByType(ChannelSurroundRight), + sourceStart, + destinationStart, + length, + 0.5f); + return; + } + + // 5.1 to stereo (6 -> 2): + // output.left += input.left + sqrt(1/2) * (input.center + input.surroundLeft) + // output.right += input.right + sqrt(1/2) * (input.center + + // input.surroundRight) + if (numberOfSourceChannels == 6 && numberOfChannels == 2) { + auto destinationLeft = getChannelByType(ChannelLeft); + auto destinationRight = getChannelByType(ChannelRight); + + destinationLeft->sum( + *source.getChannelByType(ChannelLeft), sourceStart, destinationStart, length); + destinationLeft->sum( + *source.getChannelByType(ChannelCenter), sourceStart, destinationStart, length, SQRT_HALF); + destinationLeft->sum( + *source.getChannelByType(ChannelSurroundLeft), + sourceStart, + destinationStart, + length, + SQRT_HALF); + + destinationRight->sum( + *source.getChannelByType(ChannelRight), sourceStart, destinationStart, length); + destinationRight->sum( + *source.getChannelByType(ChannelCenter), sourceStart, destinationStart, length, SQRT_HALF); + destinationRight->sum( + *source.getChannelByType(ChannelSurroundRight), + sourceStart, + destinationStart, + length, + SQRT_HALF); + return; + } + + // 5.1 to stereo 4 (6 -> 4): + // output.left += input.left + sqrt(1/2) * 
input.center + // output.right += input.right + sqrt(1/2) * input.center + // output.surroundLeft += input.surroundLeft + // output.surroundRight += input.surroundRight + if (numberOfSourceChannels == 6 && numberOfChannels == 4) { + auto destinationLeft = getChannelByType(ChannelLeft); + auto destinationRight = getChannelByType(ChannelRight); + auto destinationSurroundLeft = getChannelByType(ChannelSurroundLeft); + auto destinationSurroundRight = getChannelByType(ChannelSurroundRight); + + destinationLeft->sum( + *source.getChannelByType(ChannelLeft), sourceStart, destinationStart, length); + destinationLeft->sum( + *source.getChannelByType(ChannelCenter), sourceStart, destinationStart, length, SQRT_HALF); + + destinationRight->sum( + *source.getChannelByType(ChannelRight), sourceStart, destinationStart, length); + destinationRight->sum( + *source.getChannelByType(ChannelCenter), sourceStart, destinationStart, length, SQRT_HALF); + + destinationSurroundLeft->sum( + *source.getChannelByType(ChannelSurroundLeft), sourceStart, destinationStart, length); + destinationSurroundRight->sum( + *source.getChannelByType(ChannelSurroundRight), sourceStart, destinationStart, length); + return; + } + + discreteSum(source, sourceStart, destinationStart, length); +} + +} // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioBuffer.h b/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioBuffer.h new file mode 100644 index 000000000..8b17f5301 --- /dev/null +++ b/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioBuffer.h @@ -0,0 +1,177 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace audioapi { + +class AudioArrayBuffer; + +class AudioBuffer { + public: + enum { + ChannelMono = 0, + ChannelLeft = 0, + ChannelRight = 1, + ChannelCenter = 2, + ChannelLFE = 3, + ChannelSurroundLeft = 4, + ChannelSurroundRight = 5, + }; + + explicit AudioBuffer() = 
default; + explicit AudioBuffer(size_t size, int numberOfChannels, float sampleRate); + AudioBuffer(const AudioBuffer &other); + AudioBuffer(AudioBuffer &&other) noexcept; + AudioBuffer &operator=(const AudioBuffer &other); + AudioBuffer &operator=(AudioBuffer &&other) noexcept; + ~AudioBuffer() = default; + + [[nodiscard]] size_t getNumberOfChannels() const noexcept { + return numberOfChannels_; + } + [[nodiscard]] float getSampleRate() const noexcept { + return sampleRate_; + } + [[nodiscard]] size_t getSize() const noexcept { + return size_; + } + + [[nodiscard]] double getDuration() const noexcept { + return static_cast(size_) / static_cast(getSampleRate()); + } + + /// @brief Get the AudioArray for a specific channel index. + /// @param index The channel index. + /// @return Pointer to the AudioArray for the specified channel - not owning. + [[nodiscard]] AudioArray *getChannel(size_t index) const; + + /// @brief Get the AudioArray for a specific channel type. + /// @param channelType The channel type: ChannelMono = 0, ChannelLeft = 0, + /// ChannelRight = 1, ChannelCenter = 2, ChannelLFE = 3, + /// ChannelSurroundLeft = 4, ChannelSurroundRight = 5 + /// @return Pointer to the AudioArray for the specified channel type - not owning. + [[nodiscard]] AudioArray *getChannelByType(int channelType) const; + + /// @brief Get a copy of shared pointer to the AudioArray for a specific channel index. + /// @param index The channel index. + /// @return Copy of shared pointer to the AudioArray for the specified channel + [[nodiscard]] std::shared_ptr getSharedChannel(size_t index) const; + + AudioArray &operator[](size_t index) { + return *channels_[index]; + } + const AudioArray &operator[](size_t index) const { + return *channels_[index]; + } + + void zero(); + void zero(size_t start, size_t length); + + /// @brief Sums audio data from a source AudioBuffer into this AudioBuffer. + /// @param source The source AudioBuffer to sum from. 
+ /// @param interpretation The channel interpretation to use for summing (default is SPEAKERS). + /// @note Handles up-mixing and down-mixing based on the number of channels in both buffers. + /// Assumes that source and this are located in two distinct, non-overlapping memory locations. + void sum( + const AudioBuffer &source, + ChannelInterpretation interpretation = ChannelInterpretation::SPEAKERS); + + /// @brief Sums audio data from a source AudioBuffer into this AudioBuffer. + /// @param source The source AudioBuffer to sum from. + /// @param sourceStart The starting index in the source AudioBuffer. + /// @param destinationStart The starting index in this AudioBuffer. + /// @param length The number of samples to sum. + /// @param interpretation The channel interpretation to use for summing (default is SPEAKERS). + /// @note Handles up-mixing and down-mixing based on the number of channels in both buffers. + /// Assumes that source and this are located in two distinct, non-overlapping memory locations. + void sum( + const AudioBuffer &source, + size_t sourceStart, + size_t destinationStart, + size_t length, + ChannelInterpretation interpretation = ChannelInterpretation::SPEAKERS); + + /// @brief Copies audio data from a source AudioBuffer into this AudioBuffer. + /// @param source The source AudioBuffer to copy from. + /// @note Handles up-mixing and down-mixing based on the number of channels in both buffers. + /// Assumes that source and this are located in two distinct, non-overlapping memory locations. + void copy(const AudioBuffer &source); + + /// @brief Copies audio data from a source AudioBuffer into this AudioBuffer. + /// @param source The source AudioBuffer to copy from. + /// @param sourceStart The starting index in the source AudioBuffer. + /// @param destinationStart The starting index in this AudioBuffer. + /// @param length The number of samples to copy. 
+ /// @note Handles up-mixing and down-mixing based on the number of channels in both buffers. + /// Assumes that source and this are located in two distinct, non-overlapping memory locations. + void copy(const AudioBuffer &source, size_t sourceStart, size_t destinationStart, size_t length); + + /// @brief Deinterleave audio data from a source buffer into this AudioBuffer. + /// @param source Pointer to the source buffer containing interleaved audio data. + /// @param frames Number of frames to deinterleave from the source buffer. + /// @note The source buffer should contain interleaved audio data according to the number of channels in this AudioBuffer. + /// Example of interleaved data for stereo (2 channels): + /// [L0, R0, L1, R1, L2, R2, ...] + /// Assumes that source and this are located in two distinct, non-overlapping memory locations. + void deinterleaveFrom(const float *source, size_t frames); + + /// @brief Interleave audio data from this AudioBuffer into a destination buffer. + /// @param destination Pointer to the destination buffer where interleaved audio data will be written. + /// @param frames Number of frames to interleave into the destination buffer. + /// @note The destination buffer should have enough space to hold the interleaved data + /// according to the number of channels in this AudioBuffer. + /// Example of interleaved data for stereo (2 channels): + /// [L0, R0, L1, R1, L2, R2, ...] + /// Assumes that this and destination are located in two distinct, non-overlapping memory locations. 
+ void interleaveTo(float *destination, size_t frames) const; + + void normalize(); + void scale(float value); + [[nodiscard]] float maxAbsValue() const; + + private: + std::vector> channels_; + + size_t numberOfChannels_ = 0; + float sampleRate_ = 0.0f; + size_t size_ = 0; + + inline static const std::unordered_map> kChannelLayouts = { + {1, {ChannelMono}}, + {2, {ChannelLeft, ChannelRight}}, + {4, {ChannelLeft, ChannelRight, ChannelSurroundLeft, ChannelSurroundRight}}, + {5, {ChannelLeft, ChannelRight, ChannelCenter, ChannelSurroundLeft, ChannelSurroundRight}}, + {6, + {ChannelLeft, + ChannelRight, + ChannelCenter, + ChannelLFE, + ChannelSurroundLeft, + ChannelSurroundRight}}}; + + void discreteSum( + const AudioBuffer &source, + size_t sourceStart, + size_t destinationStart, + size_t length) const; + void sumByUpMixing( + const AudioBuffer &source, + size_t sourceStart, + size_t destinationStart, + size_t length); + void sumByDownMixing( + const AudioBuffer &source, + size_t sourceStart, + size_t destinationStart, + size_t length); +}; + +} // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioBus.cpp b/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioBus.cpp deleted file mode 100644 index 5276b434b..000000000 --- a/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioBus.cpp +++ /dev/null @@ -1,557 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - -// Implementation of channel summing/mixing is based on the WebKit approach, -// source: -// https://github.com/WebKit/WebKit/blob/main/Source/WebCore/platform/audio/AudioBus.cpp - -const float SQRT_HALF = sqrtf(0.5f); - -namespace audioapi { - -/** - * Public interfaces - memory management - */ - -AudioBus::AudioBus(size_t size, int numberOfChannels, float sampleRate) - : numberOfChannels_(numberOfChannels), sampleRate_(sampleRate), size_(size) { - createChannels(); -} - -AudioBus::AudioBus(const AudioBus 
&other) { - numberOfChannels_ = other.numberOfChannels_; - sampleRate_ = other.sampleRate_; - size_ = other.size_; - - createChannels(); - - for (int i = 0; i < numberOfChannels_; i += 1) { - channels_[i] = std::make_shared(*other.channels_[i]); - } -} - -AudioBus::AudioBus(AudioBus &&other) noexcept - : channels_(std::move(other.channels_)), - numberOfChannels_(other.numberOfChannels_), - sampleRate_(other.sampleRate_), - size_(other.size_) { - other.numberOfChannels_ = 0; - other.sampleRate_ = 0.0f; - other.size_ = 0; -} - -AudioBus &AudioBus::operator=(const AudioBus &other) { - if (this == &other) { - return *this; - } - - numberOfChannels_ = other.numberOfChannels_; - sampleRate_ = other.sampleRate_; - size_ = other.size_; - - createChannels(); - - for (int i = 0; i < numberOfChannels_; i += 1) { - channels_[i] = std::make_shared(*other.channels_[i]); - } - - return *this; -} - -AudioBus::~AudioBus() { - channels_.clear(); -} - -/** - * Public interfaces - getters - */ - -int AudioBus::getNumberOfChannels() const { - return numberOfChannels_; -} - -float AudioBus::getSampleRate() const { - return sampleRate_; -} - -size_t AudioBus::getSize() const { - return size_; -} - -AudioArray *AudioBus::getChannel(int index) const { - return channels_[index].get(); -} - -AudioArray *AudioBus::getChannelByType(int channelType) const { - switch (getNumberOfChannels()) { - case 1: // mono - if (channelType == ChannelMono) { - return getChannel(0); - } - return nullptr; - - case 2: // stereo - switch (channelType) { - case ChannelLeft: - return getChannel(0); - case ChannelRight: - return getChannel(1); - default: - return nullptr; - } - - case 4: // quad - switch (channelType) { - case ChannelLeft: - return getChannel(0); - case ChannelRight: - return getChannel(1); - case ChannelSurroundLeft: - return getChannel(2); - case ChannelSurroundRight: - return getChannel(3); - default: - return nullptr; - } - - case 5: // 5.0 - switch (channelType) { - case ChannelLeft: - return 
getChannel(0); - case ChannelRight: - return getChannel(1); - case ChannelCenter: - return getChannel(2); - case ChannelSurroundLeft: - return getChannel(3); - case ChannelSurroundRight: - return getChannel(4); - default: - return nullptr; - } - - case 6: // 5.1 - switch (channelType) { - case ChannelLeft: - return getChannel(0); - case ChannelRight: - return getChannel(1); - case ChannelCenter: - return getChannel(2); - case ChannelLFE: - return getChannel(3); - case ChannelSurroundLeft: - return getChannel(4); - case ChannelSurroundRight: - return getChannel(5); - default: - return nullptr; - } - default: - return nullptr; - } -} - -std::shared_ptr AudioBus::getSharedChannel(int index) const { - return channels_[index]; -} - -AudioArray &AudioBus::operator[](size_t index) { - return *channels_[index]; -} - -const AudioArray &AudioBus::operator[](size_t index) const { - return *channels_[index]; -} - -/** - * Public interfaces - audio processing and setters - */ - -void AudioBus::zero() { - zero(0, getSize()); -} - -void AudioBus::zero(size_t start, size_t length) { - for (auto it = channels_.begin(); it != channels_.end(); it += 1) { - it->get()->zero(start, length); - } -} - -void AudioBus::normalize() { - float maxAbsValue = this->maxAbsValue(); - - if (maxAbsValue == 0.0f || maxAbsValue == 1.0f) { - return; - } - - float scale = 1.0f / maxAbsValue; - this->scale(scale); -} - -void AudioBus::scale(float value) { - for (auto &channel : channels_) { - channel->scale(value); - } -} - -float AudioBus::maxAbsValue() const { - float maxAbsValue = 1.0f; - - for (const auto &channel : channels_) { - float channelMaxAbsValue = channel->getMaxAbsValue(); - maxAbsValue = std::max(maxAbsValue, channelMaxAbsValue); - } - - return maxAbsValue; -} - -void AudioBus::sum(const AudioBus *source, ChannelInterpretation interpretation) { - sum(source, 0, 0, getSize(), interpretation); -} - -void AudioBus::sum( - const AudioBus *source, - size_t start, - size_t length, - 
ChannelInterpretation interpretation) { - sum(source, start, start, length, interpretation); -} - -void AudioBus::sum( - const AudioBus *source, - size_t sourceStart, - size_t destinationStart, - size_t length, - ChannelInterpretation interpretation) { - if (source == this) { - return; - } - - int numberOfSourceChannels = source->getNumberOfChannels(); - int numberOfChannels = getNumberOfChannels(); - - if (interpretation == ChannelInterpretation::DISCRETE) { - discreteSum(source, sourceStart, destinationStart, length); - return; - } - - // Source channel count is smaller than current bus, we need to up-mix. - if (numberOfSourceChannels < numberOfChannels) { - sumByUpMixing(source, sourceStart, destinationStart, length); - return; - } - - // Source channel count is larger than current bus, we need to down-mix. - if (numberOfSourceChannels > numberOfChannels) { - sumByDownMixing(source, sourceStart, destinationStart, length); - return; - } - - // Source and destination channel counts are the same. Just sum the channels. - for (int i = 0; i < numberOfChannels_; i += 1) { - getChannel(i)->sum(source->getChannel(i), sourceStart, destinationStart, length); - } -} - -void AudioBus::copy(const AudioBus *source) { - copy(source, 0, 0, getSize()); -} - -void AudioBus::copy(const AudioBus *source, size_t start, size_t length) { - copy(source, start, start, length); -} - -void AudioBus::copy( - const AudioBus *source, - size_t sourceStart, - size_t destinationStart, - size_t length) { - if (source == this) { - return; - } - - if (source->getNumberOfChannels() == getNumberOfChannels()) { - for (int i = 0; i < getNumberOfChannels(); i += 1) { - getChannel(i)->copy(source->getChannel(i), sourceStart, destinationStart, length); - } - - return; - } - - // zero + sum is equivalent to copy, but takes care of up/down-mixing. 
- zero(destinationStart, length); - sum(source, sourceStart, destinationStart, length); -} - -/** - * Internal tooling - channel initialization - */ - -void AudioBus::createChannels() { - channels_ = std::vector>(numberOfChannels_); - - for (int i = 0; i < numberOfChannels_; i += 1) { - channels_[i] = std::make_shared(size_); - } -} - -/** - * Internal tooling - channel summing - */ - -void AudioBus::discreteSum( - const AudioBus *source, - size_t sourceStart, - size_t destinationStart, - size_t length) const { - int numberOfChannels = std::min(getNumberOfChannels(), source->getNumberOfChannels()); - - // In case of source > destination, we "down-mix" and drop the extra channels. - // In case of source < destination, we "up-mix" as many channels as we have, - // leaving the remaining channels untouched. - for (int i = 0; i < numberOfChannels; i++) { - getChannel(i)->sum(source->getChannel(i), sourceStart, destinationStart, length); - } -} - -void AudioBus::sumByUpMixing( - const AudioBus *source, - size_t sourceStart, - size_t destinationStart, - size_t length) { - int numberOfSourceChannels = source->getNumberOfChannels(); - int numberOfChannels = getNumberOfChannels(); - - // Mono to stereo (1 -> 2, 4) - if (numberOfSourceChannels == 1 && (numberOfChannels == 2 || numberOfChannels == 4)) { - AudioArray *sourceChannel = source->getChannelByType(ChannelMono); - - getChannelByType(ChannelLeft)->sum(sourceChannel, sourceStart, destinationStart, length); - getChannelByType(ChannelRight)->sum(sourceChannel, sourceStart, destinationStart, length); - return; - } - - // Mono to 5.1 (1 -> 6) - if (numberOfSourceChannels == 1 && numberOfChannels == 6) { - AudioArray *sourceChannel = source->getChannel(0); - - getChannelByType(ChannelCenter)->sum(sourceChannel, sourceStart, destinationStart, length); - return; - } - - // Stereo 2 to stereo 4 or 5.1 (2 -> 4, 6) - if (numberOfSourceChannels == 2 && (numberOfChannels == 4 || numberOfChannels == 6)) { - 
getChannelByType(ChannelLeft) - ->sum(source->getChannelByType(ChannelLeft), sourceStart, destinationStart, length); - getChannelByType(ChannelRight) - ->sum(source->getChannelByType(ChannelRight), sourceStart, destinationStart, length); - return; - } - - // Stereo 4 to 5.1 (4 -> 6) - if (numberOfSourceChannels == 4 && numberOfChannels == 6) { - getChannelByType(ChannelLeft) - ->sum(source->getChannelByType(ChannelLeft), sourceStart, destinationStart, length); - getChannelByType(ChannelRight) - ->sum(source->getChannelByType(ChannelRight), sourceStart, destinationStart, length); - getChannelByType(ChannelSurroundLeft) - ->sum(source->getChannelByType(ChannelSurroundLeft), sourceStart, destinationStart, length); - getChannelByType(ChannelSurroundRight) - ->sum( - source->getChannelByType(ChannelSurroundRight), sourceStart, destinationStart, length); - return; - } - - discreteSum(source, sourceStart, destinationStart, length); -} - -void AudioBus::sumByDownMixing( - const AudioBus *source, - size_t sourceStart, - size_t destinationStart, - size_t length) { - int numberOfSourceChannels = source->getNumberOfChannels(); - int numberOfChannels = getNumberOfChannels(); - - // Stereo to mono (2 -> 1): output += 0.5 * (input.left + input.right). 
- if (numberOfSourceChannels == 2 && numberOfChannels == 1) { - float *sourceLeft = source->getChannelByType(ChannelLeft)->getData(); - float *sourceRight = source->getChannelByType(ChannelRight)->getData(); - - float *destinationData = getChannelByType(ChannelMono)->getData(); - - dsp::multiplyByScalarThenAddToOutput( - sourceLeft + sourceStart, 0.5f, destinationData + destinationStart, length); - dsp::multiplyByScalarThenAddToOutput( - sourceRight + sourceStart, 0.5f, destinationData + destinationStart, length); - return; - } - - // Stereo 4 to mono (4 -> 1): - // output += 0.25 * (input.left + input.right + input.surroundLeft + - // input.surroundRight) - if (numberOfSourceChannels == 4 && numberOfChannels == 1) { - float *sourceLeft = source->getChannelByType(ChannelLeft)->getData(); - float *sourceRight = source->getChannelByType(ChannelRight)->getData(); - float *sourceSurroundLeft = source->getChannelByType(ChannelSurroundLeft)->getData(); - float *sourceSurroundRight = source->getChannelByType(ChannelSurroundRight)->getData(); - - float *destinationData = getChannelByType(ChannelMono)->getData(); - - dsp::multiplyByScalarThenAddToOutput( - sourceLeft + sourceStart, 0.25f, destinationData + destinationStart, length); - dsp::multiplyByScalarThenAddToOutput( - sourceRight + sourceStart, 0.25f, destinationData + destinationStart, length); - dsp::multiplyByScalarThenAddToOutput( - sourceSurroundLeft + sourceStart, 0.25f, destinationData + destinationStart, length); - dsp::multiplyByScalarThenAddToOutput( - sourceSurroundRight + sourceStart, 0.25f, destinationData + destinationStart, length); - return; - } - - // 5.1 to mono (6 -> 1): - // output += sqrt(1/2) * (input.left + input.right) + input.center + 0.5 * - // (input.surroundLeft + input.surroundRight) - if (numberOfSourceChannels == 6 && numberOfChannels == 1) { - float *sourceLeft = source->getChannelByType(ChannelLeft)->getData(); - float *sourceRight = source->getChannelByType(ChannelRight)->getData(); - 
float *sourceCenter = source->getChannelByType(ChannelCenter)->getData(); - float *sourceSurroundLeft = source->getChannelByType(ChannelSurroundLeft)->getData(); - float *sourceSurroundRight = source->getChannelByType(ChannelSurroundRight)->getData(); - - float *destinationData = getChannelByType(ChannelMono)->getData(); - - dsp::multiplyByScalarThenAddToOutput( - sourceLeft + sourceStart, SQRT_HALF, destinationData + destinationStart, length); - dsp::multiplyByScalarThenAddToOutput( - sourceRight + sourceStart, SQRT_HALF, destinationData + destinationStart, length); - dsp::add( - sourceCenter + sourceStart, - destinationData + destinationStart, - destinationData + destinationStart, - length); - dsp::multiplyByScalarThenAddToOutput( - sourceSurroundLeft + sourceStart, 0.5f, destinationData + destinationStart, length); - dsp::multiplyByScalarThenAddToOutput( - sourceSurroundRight + sourceStart, 0.5f, destinationData + destinationStart, length); - - return; - } - - // Stereo 4 to stereo 2 (4 -> 2): - // output.left += 0.5 * (input.left + input.surroundLeft) - // output.right += 0.5 * (input.right + input.surroundRight) - if (numberOfSourceChannels == 4 && numberOfChannels == 2) { - float *sourceLeft = source->getChannelByType(ChannelLeft)->getData(); - float *sourceRight = source->getChannelByType(ChannelRight)->getData(); - float *sourceSurroundLeft = source->getChannelByType(ChannelSurroundLeft)->getData(); - float *sourceSurroundRight = source->getChannelByType(ChannelSurroundRight)->getData(); - - float *destinationLeft = getChannelByType(ChannelLeft)->getData(); - float *destinationRight = getChannelByType(ChannelRight)->getData(); - - dsp::multiplyByScalarThenAddToOutput( - sourceLeft + sourceStart, 0.5f, destinationLeft + destinationStart, length); - dsp::multiplyByScalarThenAddToOutput( - sourceSurroundLeft + sourceStart, 0.5f, destinationLeft + destinationStart, length); - - dsp::multiplyByScalarThenAddToOutput( - sourceRight + sourceStart, 0.5f, 
destinationRight + destinationStart, length); - dsp::multiplyByScalarThenAddToOutput( - sourceSurroundRight + sourceStart, 0.5f, destinationRight + destinationStart, length); - return; - } - - // 5.1 to stereo (6 -> 2): - // output.left += input.left + sqrt(1/2) * (input.center + input.surroundLeft) - // output.right += input.right + sqrt(1/2) * (input.center + - // input.surroundRight) - if (numberOfSourceChannels == 6 && numberOfChannels == 2) { - float *sourceLeft = source->getChannelByType(ChannelLeft)->getData(); - float *sourceRight = source->getChannelByType(ChannelRight)->getData(); - float *sourceCenter = source->getChannelByType(ChannelCenter)->getData(); - float *sourceSurroundLeft = source->getChannelByType(ChannelSurroundLeft)->getData(); - float *sourceSurroundRight = source->getChannelByType(ChannelSurroundRight)->getData(); - - float *destinationLeft = getChannelByType(ChannelLeft)->getData(); - float *destinationRight = getChannelByType(ChannelRight)->getData(); - - dsp::add( - sourceLeft + sourceStart, - destinationLeft + destinationStart, - destinationLeft + destinationStart, - length); - dsp::multiplyByScalarThenAddToOutput( - sourceCenter + sourceStart, SQRT_HALF, destinationLeft + destinationStart, length); - dsp::multiplyByScalarThenAddToOutput( - sourceSurroundLeft + sourceStart, SQRT_HALF, destinationLeft + destinationStart, length); - - dsp::add( - sourceRight + sourceStart, - destinationRight + destinationStart, - destinationRight + destinationStart, - length); - dsp::multiplyByScalarThenAddToOutput( - sourceCenter + sourceStart, SQRT_HALF, destinationRight + destinationStart, length); - dsp::multiplyByScalarThenAddToOutput( - sourceSurroundRight + sourceStart, SQRT_HALF, destinationRight + destinationStart, length); - return; - } - - // 5.1 to stereo 4 (6 -> 4): - // output.left += input.left + sqrt(1/2) * input.center - // output.right += input.right + sqrt(1/2) * input.center - // output.surroundLeft += input.surroundLeft - // 
output.surroundRight += input.surroundRight - if (numberOfSourceChannels == 6 && numberOfChannels == 4) { - float *sourceLeft = source->getChannelByType(ChannelLeft)->getData(); - float *sourceRight = source->getChannelByType(ChannelRight)->getData(); - float *sourceCenter = source->getChannelByType(ChannelCenter)->getData(); - float *sourceSurroundLeft = source->getChannelByType(ChannelSurroundLeft)->getData(); - float *sourceSurroundRight = source->getChannelByType(ChannelSurroundRight)->getData(); - - float *destinationLeft = getChannelByType(ChannelLeft)->getData(); - float *destinationRight = getChannelByType(ChannelRight)->getData(); - float *destinationSurroundLeft = getChannelByType(ChannelSurroundLeft)->getData(); - float *destinationSurroundRight = getChannelByType(ChannelSurroundRight)->getData(); - - dsp::add( - sourceLeft + sourceStart, - destinationLeft + destinationStart, - destinationLeft + destinationStart, - length); - dsp::multiplyByScalarThenAddToOutput( - sourceCenter, SQRT_HALF, destinationLeft + destinationStart, length); - - dsp::add( - sourceRight + sourceStart, - destinationRight + destinationStart, - destinationRight + destinationStart, - length); - dsp::multiplyByScalarThenAddToOutput( - sourceCenter, SQRT_HALF, destinationRight + destinationStart, length); - - dsp::add( - sourceSurroundLeft + sourceStart, - destinationSurroundLeft + destinationStart, - destinationSurroundLeft + destinationStart, - length); - dsp::add( - sourceSurroundRight + sourceStart, - destinationSurroundRight + destinationStart, - destinationSurroundRight + destinationStart, - length); - return; - } - - discreteSum(source, sourceStart, destinationStart, length); -} - -} // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioBus.h b/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioBus.h deleted file mode 100644 index ebcf34cf7..000000000 --- 
a/packages/react-native-audio-api/common/cpp/audioapi/utils/AudioBus.h +++ /dev/null @@ -1,93 +0,0 @@ -#pragma once - -#include - -#include -#include -#include -#include - -namespace audioapi { - -class BaseAudioContext; -class AudioArray; - -class AudioBus { - public: - enum { - ChannelMono = 0, - ChannelLeft = 0, - ChannelRight = 1, - ChannelCenter = 2, - ChannelLFE = 3, - ChannelSurroundLeft = 4, - ChannelSurroundRight = 5, - }; - - explicit AudioBus() = default; - explicit AudioBus(size_t size, int numberOfChannels, float sampleRate); - AudioBus(const AudioBus &other); - AudioBus(AudioBus &&other) noexcept; - AudioBus &operator=(const AudioBus &other); - - ~AudioBus(); - - [[nodiscard]] int getNumberOfChannels() const; - [[nodiscard]] float getSampleRate() const; - [[nodiscard]] size_t getSize() const; - [[nodiscard]] AudioArray *getChannel(int index) const; - [[nodiscard]] AudioArray *getChannelByType(int channelType) const; - [[nodiscard]] std::shared_ptr getSharedChannel(int index) const; - - AudioArray &operator[](size_t index); - const AudioArray &operator[](size_t index) const; - - void normalize(); - void scale(float value); - [[nodiscard]] float maxAbsValue() const; - - void zero(); - void zero(size_t start, size_t length); - - void sum( - const AudioBus *source, - ChannelInterpretation interpretation = ChannelInterpretation::SPEAKERS); - void sum( - const AudioBus *source, - size_t start, - size_t length, - ChannelInterpretation interpretation = ChannelInterpretation::SPEAKERS); - void sum( - const AudioBus *source, - size_t sourceStart, - size_t destinationStart, - size_t length, - ChannelInterpretation interpretation = ChannelInterpretation::SPEAKERS); - - void copy(const AudioBus *source); - void copy(const AudioBus *source, size_t start, size_t length); - void copy(const AudioBus *source, size_t sourceStart, size_t destinationStart, size_t length); - - private: - std::vector> channels_; - - int numberOfChannels_; - float sampleRate_; - size_t 
size_; - - void createChannels(); - void discreteSum( - const AudioBus *source, - size_t sourceStart, - size_t destinationStart, - size_t length) const; - void - sumByUpMixing(const AudioBus *source, size_t sourceStart, size_t destinationStart, size_t length); - void sumByDownMixing( - const AudioBus *source, - size_t sourceStart, - size_t destinationStart, - size_t length); -}; - -} // namespace audioapi diff --git a/packages/react-native-audio-api/common/cpp/audioapi/utils/CircularAudioArray.cpp b/packages/react-native-audio-api/common/cpp/audioapi/utils/CircularAudioArray.cpp index 142cd9819..a09faa09d 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/utils/CircularAudioArray.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/utils/CircularAudioArray.cpp @@ -1,73 +1,88 @@ #include +#include + namespace audioapi { CircularAudioArray::CircularAudioArray(size_t size) : AudioArray(size) {} +void CircularAudioArray::push_back(const AudioArray & data, size_t size, bool skipAvailableSpaceCheck) { + push_back(data.begin(), size, skipAvailableSpaceCheck); +} + void CircularAudioArray::push_back(const float *data, size_t size, bool skipAvailableSpaceCheck) { - if (size > size_) { - throw std::overflow_error("size exceeds CircularAudioArray size_"); - } - - if (size > getAvailableSpace() && !skipAvailableSpaceCheck) { - throw std::overflow_error("not enough space in CircularAudioArray"); - } - - if (vWriteIndex_ + size > size_) { - auto partSize = size_ - vWriteIndex_; - memcpy(data_ + vWriteIndex_, data, partSize * sizeof(float)); - memcpy(data_, data + partSize, (size - partSize) * sizeof(float)); - } else { - memcpy(data_ + vWriteIndex_, data, size * sizeof(float)); - } - - vWriteIndex_ = vWriteIndex_ + size > size_ ? 
vWriteIndex_ + size - size_ : vWriteIndex_ + size; + if (size > size_) { + throw std::overflow_error("size exceeds CircularAudioArray size_"); + } + + if (size > getAvailableSpace() && !skipAvailableSpaceCheck) { + throw std::overflow_error("not enough space in CircularAudioArray"); + } + + if (vWriteIndex_ + size > size_) { + auto partSize = size_ - vWriteIndex_; + copy(data, 0, vWriteIndex_, partSize); + copy(data, partSize, 0, size - partSize); + } else { + copy(data, 0, vWriteIndex_, size); + } + + vWriteIndex_ = vWriteIndex_ + size > size_ ? vWriteIndex_ + size - size_ : vWriteIndex_ + size; +} + +void CircularAudioArray::pop_front(AudioArray &data, size_t size, bool skipAvailableDataCheck) { + pop_front(data.begin(), size, skipAvailableDataCheck); } void CircularAudioArray::pop_front(float *data, size_t size, bool skipAvailableDataCheck) { - if (size > size_) { - throw std::overflow_error("size exceeds CircularAudioArray size_"); - } - - if (size > getNumberOfAvailableFrames() && !skipAvailableDataCheck) { - throw std::overflow_error("not enough data in CircularAudioArray"); - } - - if (vReadIndex_ + size > size_) { - auto partSize = size_ - vReadIndex_; - memcpy(data, data_ + vReadIndex_, partSize * sizeof(float)); - memcpy(data + partSize, data_, (size - partSize) * sizeof(float)); - } else { - memcpy(data, data_ + vReadIndex_, size * sizeof(float)); - } - - vReadIndex_ = vReadIndex_ + size > size_ ? vReadIndex_ + size - size_ : vReadIndex_ + size; + if (size > size_) { + throw std::overflow_error("size exceeds CircularAudioArray size_"); + } + + if (size > getNumberOfAvailableFrames() && !skipAvailableDataCheck) { + throw std::overflow_error("not enough data in CircularAudioArray"); + } + + if (vReadIndex_ + size > size_) { + auto partSize = size_ - vReadIndex_; + copyTo(data, vReadIndex_, 0, partSize); + copyTo(data, 0, partSize, size - partSize); + } else { + copyTo(data, vReadIndex_, 0, size); + } + + vReadIndex_ = vReadIndex_ + size > size_ ? 
vReadIndex_ + size - size_ : vReadIndex_ + size; } void CircularAudioArray::pop_back( - float *data, + AudioArray &data, size_t size, size_t offset, bool skipAvailableDataCheck) { - if (size > size_) { - throw std::overflow_error("size exceeds CircularAudioArray size_"); - } - - if (size + offset > getNumberOfAvailableFrames() && !skipAvailableDataCheck) { - throw std::overflow_error("not enough data in CircularAudioArray"); - } - - if (vWriteIndex_ <= offset) { - memcpy(data, data_ + size_ - (offset - vWriteIndex_) - size, size * sizeof(float)); - } else if (vWriteIndex_ <= size + offset) { - auto partSize = size + offset - vWriteIndex_; - memcpy(data, data_ + size_ - partSize, partSize * sizeof(float)); - memcpy(data + partSize, data_, (size - partSize) * sizeof(float)); - } else { - memcpy(data, data_ + vWriteIndex_ - size - offset, size * sizeof(float)); - } - - vReadIndex_ = vWriteIndex_ - offset < 0 ? size + vWriteIndex_ - offset : vWriteIndex_ - offset; + pop_back(data.begin(), size, offset, skipAvailableDataCheck); +} + +void CircularAudioArray::pop_back(float *data, size_t size, size_t offset, + bool skipAvailableDataCheck) { + if (size > size_) { + throw std::overflow_error("size exceeds CircularAudioArray size_"); + } + + if (size + offset > getNumberOfAvailableFrames() && !skipAvailableDataCheck) { + throw std::overflow_error("not enough data in CircularAudioArray"); + } + + if (vWriteIndex_ <= offset) { + copyTo(data, size_ - (offset - vWriteIndex_) - size, 0, size); + } else if (vWriteIndex_ <= size + offset) { + auto partSize = size + offset - vWriteIndex_; + copyTo(data, size_ - partSize, 0, partSize); + copyTo(data, 0, partSize, size - partSize); + } else { + copyTo(data, vWriteIndex_ - size - offset, 0, size); + } + + vReadIndex_ = vWriteIndex_ - offset < 0 ? 
size + vWriteIndex_ - offset : vWriteIndex_ - offset; } size_t CircularAudioArray::getNumberOfAvailableFrames() const { diff --git a/packages/react-native-audio-api/common/cpp/audioapi/utils/CircularAudioArray.h b/packages/react-native-audio-api/common/cpp/audioapi/utils/CircularAudioArray.h index f0df81b75..757ed2098 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/utils/CircularAudioArray.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/utils/CircularAudioArray.h @@ -11,8 +11,14 @@ class CircularAudioArray : public AudioArray { CircularAudioArray(const CircularAudioArray &other) = default; ~CircularAudioArray() = default; + void push_back(const AudioArray &data, size_t size, bool skipAvailableSpaceCheck = false); void push_back(const float *data, size_t size, bool skipAvailableSpaceCheck = false); + + void pop_front(AudioArray &data, size_t size, bool skipAvailableDataCheck = false); void pop_front(float *data, size_t size, bool skipAvailableDataCheck = false); + + void + pop_back(AudioArray &data, size_t size, size_t offset = 0, bool skipAvailableDataCheck = false); void pop_back(float *data, size_t size, size_t offset = 0, bool skipAvailableDataCheck = false); [[nodiscard]] size_t getNumberOfAvailableFrames() const; diff --git a/packages/react-native-audio-api/common/cpp/audioapi/utils/CircularOverflowableAudioArray.cpp b/packages/react-native-audio-api/common/cpp/audioapi/utils/CircularOverflowableAudioArray.cpp index 86acde67c..eab7f11e5 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/utils/CircularOverflowableAudioArray.cpp +++ b/packages/react-native-audio-api/common/cpp/audioapi/utils/CircularOverflowableAudioArray.cpp @@ -8,46 +8,54 @@ CircularOverflowableAudioArray::CircularOverflowableAudioArray(size_t size) noex std::is_nothrow_constructible::value) : AudioArray(size) {} -void CircularOverflowableAudioArray::write(const float *data, const size_t size) { - size_t writeIndex = 
vWriteIndex_.load(std::memory_order_relaxed); - - if (size > size_) { - return; // Ignore write if size exceeds buffer size - } - - /// Advances the read index if there is not enough space - readLock_.lock(); - size_t availableSpace = (size_ + vReadIndex_ - writeIndex - 1) % size_; - if (size > availableSpace) { - vReadIndex_ = (writeIndex + size + 1) % size_; - } - readLock_.unlock(); - - size_t partSize = size_ - writeIndex; - if (size > partSize) { - std::memcpy(data_ + writeIndex, data, partSize * sizeof(float)); - std::memcpy(data_, data + partSize, (size - partSize) * sizeof(float)); - } else { - std::memcpy(data_ + writeIndex, data, size * sizeof(float)); - } - vWriteIndex_.store((writeIndex + size) % size_, std::memory_order_relaxed); +void CircularOverflowableAudioArray::write(const AudioArray &data, const size_t size) { + write(data.begin(), size); } -size_t CircularOverflowableAudioArray::read(float *output, size_t size) const { - readLock_.lock(); - size_t availableSpace = getAvailableSpace(); - size_t readSize = std::min(size, availableSpace); - - size_t partSize = size_ - vReadIndex_; - if (readSize > partSize) { - std::memcpy(output, data_ + vReadIndex_, partSize * sizeof(float)); - std::memcpy(output + partSize, data_, (readSize - partSize) * sizeof(float)); - } else { - std::memcpy(output, data_ + vReadIndex_, readSize * sizeof(float)); - } - vReadIndex_ = (vReadIndex_ + readSize) % size_; - readLock_.unlock(); - return readSize; +void CircularOverflowableAudioArray::write(const float *data, size_t size) { + size_t writeIndex = vWriteIndex_.load(std::memory_order_relaxed); + + if (size > size_) { + return; // Ignore write if size exceeds buffer size + } + + /// Advances the read index if there is not enough space + readLock_.lock(); + size_t availableSpace = (size_ + vReadIndex_ - writeIndex - 1) % size_; + if (size > availableSpace) { + vReadIndex_ = (writeIndex + size + 1) % size_; + } + readLock_.unlock(); + + size_t partSize = size_ - 
writeIndex; + if (size > partSize) { + copy(data, 0, writeIndex, partSize); + copy(data, partSize, 0, size - partSize); + } else { + copy(data, 0, writeIndex, size); + } + vWriteIndex_.store((writeIndex + size) % size_, std::memory_order_relaxed); +} + +size_t CircularOverflowableAudioArray::read(AudioArray &data, size_t size) const { + return read(data.begin(), size); +} + +size_t CircularOverflowableAudioArray::read(float *data, size_t size) const { + readLock_.lock(); + size_t availableSpace = getAvailableSpace(); + size_t readSize = std::min(size, availableSpace); + + size_t partSize = size_ - vReadIndex_; + if (readSize > partSize) { + copyTo(data, vReadIndex_, 0, partSize); + copyTo(data, 0, partSize, readSize - partSize); + } else { + copyTo(data, vReadIndex_, 0, readSize); + } + vReadIndex_ = (vReadIndex_ + readSize) % size_; + readLock_.unlock(); + return readSize; } size_t CircularOverflowableAudioArray::getAvailableSpace() const { diff --git a/packages/react-native-audio-api/common/cpp/audioapi/utils/CircularOverflowableAudioArray.h b/packages/react-native-audio-api/common/cpp/audioapi/utils/CircularOverflowableAudioArray.h index 601da5610..449100423 100644 --- a/packages/react-native-audio-api/common/cpp/audioapi/utils/CircularOverflowableAudioArray.h +++ b/packages/react-native-audio-api/common/cpp/audioapi/utils/CircularOverflowableAudioArray.h @@ -19,6 +19,12 @@ class CircularOverflowableAudioArray : public AudioArray { CircularOverflowableAudioArray(const CircularOverflowableAudioArray &other) = delete; ~CircularOverflowableAudioArray() = default; + /// @brief Writes data to the circular buffer. + /// @note Might wait for read operation to finish if it is in progress. It ignores writes that exceed the buffer size. + /// @param data Reference to input AudioArray. + /// @param size Number of frames to write. + void write(const AudioArray &data, size_t size); + /// @brief Writes data to the circular buffer. 
/// @note Might wait for read operation to finish if it is in progress. It ignores writes that exceed the buffer size. /// @param data Pointer to the input buffer. @@ -26,10 +32,16 @@ class CircularOverflowableAudioArray : public AudioArray { void write(const float *data, size_t size); /// @brief Reads data from the circular buffer. - /// @param output Pointer to the output buffer. + /// @param data Reference to output AudioArray. + /// @param size Number of frames to read. + /// @return The number of frames actually read. + size_t read(AudioArray &data, size_t size) const; + + /// @brief Reads data from the circular buffer. + /// @param data Pointer to the output buffer. /// @param size Number of frames to read. /// @return The number of frames actually read. - size_t read(float *output, size_t size) const; + size_t read(float *data, size_t size) const; private: std::atomic vWriteIndex_ = {0}; diff --git a/packages/react-native-audio-api/common/cpp/test/src/AudioParamTest.cpp b/packages/react-native-audio-api/common/cpp/test/src/AudioParamTest.cpp index 7d8cbae23..75efa552a 100644 --- a/packages/react-native-audio-api/common/cpp/test/src/AudioParamTest.cpp +++ b/packages/react-native-audio-api/common/cpp/test/src/AudioParamTest.cpp @@ -121,8 +121,15 @@ TEST_F(AudioParamTest, SetTargetAtTime) { TEST_F(AudioParamTest, SetValueCurveAtTime) { auto param = AudioParam(0.0, 0.0, 1.0, context); param.setValue(0.5); - auto curve = std::make_shared>(std::vector{0.1, 0.4, 0.2, 0.8, 0.5}); - param.setValueCurveAtTime(curve, curve->size(), 0.1, 0.2); + auto curve = std::make_shared(5); + auto curveSpan = curve ->span(); + curveSpan[0] = 0.1f; + curveSpan[1] = 0.4f; + curveSpan[2] = 0.2f; + curveSpan[3] = 0.8f; + curveSpan[4] = 0.5f; + + param.setValueCurveAtTime(curve, curve->getSize(), 0.1, 0.2); // 5 elements over 0.2s => each element is 0.04s apart float value = param.processKRateParam(1, 0.05); diff --git 
a/packages/react-native-audio-api/common/cpp/test/src/AudioScheduledSourceTest.cpp b/packages/react-native-audio-api/common/cpp/test/src/AudioScheduledSourceTest.cpp index ec2df8519..f8ce75501 100644 --- a/packages/react-native-audio-api/common/cpp/test/src/AudioScheduledSourceTest.cpp +++ b/packages/react-native-audio-api/common/cpp/test/src/AudioScheduledSourceTest.cpp @@ -2,7 +2,7 @@ #include #include #include -#include +#include #include #include #include @@ -33,14 +33,14 @@ class TestableAudioScheduledSourceNode : public AudioScheduledSourceNode { } void updatePlaybackInfo( - const std::shared_ptr &processingBus, + const std::shared_ptr &processingBuffer, int framesToProcess, size_t &startOffset, size_t &nonSilentFramesToProcess, float sampleRate, size_t currentSampleFrame) { AudioScheduledSourceNode::updatePlaybackInfo( - processingBus, + processingBuffer, framesToProcess, startOffset, nonSilentFramesToProcess, @@ -48,7 +48,7 @@ class TestableAudioScheduledSourceNode : public AudioScheduledSourceNode { currentSampleFrame); } - std::shared_ptr processNode(const std::shared_ptr &, int) override { + std::shared_ptr processNode(const std::shared_ptr &, int) override { return nullptr; } @@ -60,15 +60,16 @@ class TestableAudioScheduledSourceNode : public AudioScheduledSourceNode { if (std::shared_ptr context = context_.lock()) { size_t startOffset = 0; size_t nonSilentFramesToProcess = 0; - auto processingBus = std::make_shared(128, 2, static_cast(SAMPLE_RATE)); + auto processingBuffer = + std::make_shared(128, 2, static_cast(SAMPLE_RATE)); updatePlaybackInfo( - processingBus, + processingBuffer, frames, startOffset, nonSilentFramesToProcess, context->getSampleRate(), context->getCurrentSampleFrame()); - context->getDestination()->renderAudio(processingBus, frames); + context->getDestination()->renderAudio(processingBuffer, frames); } } }; diff --git a/packages/react-native-audio-api/common/cpp/test/src/ConstantSourceTest.cpp 
b/packages/react-native-audio-api/common/cpp/test/src/ConstantSourceTest.cpp index 0f3ecca07..6136b4abe 100644 --- a/packages/react-native-audio-api/common/cpp/test/src/ConstantSourceTest.cpp +++ b/packages/react-native-audio-api/common/cpp/test/src/ConstantSourceTest.cpp @@ -1,9 +1,9 @@ -#include #include #include #include +#include #include -#include +#include #include #include #include @@ -33,10 +33,10 @@ class TestableConstantSourceNode : public ConstantSourceNode { getOffsetParam()->setValue(value); } - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override { - return ConstantSourceNode::processNode(processingBus, framesToProcess); + return ConstantSourceNode::processNode(processingBuffer, framesToProcess); } }; @@ -48,18 +48,18 @@ TEST_F(ConstantSourceTest, ConstantSourceCanBeCreated) { TEST_F(ConstantSourceTest, ConstantSourceOutputsConstantValue) { static constexpr int FRAMES_TO_PROCESS = 4; - auto bus = std::make_shared(FRAMES_TO_PROCESS, 1, sampleRate); + auto buffer = std::make_shared(FRAMES_TO_PROCESS, 1, sampleRate); auto constantSource = TestableConstantSourceNode(context); // constantSource.start(context->getCurrentTime()); - // auto resultBus = constantSource.processNode(bus, FRAMES_TO_PROCESS); + // auto resultBuffer = constantSource.processNode(buffer, FRAMES_TO_PROCESS); // for (int i = 0; i < FRAMES_TO_PROCESS; ++i) { - // EXPECT_FLOAT_EQ((*resultBus->getChannel(0))[i], 1.0f); + // EXPECT_FLOAT_EQ((*resultBuffer->getChannel(0))[i], 1.0f); // } // constantSource.setOffsetParam(0.5f); - // resultBus = constantSource.processNode(bus, FRAMES_TO_PROCESS); + // resultBuffer = constantSource.processNode(buffer, FRAMES_TO_PROCESS); // for (int i = 0; i < FRAMES_TO_PROCESS; ++i) { - // EXPECT_FLOAT_EQ((*resultBus->getChannel(0))[i], 0.5f); + // EXPECT_FLOAT_EQ((*resultBuffer->getChannel(0))[i], 0.5f); // } } diff --git 
a/packages/react-native-audio-api/common/cpp/test/src/DelayTest.cpp b/packages/react-native-audio-api/common/cpp/test/src/DelayTest.cpp index 3ccc6a91d..7053b67a6 100644 --- a/packages/react-native-audio-api/common/cpp/test/src/DelayTest.cpp +++ b/packages/react-native-audio-api/common/cpp/test/src/DelayTest.cpp @@ -1,9 +1,9 @@ -#include #include #include #include +#include #include -#include +#include #include #include #include @@ -26,16 +26,17 @@ class DelayTest : public ::testing::Test { class TestableDelayNode : public DelayNode { public: - explicit TestableDelayNode(std::shared_ptr context, const DelayOptions& options) : DelayNode(context, options) {} + explicit TestableDelayNode(std::shared_ptr context, const DelayOptions &options) + : DelayNode(context, options) {} void setDelayTimeParam(float value) { getDelayTimeParam()->setValue(value); } - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override { - return DelayNode::processNode(processingBus, framesToProcess); + return DelayNode::processNode(processingBuffer, framesToProcess); } }; @@ -52,14 +53,14 @@ TEST_F(DelayTest, DelayWithZeroDelayOutputsInputSignal) { auto delayNode = TestableDelayNode(context, options); delayNode.setDelayTimeParam(DELAY_TIME); - auto bus = std::make_shared(FRAMES_TO_PROCESS, 1, sampleRate); - for (size_t i = 0; i < bus->getSize(); ++i) { - bus->getChannel(0)->getData()[i] = i + 1; + auto buffer = std::make_shared(FRAMES_TO_PROCESS, 1, sampleRate); + for (size_t i = 0; i < buffer->getSize(); ++i) { + (*buffer->getChannel(0))[i] = i + 1; } - auto resultBus = delayNode.processNode(bus, FRAMES_TO_PROCESS); + auto resultBuffer = delayNode.processNode(buffer, FRAMES_TO_PROCESS); for (size_t i = 0; i < FRAMES_TO_PROCESS; ++i) { - EXPECT_FLOAT_EQ((*resultBus->getChannel(0))[i], static_cast(i + 1)); + EXPECT_FLOAT_EQ((*resultBuffer->getChannel(0))[i], static_cast(i + 1)); } 
} @@ -71,20 +72,20 @@ TEST_F(DelayTest, DelayAppliesTimeShiftCorrectly) { auto delayNode = TestableDelayNode(context, options); delayNode.setDelayTimeParam(DELAY_TIME); - auto bus = std::make_shared(FRAMES_TO_PROCESS, 1, sampleRate); - for (size_t i = 0; i < bus->getSize(); ++i) { - bus->getChannel(0)->getData()[i] = i + 1; + auto buffer = std::make_shared(FRAMES_TO_PROCESS, 1, sampleRate); + for (size_t i = 0; i < buffer->getSize(); ++i) { + (*buffer->getChannel(0))[i] = i + 1; } - auto resultBus = delayNode.processNode(bus, FRAMES_TO_PROCESS); + auto resultBuffer = delayNode.processNode(buffer, FRAMES_TO_PROCESS); for (size_t i = 0; i < FRAMES_TO_PROCESS; ++i) { if (i < FRAMES_TO_PROCESS / 2) { // First 64 samples should be zero due to delay - EXPECT_FLOAT_EQ((*resultBus->getChannel(0))[i], 0.0f); + EXPECT_FLOAT_EQ((*resultBuffer->getChannel(0))[i], 0.0f); } else { EXPECT_FLOAT_EQ( - (*resultBus->getChannel(0))[i], + (*resultBuffer->getChannel(0))[i], static_cast( - i + 1 - FRAMES_TO_PROCESS / 2)); // Last 64 samples should be 1st part of bus + i + 1 - FRAMES_TO_PROCESS / 2)); // Last 64 samples should be 1st part of buffer } } } @@ -97,19 +98,19 @@ TEST_F(DelayTest, DelayHandlesTailCorrectly) { auto delayNode = TestableDelayNode(context, options); delayNode.setDelayTimeParam(DELAY_TIME); - auto bus = std::make_shared(FRAMES_TO_PROCESS, 1, sampleRate); - for (size_t i = 0; i < bus->getSize(); ++i) { - bus->getChannel(0)->getData()[i] = i + 1; + auto buffer = std::make_shared(FRAMES_TO_PROCESS, 1, sampleRate); + for (size_t i = 0; i < buffer->getSize(); ++i) { + (*buffer->getChannel(0))[i] = i + 1; } - delayNode.processNode(bus, FRAMES_TO_PROCESS); - auto resultBus = delayNode.processNode(bus, FRAMES_TO_PROCESS); + delayNode.processNode(buffer, FRAMES_TO_PROCESS); + auto resultBuffer = delayNode.processNode(buffer, FRAMES_TO_PROCESS); for (size_t i = 0; i < FRAMES_TO_PROCESS; ++i) { - if (i < FRAMES_TO_PROCESS / 2) { // First 64 samples should be 2nd part of bus + 
if (i < FRAMES_TO_PROCESS / 2) { // First 64 samples should be 2nd part of buffer EXPECT_FLOAT_EQ( - (*resultBus->getChannel(0))[i], static_cast(i + 1 + FRAMES_TO_PROCESS / 2)); + (*resultBuffer->getChannel(0))[i], static_cast(i + 1 + FRAMES_TO_PROCESS / 2)); } else { - EXPECT_FLOAT_EQ((*resultBus->getChannel(0))[i], + EXPECT_FLOAT_EQ((*resultBuffer->getChannel(0))[i], 0.0f); // Last 64 samples should be zero } } diff --git a/packages/react-native-audio-api/common/cpp/test/src/GainTest.cpp b/packages/react-native-audio-api/common/cpp/test/src/GainTest.cpp index d3d99b5cf..501daf655 100644 --- a/packages/react-native-audio-api/common/cpp/test/src/GainTest.cpp +++ b/packages/react-native-audio-api/common/cpp/test/src/GainTest.cpp @@ -1,9 +1,9 @@ -#include #include #include #include +#include #include -#include +#include #include #include #include @@ -26,16 +26,17 @@ class GainTest : public ::testing::Test { class TestableGainNode : public GainNode { public: - explicit TestableGainNode(std::shared_ptr context) : GainNode(context, GainOptions()) {} + explicit TestableGainNode(std::shared_ptr context) + : GainNode(context, GainOptions()) {} void setGainParam(float value) { getGainParam()->setValue(value); } - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override { - return GainNode::processNode(processingBus, framesToProcess); + return GainNode::processNode(processingBuffer, framesToProcess); } }; @@ -50,14 +51,14 @@ TEST_F(GainTest, GainModulatesVolumeCorrectly) { auto gainNode = TestableGainNode(context); gainNode.setGainParam(GAIN_VALUE); - auto bus = std::make_shared(FRAMES_TO_PROCESS, 1, sampleRate); - for (size_t i = 0; i < bus->getSize(); ++i) { - bus->getChannel(0)->getData()[i] = i + 1; + auto buffer = std::make_shared(FRAMES_TO_PROCESS, 1, sampleRate); + for (size_t i = 0; i < buffer->getSize(); ++i) { + (*buffer->getChannel(0))[i] = i + 1; } 
- auto resultBus = gainNode.processNode(bus, FRAMES_TO_PROCESS); + auto resultBuffer = gainNode.processNode(buffer, FRAMES_TO_PROCESS); for (size_t i = 0; i < FRAMES_TO_PROCESS; ++i) { - EXPECT_FLOAT_EQ((*resultBus->getChannel(0))[i], (i + 1) * GAIN_VALUE); + EXPECT_FLOAT_EQ((*resultBuffer->getChannel(0))[i], (i + 1) * GAIN_VALUE); } } @@ -67,15 +68,15 @@ TEST_F(GainTest, GainModulatesVolumeCorrectlyMultiChannel) { auto gainNode = TestableGainNode(context); gainNode.setGainParam(GAIN_VALUE); - auto bus = std::make_shared(FRAMES_TO_PROCESS, 2, sampleRate); - for (size_t i = 0; i < bus->getSize(); ++i) { - bus->getChannel(0)->getData()[i] = i + 1; - bus->getChannel(1)->getData()[i] = -i - 1; + auto buffer = std::make_shared(FRAMES_TO_PROCESS, 2, sampleRate); + for (size_t i = 0; i < buffer->getSize(); ++i) { + (*buffer->getChannel(0))[i] = i + 1; + (*buffer->getChannel(1))[i] = -i - 1; } - auto resultBus = gainNode.processNode(bus, FRAMES_TO_PROCESS); + auto resultBuffer = gainNode.processNode(buffer, FRAMES_TO_PROCESS); for (size_t i = 0; i < FRAMES_TO_PROCESS; ++i) { - EXPECT_FLOAT_EQ((*resultBus->getChannel(0))[i], (i + 1) * GAIN_VALUE); - EXPECT_FLOAT_EQ((*resultBus->getChannel(1))[i], (-i - 1) * GAIN_VALUE); + EXPECT_FLOAT_EQ((*resultBuffer->getChannel(0))[i], (i + 1) * GAIN_VALUE); + EXPECT_FLOAT_EQ((*resultBuffer->getChannel(1))[i], (-i - 1) * GAIN_VALUE); } } diff --git a/packages/react-native-audio-api/common/cpp/test/src/StereoPannerTest.cpp b/packages/react-native-audio-api/common/cpp/test/src/StereoPannerTest.cpp index 65f9a7a6c..123a9adb8 100644 --- a/packages/react-native-audio-api/common/cpp/test/src/StereoPannerTest.cpp +++ b/packages/react-native-audio-api/common/cpp/test/src/StereoPannerTest.cpp @@ -1,9 +1,9 @@ -#include #include #include #include +#include #include -#include +#include #include #include #include @@ -33,10 +33,10 @@ class TestableStereoPannerNode : public StereoPannerNode { getPanParam()->setValue(value); } - std::shared_ptr 
processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override { - return StereoPannerNode::processNode(processingBus, framesToProcess); + return StereoPannerNode::processNode(processingBuffer, framesToProcess); } }; @@ -51,22 +51,22 @@ TEST_F(StereoPannerTest, PanModulatesInputMonoCorrectly) { auto panNode = TestableStereoPannerNode(context); panNode.setPanParam(PAN_VALUE); - auto bus = std::make_shared(FRAMES_TO_PROCESS, 1, sampleRate); - for (size_t i = 0; i < bus->getSize(); ++i) { - (*bus->getChannelByType(AudioBus::ChannelLeft))[i] = i + 1; + auto buffer = std::make_shared(FRAMES_TO_PROCESS, 1, sampleRate); + for (size_t i = 0; i < buffer->getSize(); ++i) { + (*buffer->getChannelByType(AudioBuffer::ChannelLeft))[i] = i + 1; } - auto resultBus = panNode.processNode(bus, FRAMES_TO_PROCESS); + auto resultBuffer = panNode.processNode(buffer, FRAMES_TO_PROCESS); // x = (0.5 + 1) / 2 = 0.75 // gainL = cos(x * (π / 2)) = cos(0.75 * (π / 2)) = 0.38268343236508984 // gainR = sin(x * (π / 2)) = sin(0.75 * (π / 2)) = 0.9238795325112867 for (size_t i = 0; i < FRAMES_TO_PROCESS; ++i) { EXPECT_NEAR( - (*resultBus->getChannelByType(AudioBus::ChannelLeft))[i], + (*resultBuffer->getChannelByType(AudioBuffer::ChannelLeft))[i], (i + 1) * 0.38268343236508984, 1e-4); EXPECT_NEAR( - (*resultBus->getChannelByType(AudioBus::ChannelRight))[i], + (*resultBuffer->getChannelByType(AudioBuffer::ChannelRight))[i], (i + 1) * 0.9238795325112867, 1e-4); } @@ -78,23 +78,23 @@ TEST_F(StereoPannerTest, PanModulatesInputStereoCorrectlyWithNegativePan) { auto panNode = TestableStereoPannerNode(context); panNode.setPanParam(PAN_VALUE); - auto bus = std::make_shared(FRAMES_TO_PROCESS, 2, sampleRate); - for (size_t i = 0; i < bus->getSize(); ++i) { - (*bus->getChannelByType(AudioBus::ChannelLeft))[i] = i + 1; - (*bus->getChannelByType(AudioBus::ChannelRight))[i] = i + 1; + auto buffer = 
std::make_shared(FRAMES_TO_PROCESS, 2, sampleRate); + for (size_t i = 0; i < buffer->getSize(); ++i) { + (*buffer->getChannelByType(AudioBuffer::ChannelLeft))[i] = i + 1; + (*buffer->getChannelByType(AudioBuffer::ChannelRight))[i] = i + 1; } - auto resultBus = panNode.processNode(bus, FRAMES_TO_PROCESS); + auto resultBuffer = panNode.processNode(buffer, FRAMES_TO_PROCESS); // x = -0.5 + 1 = 0.5 // gainL = cos(x * (π / 2)) = cos(0.5 * (π / 2)) = 0.7071067811865476 // gainR = sin(x * (π / 2)) = sin(0.5 * (π / 2)) = 0.7071067811865476 for (size_t i = 0; i < FRAMES_TO_PROCESS; ++i) { EXPECT_NEAR( - (*resultBus->getChannelByType(AudioBus::ChannelLeft))[i], + (*resultBuffer->getChannelByType(AudioBuffer::ChannelLeft))[i], (i + 1) + (i + 1) * 0.7071067811865476, 1e-4); EXPECT_NEAR( - (*resultBus->getChannelByType(AudioBus::ChannelRight))[i], + (*resultBuffer->getChannelByType(AudioBuffer::ChannelRight))[i], (i + 1) * 0.7071067811865476, 1e-4); } @@ -106,23 +106,23 @@ TEST_F(StereoPannerTest, PanModulatesInputStereoCorrectlyWithPositivePan) { auto panNode = TestableStereoPannerNode(context); panNode.setPanParam(PAN_VALUE); - auto bus = std::make_shared(FRAMES_TO_PROCESS, 2, sampleRate); - for (size_t i = 0; i < bus->getSize(); ++i) { - (*bus->getChannelByType(AudioBus::ChannelLeft))[i] = i + 1; - (*bus->getChannelByType(AudioBus::ChannelRight))[i] = i + 1; + auto buffer = std::make_shared(FRAMES_TO_PROCESS, 2, sampleRate); + for (size_t i = 0; i < buffer->getSize(); ++i) { + (*buffer->getChannelByType(AudioBuffer::ChannelLeft))[i] = i + 1; + (*buffer->getChannelByType(AudioBuffer::ChannelRight))[i] = i + 1; } - auto resultBus = panNode.processNode(bus, FRAMES_TO_PROCESS); + auto resultBuffer = panNode.processNode(buffer, FRAMES_TO_PROCESS); // x = 0.75 // gainL = cos(x * (π / 2)) = cos(0.75 * (π / 2)) = 0.38268343236508984 // gainR = sin(x * (π / 2)) = sin(0.75 * (π / 2)) = 0.9238795325112867 for (size_t i = 0; i < FRAMES_TO_PROCESS; ++i) { EXPECT_NEAR( - 
(*resultBus->getChannelByType(AudioBus::ChannelLeft))[i], + (*resultBuffer->getChannelByType(AudioBuffer::ChannelLeft))[i], (i + 1) * 0.38268343236508984, 1e-4); EXPECT_NEAR( - (*resultBus->getChannelByType(AudioBus::ChannelRight))[i], + (*resultBuffer->getChannelByType(AudioBuffer::ChannelRight))[i], (i + 1) + (i + 1) * 0.9238795325112867, 1e-4); } diff --git a/packages/react-native-audio-api/common/cpp/test/src/core/effects/WaveShaperNodeTest.cpp b/packages/react-native-audio-api/common/cpp/test/src/core/effects/WaveShaperNodeTest.cpp index 0bce7647e..3382e74e1 100644 --- a/packages/react-native-audio-api/common/cpp/test/src/core/effects/WaveShaperNodeTest.cpp +++ b/packages/react-native-audio-api/common/cpp/test/src/core/effects/WaveShaperNodeTest.cpp @@ -1,10 +1,10 @@ -#include #include #include -#include #include -#include -#include +#include +#include +#include +#include #include #include #include @@ -28,20 +28,20 @@ class TestableWaveShaperNode : public WaveShaperNode { public: explicit TestableWaveShaperNode(std::shared_ptr context) : WaveShaperNode(context, WaveShaperOptions()) { - testCurve_ = std::make_shared(3); - auto data = testCurve_->getData(); + testCurve_ = std::make_shared(3); + auto data = testCurve_->span(); data[0] = -2.0f; data[1] = 0.0f; data[2] = 2.0f; } - std::shared_ptr processNode( - const std::shared_ptr &processingBus, + std::shared_ptr processNode( + const std::shared_ptr &processingBuffer, int framesToProcess) override { - return WaveShaperNode::processNode(processingBus, framesToProcess); + return WaveShaperNode::processNode(processingBuffer, framesToProcess); } - std::shared_ptr testCurve_; + std::shared_ptr testCurve_; }; TEST_F(WaveShaperNodeTest, WaveShaperNodeCanBeCreated) { @@ -61,14 +61,14 @@ TEST_F(WaveShaperNodeTest, NoneOverSamplingProcessesCorrectly) { waveShaper->setOversample(OverSampleType::OVERSAMPLE_NONE); waveShaper->setCurve(waveShaper->testCurve_); - auto bus = std::make_shared(FRAMES_TO_PROCESS, 1, sampleRate); - 
for (size_t i = 0; i < bus->getSize(); ++i) { - bus->getChannel(0)->getData()[i] = -1.0f + i * 0.5f; + auto buffer = std::make_shared(FRAMES_TO_PROCESS, 1, sampleRate); + for (size_t i = 0; i < buffer->getSize(); ++i) { + (*buffer->getChannel(0))[i] = -1.0f + i * 0.5f; } - auto resultBus = waveShaper->processNode(bus, FRAMES_TO_PROCESS); - auto curveData = waveShaper->testCurve_->getData(); - auto resultData = resultBus->getChannel(0)->getData(); + auto resultBuffer = waveShaper->processNode(buffer, FRAMES_TO_PROCESS); + auto curveData = waveShaper->testCurve_->span(); + auto resultData = resultBuffer->getChannel(0)->span(); EXPECT_FLOAT_EQ(resultData[0], curveData[0]); EXPECT_FLOAT_EQ(resultData[1], -1.0f); diff --git a/packages/react-native-audio-api/common/cpp/test/src/dsp/ResamplerTest.cpp b/packages/react-native-audio-api/common/cpp/test/src/dsp/ResamplerTest.cpp index f62fc9824..21e868148 100644 --- a/packages/react-native-audio-api/common/cpp/test/src/dsp/ResamplerTest.cpp +++ b/packages/react-native-audio-api/common/cpp/test/src/dsp/ResamplerTest.cpp @@ -1,16 +1,16 @@ +#include #include #include #include -#include -#include +#include #include #include using namespace audioapi; class ResamplerTest : public ::testing::Test { - protected: - static constexpr int KERNEL_SIZE = RENDER_QUANTUM_SIZE; + protected: + static constexpr int KERNEL_SIZE = RENDER_QUANTUM_SIZE; }; class TestableUpSampler : public UpSampler { @@ -39,7 +39,8 @@ TEST_F(ResamplerTest, UpSamplerCanBeCreated) { } TEST_F(ResamplerTest, DownSamplerCanBeCreated) { - auto downSampler = std::make_unique(RENDER_QUANTUM_SIZE * 2, RENDER_QUANTUM_SIZE * 2); + auto downSampler = + std::make_unique(RENDER_QUANTUM_SIZE * 2, RENDER_QUANTUM_SIZE * 2); ASSERT_NE(downSampler, nullptr); } @@ -109,8 +110,8 @@ TEST_F(ResamplerTest, UpDownSamplingProcess) { int upSamplerOutputFrames; int downSamplerOutputFrames; - EXPECT_NO_THROW(upSamplerOutputFrames = upSampler->process(inputArray, outputArray, 4)); - 
EXPECT_NO_THROW(downSamplerOutputFrames = downSampler->process(outputArray, inputArray, 8)); + EXPECT_NO_THROW(upSamplerOutputFrames = upSampler->process(*inputArray, *outputArray, 4)); + EXPECT_NO_THROW(downSamplerOutputFrames = downSampler->process(*outputArray, *inputArray, 8)); EXPECT_EQ(upSamplerOutputFrames, 8); EXPECT_EQ(downSamplerOutputFrames, 4); diff --git a/packages/react-native-audio-api/ios/audioapi/ios/core/IOSAudioPlayer.h b/packages/react-native-audio-api/ios/audioapi/ios/core/IOSAudioPlayer.h index 21e858888..bf4643abf 100644 --- a/packages/react-native-audio-api/ios/audioapi/ios/core/IOSAudioPlayer.h +++ b/packages/react-native-audio-api/ios/audioapi/ios/core/IOSAudioPlayer.h @@ -10,13 +10,13 @@ typedef struct objc_object NativeAudioPlayer; namespace audioapi { -class AudioBus; +class AudioBuffer; class AudioContext; class IOSAudioPlayer { public: IOSAudioPlayer( - const std::function, int)> &renderAudio, + const std::function, int)> &renderAudio, float sampleRate, int channelCount); ~IOSAudioPlayer(); @@ -30,9 +30,9 @@ class IOSAudioPlayer { bool isRunning() const; protected: - std::shared_ptr audioBus_; + std::shared_ptr audioBuffer_; NativeAudioPlayer *audioPlayer_; - std::function, int)> renderAudio_; + std::function, int)> renderAudio_; int channelCount_; std::atomic isRunning_; }; diff --git a/packages/react-native-audio-api/ios/audioapi/ios/core/IOSAudioPlayer.mm b/packages/react-native-audio-api/ios/audioapi/ios/core/IOSAudioPlayer.mm index 0b2609a1f..5fca12e33 100644 --- a/packages/react-native-audio-api/ios/audioapi/ios/core/IOSAudioPlayer.mm +++ b/packages/react-native-audio-api/ios/audioapi/ios/core/IOSAudioPlayer.mm @@ -5,15 +5,15 @@ #include #include #include -#include +#include namespace audioapi { IOSAudioPlayer::IOSAudioPlayer( - const std::function, int)> &renderAudio, + const std::function, int)> &renderAudio, float sampleRate, int channelCount) - : renderAudio_(renderAudio), channelCount_(channelCount), audioBus_(0), 
isRunning_(false) + : renderAudio_(renderAudio), channelCount_(channelCount), audioBuffer_(0), isRunning_(false) { RenderAudioBlock renderAudioBlock = ^(AudioBufferList *outputData, int numFrames) { int processedFrames = 0; @@ -22,16 +22,16 @@ int framesToProcess = std::min(numFrames - processedFrames, RENDER_QUANTUM_SIZE); if (isRunning_.load(std::memory_order_acquire)) { - renderAudio_(audioBus_, framesToProcess); + renderAudio_(audioBuffer_, framesToProcess); } else { - audioBus_->zero(); + audioBuffer_->zero(); } - for (int channel = 0; channel < channelCount_; channel += 1) { + for (size_t channel = 0; channel < channelCount_; channel += 1) { float *outputChannel = (float *)outputData->mBuffers[channel].mData; - auto *inputChannel = audioBus_->getChannel(channel)->getData(); - memcpy(outputChannel + processedFrames, inputChannel, framesToProcess * sizeof(float)); + audioBuffer_->getChannel(channel)->copyTo( + outputChannel, 0, processedFrames, framesToProcess); } processedFrames += framesToProcess; @@ -42,7 +42,7 @@ sampleRate:sampleRate channelCount:channelCount_]; - audioBus_ = std::make_shared(RENDER_QUANTUM_SIZE, channelCount_, sampleRate); + audioBuffer_ = std::make_shared(RENDER_QUANTUM_SIZE, channelCount_, sampleRate); } IOSAudioPlayer::~IOSAudioPlayer() @@ -96,10 +96,7 @@ { stop(); [audioPlayer_ cleanup]; - - if (audioBus_) { - audioBus_ = nullptr; - } + audioBuffer_ = nullptr; } } // namespace audioapi diff --git a/packages/react-native-audio-api/ios/audioapi/ios/core/IOSAudioRecorder.mm b/packages/react-native-audio-api/ios/audioapi/ios/core/IOSAudioRecorder.mm index ac6229b1b..d6fb6e7a2 100644 --- a/packages/react-native-audio-api/ios/audioapi/ios/core/IOSAudioRecorder.mm +++ b/packages/react-native-audio-api/ios/audioapi/ios/core/IOSAudioRecorder.mm @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include @@ -51,9 +51,9 @@ if (isConnected()) { if (auto lock = Locker::tryLock(adapterNodeMutex_)) { for (size_t 
channel = 0; channel < adapterNode_->channelCount_; ++channel) { - float *channelData = (float *)inputBuffer->mBuffers[channel].mData; + auto data = (float *)inputBuffer->mBuffers[channel].mData; - adapterNode_->buff_[channel]->write(channelData, numFrames); + adapterNode_->buff_[channel]->write(data, numFrames); } } } diff --git a/packages/react-native-audio-api/ios/audioapi/ios/core/utils/AudioDecoder.mm b/packages/react-native-audio-api/ios/audioapi/ios/core/utils/AudioDecoder.mm index 3b2b4b8d4..a1a68a3b5 100644 --- a/packages/react-native-audio-api/ios/audioapi/ios/core/utils/AudioDecoder.mm +++ b/packages/react-native-audio-api/ios/audioapi/ios/core/utils/AudioDecoder.mm @@ -2,7 +2,6 @@ #include #include -#include #include #include #include @@ -11,7 +10,7 @@ #include #endif // RN_AUDIO_API_FFMPEG_DISABLED #include -#include +#include namespace audioapi { @@ -51,16 +50,11 @@ } auto outputFrames = buffer.size() / outputChannels; - auto audioBus = std::make_shared(outputFrames, outputChannels, outputSampleRate); + auto audioBuffer = std::make_shared(outputFrames, outputChannels, outputSampleRate); - for (int ch = 0; ch < outputChannels; ++ch) { - auto channelData = audioBus->getChannel(ch)->getData(); - for (int i = 0; i < outputFrames; ++i) { - channelData[i] = buffer[i * outputChannels + ch]; - } - } + audioBuffer->deinterleaveFrom(buffer.data(), outputFrames); - return std::make_shared(audioBus); + return audioBuffer; } std::shared_ptr AudioDecoder::decodeWithFilePath( @@ -152,10 +146,11 @@ const auto uint8Data = reinterpret_cast(decodedData.data()); size_t numFramesDecoded = decodedData.size() / (inputChannelCount * sizeof(int16_t)); - auto audioBus = std::make_shared(numFramesDecoded, inputChannelCount, inputSampleRate); + auto audioBuffer = + std::make_shared(numFramesDecoded, inputChannelCount, inputSampleRate); - for (int ch = 0; ch < inputChannelCount; ++ch) { - auto channelData = audioBus->getChannel(ch)->getData(); + for (size_t ch = 0; ch < 
inputChannelCount; ++ch) { + auto channelData = audioBuffer->getChannel(ch)->span(); for (size_t i = 0; i < numFramesDecoded; ++i) { size_t offset; @@ -170,7 +165,7 @@ channelData[i] = uint8ToFloat(uint8Data[offset], uint8Data[offset + 1]); } } - return std::make_shared(audioBus); + return audioBuffer; } } // namespace audioapi diff --git a/packages/react-native-audio-api/ios/audioapi/ios/core/utils/IOSRecorderCallback.h b/packages/react-native-audio-api/ios/audioapi/ios/core/utils/IOSRecorderCallback.h index ac87f79a9..173c24ce7 100644 --- a/packages/react-native-audio-api/ios/audioapi/ios/core/utils/IOSRecorderCallback.h +++ b/packages/react-native-audio-api/ios/audioapi/ios/core/utils/IOSRecorderCallback.h @@ -18,7 +18,7 @@ struct CallbackData { namespace audioapi { -class AudioBus; +class AudioBuffer; class CircularAudioArray; class AudioEventHandlerRegistry; diff --git a/packages/react-native-audio-api/ios/audioapi/ios/core/utils/IOSRecorderCallback.mm b/packages/react-native-audio-api/ios/audioapi/ios/core/utils/IOSRecorderCallback.mm index fdcf7c58d..63356f1e6 100644 --- a/packages/react-native-audio-api/ios/audioapi/ios/core/utils/IOSRecorderCallback.mm +++ b/packages/react-native-audio-api/ios/audioapi/ios/core/utils/IOSRecorderCallback.mm @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include #include @@ -39,8 +39,8 @@ converterInputBuffer_ = nil; converterOutputBuffer_ = nil; - for (int i = 0; i < channelCount_; ++i) { - circularBus_[i]->zero(); + for (size_t i = 0; i < channelCount_; ++i) { + circularBuffer_[i]->zero(); } } } @@ -105,7 +105,7 @@ void IOSRecorderCallback::cleanup() { @autoreleasepool { - if (circularBus_[0]->getNumberOfAvailableFrames() > 0) { + if (circularBuffer_[0]->getNumberOfAvailableFrames() > 0) { emitAudioData(true); } @@ -115,8 +115,8 @@ converterInputBuffer_ = nil; converterOutputBuffer_ = nil; - for (int i = 0; i < channelCount_; ++i) { - circularBus_[i]->zero(); + for (size_t i = 0; i < 
channelCount_; ++i) { + circularBuffer_[i]->zero(); } offloader_.reset(); } @@ -147,12 +147,12 @@ if (bufferFormat_.sampleRate == sampleRate_ && bufferFormat_.channelCount == channelCount_ && !bufferFormat_.isInterleaved) { // Directly write to circular buffer - for (int i = 0; i < channelCount_; ++i) { - auto *inputChannel = static_cast(inputBuffer->mBuffers[i].mData); - circularBus_[i]->push_back(inputChannel, numFrames); + for (size_t i = 0; i < channelCount_; ++i) { + auto *data = static_cast(inputBuffer->mBuffers[i].mData); + circularBuffer_[i]->push_back(data, numFrames); } - if (circularBus_[0]->getNumberOfAvailableFrames() >= bufferLength_) { + if (circularBuffer_[0]->getNumberOfAvailableFrames() >= bufferLength_) { emitAudioData(); } return; @@ -193,13 +193,12 @@ return; } - for (int i = 0; i < channelCount_; ++i) { - auto *inputChannel = - static_cast(converterOutputBuffer_.audioBufferList->mBuffers[i].mData); - circularBus_[i]->push_back(inputChannel, outputFrameCount); + for (size_t i = 0; i < channelCount_; ++i) { + auto *data = static_cast(converterOutputBuffer_.audioBufferList->mBuffers[i].mData); + circularBuffer_[i]->push_back(data, outputFrameCount); } - if (circularBus_[0]->getNumberOfAvailableFrames() >= bufferLength_) { + if (circularBuffer_[0]->getNumberOfAvailableFrames() >= bufferLength_) { emitAudioData(); } }