diff --git a/src/audio_worklet.js b/src/audio_worklet.js
index 6ed827af22e7d..3328e12e52295 100644
--- a/src/audio_worklet.js
+++ b/src/audio_worklet.js
@@ -31,12 +31,44 @@ function createWasmAudioWorkletProcessor(audioParams) {
     let opts = args.processorOptions;
     this.callbackFunction = Module['wasmTable'].get(opts['cb']);
     this.userData = opts['ud'];
+    // Then the samples per channel to process, fixed for the lifetime of the
-    // context that created this processor. Note for when moving to Web Audio
-    // 1.1: the typed array passed to process() should be the same size as this
-    // 'render quantum size', and this exercise of passing in the value
-    // shouldn't be required (to be verified).
+    // context that created this processor. Even though this 'render quantum
+    // size' is fixed at 128 samples in the 1.0 spec, it will be variable in
+    // the 1.1 spec. It's passed in now, just to prove it's settable, but will
+    // eventually be a property of the AudioWorkletGlobalScope (globalThis).
     this.samplesPerChannel = opts['sc'];
+
+    // Create up-front as many typed views for marshalling the output data as
+    // may be required (with an arbitrary maximum of 10, for the case where a
+    // multi-MB stack is passed), allocated at the *top* of the worklet's
+    // stack (and whose addresses are fixed). The 'minimum alloc' firstly
+    // stops STACK_OVERFLOW_CHECK failing (since the stack will be full, with
+    // 16 being the minimum allocation size due to alignment) and secondly
+    // leaves room for a single AudioSampleFrame as a minimum.
+    this.maxBuffers = Math.min(((Module['sz'] - /*minimum alloc*/ 16) / (this.samplesPerChannel * 4)) | 0, /*sensible limit*/ 10);
+#if ASSERTIONS
+    console.assert(this.maxBuffers > 0, `AudioWorklet needs more stack allocating (at least ${this.samplesPerChannel * 4})`);
+#endif
+    // These are still alloc'd to take advantage of the overflow checks, etc.
+    var oldStackPtr = stackSave();
+    var viewDataIdx = stackAlloc(this.maxBuffers * this.samplesPerChannel * 4) >> 2;
+#if WEBAUDIO_DEBUG
+    console.log(`AudioWorklet creating ${this.maxBuffers} one-time buffer views (for a stack size of ${Module['sz']})`);
+#endif
+    this.outputViews = [];
+    for (var i = this.maxBuffers; i > 0; i--) {
+      // Added in reverse so the lowest indices are closest to the stack top
+      this.outputViews.unshift(
+        HEAPF32.subarray(viewDataIdx, viewDataIdx += this.samplesPerChannel)
+      );
+    }
+    stackRestore(oldStackPtr);
+
+#if ASSERTIONS
+    // Explicitly verify this later in process()
+    this.ctorOldStackPtr = oldStackPtr;
+#endif
   }

   static get parameterDescriptors() {
@@ -52,22 +84,36 @@ function createWasmAudioWorkletProcessor(audioParams) {
       numOutputs = outputList.length,
       numParams = 0, i, j, k, dataPtr,
      bytesPerChannel = this.samplesPerChannel * 4,
+      outputViewsNeeded = 0,
       stackMemoryNeeded = (numInputs + numOutputs) * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}},
       oldStackPtr = stackSave(),
-      inputsPtr, outputsPtr, outputDataPtr, paramsPtr,
+      inputsPtr, outputsPtr, paramsPtr,
       didProduceAudio, paramArray;

-    // Calculate how much stack space is needed.
+    // Calculate how much stack space is needed
     for (i of inputList) stackMemoryNeeded += i.length * bytesPerChannel;
-    for (i of outputList) stackMemoryNeeded += i.length * bytesPerChannel;
+    for (i of outputList) outputViewsNeeded += i.length;
+    stackMemoryNeeded += outputViewsNeeded * bytesPerChannel;
     for (i in parameters) stackMemoryNeeded += parameters[i].byteLength + {{{ C_STRUCTS.AudioParamFrame.__size__ }}}, ++numParams;

-    // Allocate the necessary stack space.
-    inputsPtr = stackAlloc(stackMemoryNeeded);
+#if ASSERTIONS
+    console.assert(oldStackPtr == this.ctorOldStackPtr, 'AudioWorklet stack address has unexpectedly moved');
+    console.assert(outputViewsNeeded <= this.outputViews.length, `Too many AudioWorklet outputs (need ${outputViewsNeeded} but have stack space for ${this.outputViews.length})`);
+#endif
+
+    // Allocate the necessary stack space (dataPtr is always in bytes, and
+    // advances as space for structs and data is taken, but note the switching
+    // between bytes and indices into the various heaps, usually in 'k'). This
+    // will be 16-byte aligned (from _emscripten_stack_alloc()), as were the
+    // output views, so we round up and advance the required bytes to ensure
+    // the addresses all work out at the end.
+    i = (stackMemoryNeeded + 15) & ~15;
+    dataPtr = stackAlloc(i) + (i - stackMemoryNeeded);

     // Copy input audio descriptor structs and data to Wasm
+    inputsPtr = dataPtr;
     k = inputsPtr >> 2;
-    dataPtr = inputsPtr + numInputs * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}};
+    dataPtr += numInputs * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}};
     for (i of inputList) {
       // Write the AudioSampleFrame struct instance
       HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.numberOfChannels / 4 }}}] = i.length;
@@ -81,20 +127,6 @@ function createWasmAudioWorkletProcessor(audioParams) {
       }
     }

-    // Copy output audio descriptor structs to Wasm
-    outputsPtr = dataPtr;
-    k = outputsPtr >> 2;
-    outputDataPtr = (dataPtr += numOutputs * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}}) >> 2;
-    for (i of outputList) {
-      // Write the AudioSampleFrame struct instance
-      HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.numberOfChannels / 4 }}}] = i.length;
-      HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.samplesPerChannel / 4 }}}] = this.samplesPerChannel;
-      HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.data / 4 }}}] = dataPtr;
-      k += {{{ C_STRUCTS.AudioSampleFrame.__size__ / 4 }}};
-      // Reserve space for the output data
-      dataPtr += bytesPerChannel * i.length;
-    }
-
     // Copy parameters descriptor structs and data to Wasm
     paramsPtr = dataPtr;
     k = paramsPtr >> 2;
@@ -109,17 +141,52 @@ function createWasmAudioWorkletProcessor(audioParams) {
       dataPtr += paramArray.length*4;
     }

+    // Copy output audio descriptor structs to Wasm (note that dataPtr after
+    // the struct offsets should now be 16-byte aligned).
+    outputsPtr = dataPtr;
+    k = outputsPtr >> 2;
+    dataPtr += numOutputs * {{{ C_STRUCTS.AudioSampleFrame.__size__ }}};
+    for (i of outputList) {
+      // Write the AudioSampleFrame struct instance
+      HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.numberOfChannels / 4 }}}] = i.length;
+      HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.samplesPerChannel / 4 }}}] = this.samplesPerChannel;
+      HEAPU32[k + {{{ C_STRUCTS.AudioSampleFrame.data / 4 }}}] = dataPtr;
+      k += {{{ C_STRUCTS.AudioSampleFrame.__size__ / 4 }}};
+      // Advance the output pointer to the next output (matching the pre-allocated views)
+      dataPtr += bytesPerChannel * i.length;
+    }
+
+#if ASSERTIONS
+    // If all the maths worked out, we arrived at the original stack address
+    console.assert(dataPtr == oldStackPtr, `AudioWorklet stack mismatch (audio data finishes at ${dataPtr} instead of ${oldStackPtr})`);
+
+    // Sanity checks. If these trip, the most likely cause, beyond unforeseen
+    // stack shenanigans, is that the 'render quantum size' changed.
+    if (numOutputs) {
+      // First that the output view addresses match the stack positions.
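+      // (outputViews[0] holds the highest address: the ctor unshift()ed each
+      // view as it allocated upwards, so indices run opposite to address
+      // order and this check walks back down from the top of the stack)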
+      k = dataPtr - bytesPerChannel;
+      for (i = 0; i < outputViewsNeeded; i++) {
+        console.assert(k == this.outputViews[i].byteOffset, 'AudioWorklet internal error in addresses of the output array views');
+        k -= bytesPerChannel;
+      }
+      // And that the views' sizes match the passed-in output buffers
+      for (i of outputList) {
+        for (j of i) {
+          console.assert(j.byteLength == bytesPerChannel, `AudioWorklet unexpected output buffer size (expected ${bytesPerChannel} got ${j.byteLength})`);
+        }
+      }
+    }
+#endif
+
     // Call out to Wasm callback to perform audio processing
     if (didProduceAudio = this.callbackFunction(numInputs, inputsPtr, numOutputs, outputsPtr, numParams, paramsPtr, this.userData)) {
       // Read back the produced audio data to all outputs and their channels.
-      // (A garbage-free function TypedArray.copy(dstTypedArray, dstOffset,
-      // srcTypedArray, srcOffset, count) would sure be handy.. but web does
-      // not have one, so manually copy all bytes in)
+      // The preallocated 'outputViews' already have the correct offsets and
+      // sizes into the stack (recall from the ctor that they run backwards).
+      k = outputViewsNeeded - 1;
       for (i of outputList) {
         for (j of i) {
-          for (k = 0; k < this.samplesPerChannel; ++k) {
-            j[k] = HEAPF32[outputDataPtr++];
-          }
+          j.set(this.outputViews[k--]);
         }
       }
     }
diff --git a/src/library_webaudio.js b/src/library_webaudio.js
index f4269e9759baa..f3b01f633ea52 100644
--- a/src/library_webaudio.js
+++ b/src/library_webaudio.js
@@ -164,7 +164,10 @@ let LibraryWebAudio = {
     let audioWorkletCreationFailed = () => {
 #if WEBAUDIO_DEBUG
-      console.error(`emscripten_start_wasm_audio_worklet_thread_async() addModule() failed!`);
+      // Note about Cross-Origin here: without Cross-Origin-Opener-Policy:
+      // same-origin and Cross-Origin-Embedder-Policy: require-corp headers on
+      // the server's response, the worklet file will fail to load.
+      console.error(`emscripten_start_wasm_audio_worklet_thread_async() addModule() failed! Are the Cross-Origin headers being set?`);
 #endif
       {{{ makeDynCall('viip', 'callback') }}}(contextHandle, 0/*EM_FALSE*/, userData);
     };
diff --git a/test/test_browser.py b/test/test_browser.py
index 0e8d7edee8a93..21b6011001a6c 100644
--- a/test/test_browser.py
+++ b/test/test_browser.py
@@ -5418,6 +5418,18 @@ def test_audio_worklet_post_function(self, args):
   def test_audio_worklet_modularize(self, args):
     self.btest_exit('webaudio/audioworklet.c', args=['-sAUDIO_WORKLET', '-sWASM_WORKERS', '-sMODULARIZE=1', '-sEXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')] + args)

+  # Tests multiple inputs, forcing a larger stack (note: passing BROWSER_TEST is
+  # specific to this test to allow it to exit rather than play forever).
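+  # Runs in two build flavours: the defaults, and a size-optimised
+  # MINIMAL_RUNTIME build with closure minification.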
+  @parameterized({
+    '': ([],),
+    'minimal_with_closure': (['-sMINIMAL_RUNTIME', '--closure=1', '-Oz'],),
+  })
+  def test_audio_worklet_stereo_io(self, args):
+    os.mkdir('audio_files')
+    shutil.copy(test_file('webaudio/audio_files/emscripten-beat.mp3'), 'audio_files/')
+    shutil.copy(test_file('webaudio/audio_files/emscripten-bass.mp3'), 'audio_files/')
+    self.btest_exit('webaudio/audioworklet_in_out_stereo.c', args=['-sAUDIO_WORKLET', '-sWASM_WORKERS', '-DBROWSER_TEST'] + args)
+
   def test_error_reporting(self):
     # Test catching/reporting Error objects
     create_file('post.js', 'throw new Error("oops");')
diff --git a/test/test_interactive.py b/test/test_interactive.py
index 511ba785ce831..78de7771cd5f7 100644
--- a/test/test_interactive.py
+++ b/test/test_interactive.py
@@ -306,6 +306,34 @@ def test_audio_worklet_tone_generator(self):
   def test_audio_worklet_modularize(self):
     self.btest('webaudio/audioworklet.c', expected='0', args=['-sAUDIO_WORKLET', '-sWASM_WORKERS', '-sMINIMAL_RUNTIME', '-sMODULARIZE'])

+  # Tests an AudioWorklet with multiple stereo inputs mixed in the processor to a single stereo output (4kB stack)
+  def test_audio_worklet_stereo_io(self):
+    os.mkdir('audio_files')
+    shutil.copy(test_file('webaudio/audio_files/emscripten-beat.mp3'), 'audio_files/')
+    shutil.copy(test_file('webaudio/audio_files/emscripten-bass.mp3'), 'audio_files/')
+    self.btest_exit('webaudio/audioworklet_in_out_stereo.c', args=['-sAUDIO_WORKLET', '-sWASM_WORKERS'])
+
+  # Tests an AudioWorklet with multiple stereo inputs copied in the processor to multiple stereo outputs (6kB stack)
+  def test_audio_worklet_2x_stereo_io(self):
+    os.mkdir('audio_files')
+    shutil.copy(test_file('webaudio/audio_files/emscripten-beat.mp3'), 'audio_files/')
+    shutil.copy(test_file('webaudio/audio_files/emscripten-bass.mp3'), 'audio_files/')
+    self.btest_exit('webaudio/audioworklet_2x_in_out_stereo.c', args=['-sAUDIO_WORKLET', '-sWASM_WORKERS'])
+
+  # Tests an AudioWorklet with multiple mono inputs mixed in the processor to a single mono output (2kB stack)
+  def test_audio_worklet_mono_io(self):
+    os.mkdir('audio_files')
+    shutil.copy(test_file('webaudio/audio_files/emscripten-beat-mono.mp3'), 'audio_files/')
+    shutil.copy(test_file('webaudio/audio_files/emscripten-bass-mono.mp3'), 'audio_files/')
+    self.btest_exit('webaudio/audioworklet_in_out_mono.c', args=['-sAUDIO_WORKLET', '-sWASM_WORKERS'])
+
+  # Tests an AudioWorklet with multiple mono inputs copied in the processor to the hard-panned L+R channels of a stereo output (3kB stack)
+  def test_audio_worklet_2x_hard_pan_io(self):
+    os.mkdir('audio_files')
+    shutil.copy(test_file('webaudio/audio_files/emscripten-beat-mono.mp3'), 'audio_files/')
+    shutil.copy(test_file('webaudio/audio_files/emscripten-bass-mono.mp3'), 'audio_files/')
+    self.btest_exit('webaudio/audioworklet_2x_in_hard_pan.c', args=['-sAUDIO_WORKLET', '-sWASM_WORKERS'])
+

 class interactive64(interactive):
   def setUp(self):
diff --git a/test/webaudio/audio_files/README.md b/test/webaudio/audio_files/README.md
new file mode 100644
index 0000000000000..cc9ef55c4c831
--- /dev/null
+++ b/test/webaudio/audio_files/README.md
@@ -0,0 +1,5 @@
+Emscripten Beat and Emscripten Bass by [CoLD SToRAGE](https://www.coldstorage.org.uk) (Tim Wright).
+
+Released under the [Creative Commons Zero (CC0)](https://creativecommons.org/publicdomain/zero/1.0/) Public Domain Dedication.
+
+To the extent possible under law, OGP Phonogramatica has waived all copyright and related or neighbouring rights to these works.
diff --git a/test/webaudio/audio_files/emscripten-bass-mono.mp3 b/test/webaudio/audio_files/emscripten-bass-mono.mp3
new file mode 100644
index 0000000000000..fc0b15a0c7d90
Binary files /dev/null and b/test/webaudio/audio_files/emscripten-bass-mono.mp3 differ
diff --git a/test/webaudio/audio_files/emscripten-bass.mp3 b/test/webaudio/audio_files/emscripten-bass.mp3
new file mode 100644
index 0000000000000..e1cd05d3d1d5d
Binary files /dev/null and b/test/webaudio/audio_files/emscripten-bass.mp3 differ
diff --git a/test/webaudio/audio_files/emscripten-beat-mono.mp3 b/test/webaudio/audio_files/emscripten-beat-mono.mp3
new file mode 100644
index 0000000000000..aa48ba0b97ffb
Binary files /dev/null and b/test/webaudio/audio_files/emscripten-beat-mono.mp3 differ
diff --git a/test/webaudio/audio_files/emscripten-beat.mp3 b/test/webaudio/audio_files/emscripten-beat.mp3
new file mode 100644
index 0000000000000..cc42aa7970140
Binary files /dev/null and b/test/webaudio/audio_files/emscripten-beat.mp3 differ
diff --git a/test/webaudio/audioworklet_2x_in_hard_pan.c b/test/webaudio/audioworklet_2x_in_hard_pan.c
new file mode 100644
index 0000000000000..c599adb493bbd
--- /dev/null
+++ b/test/webaudio/audioworklet_2x_in_hard_pan.c
@@ -0,0 +1,155 @@
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <emscripten/webaudio.h>
+#include <emscripten/em_js.h>
+
+// Tests two mono audio inputs being copied to the left and right channels of a
+// single stereo output (with a hard pan).
+
+// This needs to be big enough for the stereo output, 2x mono inputs and the worker stack
+#define AUDIO_STACK_SIZE 3072
+
+// Helper for MEMORY64 to cast an audio context or type to a void*
+#define WA_2_VOIDP(ctx) ((void*) (intptr_t) ctx)
+// Helper for MEMORY64 to cast a void* to an audio context or type
+#define VOIDP_2_WA(ptr) ((EMSCRIPTEN_WEBAUDIO_T) (intptr_t) ptr)
+
+// Count the audio callbacks and return after 375 frames (1 second with the default 128 size)
+//
+// *** Remove this in your own code ***
+//
+volatile int audioProcessedCount = 0;
+bool playedAndMixed(double time, void* data) {
+  if (audioProcessedCount >= 375) {
+    emscripten_force_exit(0);
+    return false;
+  }
+  return true;
+}
+
+// IDs for the beat and bass loops
+EMSCRIPTEN_WEBAUDIO_T beatID = 0;
+EMSCRIPTEN_WEBAUDIO_T bassID = 0;
+
+// Creates a MediaElementAudioSourceNode with the supplied URL (which is
+// registered as an internal audio object and the ID returned).
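+// (emscriptenRegisterAudioObject() and emscriptenGetAudioObject() map between
+// Web Audio objects and the integer handles used by the C API, so the ID
+// returned here can be passed to emscripten_audio_node_connect() as usual.)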
+EM_JS(EMSCRIPTEN_WEBAUDIO_T, createTrack, (EMSCRIPTEN_WEBAUDIO_T ctxID, const char* url, bool looping), {
+  var context = emscriptenGetAudioObject(ctxID);
+  if (context) {
+    var audio = document.createElement('audio');
+    audio.src = UTF8ToString(url);
+    audio.loop = looping;
+    var track = context.createMediaElementSource(audio);
+    return emscriptenRegisterAudioObject(track);
+  }
+  return 0;
+});
+
+// Toggles the play/pause of a MediaElementAudioSourceNode given its ID
+EM_JS(void, toggleTrack, (EMSCRIPTEN_WEBAUDIO_T srcID), {
+  var source = emscriptenGetAudioObject(srcID);
+  if (source) {
+    var audio = source.mediaElement;
+    if (audio) {
+      if (audio.paused) {
+        audio.currentTime = 0;
+        audio.play();
+      } else {
+        audio.pause();
+      }
+    }
+  }
+});
+
+// Callback to process and copy the audio tracks
+bool process(int numInputs, const AudioSampleFrame* inputs, int numOutputs, AudioSampleFrame* outputs, int numParams, const AudioParamFrame* params, void* data) {
+  audioProcessedCount++;
+
+  // Twin mono in, single stereo out
+  assert(numInputs == 2 && numOutputs == 1);
+  assert(inputs[0].numberOfChannels == 1 && inputs[1].numberOfChannels == 1);
+  assert(outputs[0].numberOfChannels == 2);
+  // All with the same number of samples
+  assert(inputs[0].samplesPerChannel == inputs[1].samplesPerChannel);
+  assert(inputs[0].samplesPerChannel == outputs[0].samplesPerChannel);
+  // Now with all known quantities we can memcpy the data (the output's
+  // channel data is planar, so the second memcpy fills the right channel)
+  int samplesPerChannel = inputs[0].samplesPerChannel;
+  memcpy(outputs[0].data, inputs[0].data, samplesPerChannel * sizeof(float));
+  memcpy(outputs[0].data + samplesPerChannel, inputs[1].data, samplesPerChannel * sizeof(float));
+  return true;
+}
+
+// Registered click event to (1) enable audio playback and (2) toggle playing the tracks
+bool onClick(int type, const EmscriptenMouseEvent* e, void* data) {
+  EMSCRIPTEN_WEBAUDIO_T ctx = VOIDP_2_WA(data);
+  if (emscripten_audio_context_state(ctx) != AUDIO_CONTEXT_STATE_RUNNING) {
+    printf("Resuming playback\n");
+    emscripten_resume_audio_context_sync(ctx);
+  }
+  printf("Toggling audio playback\n");
+  toggleTrack(beatID);
+  toggleTrack(bassID);
+  return false;
+}
+
+// Audio processor created, now register the audio callback
+void processorCreated(EMSCRIPTEN_WEBAUDIO_T context, bool success, void* data) {
+  if (success) {
+    printf("Audio worklet processor created\n");
+    printf("Click to toggle audio playback\n");
+
+    // Stereo output, two inputs
+    int outputChannelCounts[1] = { 2 };
+    EmscriptenAudioWorkletNodeCreateOptions opts = {
+      .numberOfInputs = 2,
+      .numberOfOutputs = 1,
+      .outputChannelCounts = outputChannelCounts
+    };
+    EMSCRIPTEN_AUDIO_WORKLET_NODE_T worklet = emscripten_create_wasm_audio_worklet_node(context, "mixer", &opts, &process, NULL);
+    emscripten_audio_node_connect(worklet, context, 0, 0);
+
+    // Create the two mono source nodes and connect them to the two inputs
+    // Note: we can connect the sources to the same input and it'll get mixed for us, but that's not the point
+    beatID = createTrack(context, "audio_files/emscripten-beat-mono.mp3", true);
+    if (beatID) {
+      emscripten_audio_node_connect(beatID, worklet, 0, 0);
+    }
+    bassID = createTrack(context, "audio_files/emscripten-bass-mono.mp3", true);
+    if (bassID) {
+      emscripten_audio_node_connect(bassID, worklet, 0, 1);
+    }
+
+    // Register a click to start playback
+    emscripten_set_click_callback(EMSCRIPTEN_EVENT_TARGET_DOCUMENT, WA_2_VOIDP(context), false, &onClick);
+
+    // Register the counter that exits the test after one second of mixing
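+    // (375 callbacks x 128 samples is 48000 samples, i.e. nominally one
+    // second at a 48kHz context; the exact duration doesn't matter, only
+    // that a known number of quanta get processed before exiting)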
+    emscripten_set_timeout_loop(&playedAndMixed, 16, NULL);
+  } else {
+    printf("Audio worklet node creation failed\n");
+  }
+}
+
+// Worklet thread inited, now create the audio processor
+void initialised(EMSCRIPTEN_WEBAUDIO_T context, bool success, void* data) {
+  if (success) {
+    printf("Audio worklet initialised\n");
+
+    WebAudioWorkletProcessorCreateOptions opts = {
+      .name = "mixer",
+    };
+    emscripten_create_wasm_audio_worklet_processor_async(context, &opts, &processorCreated, NULL);
+  } else {
+    printf("Audio worklet failed to initialise\n");
+  }
+}
+
+int main() {
+  static char workletStack[AUDIO_STACK_SIZE];
+  EMSCRIPTEN_WEBAUDIO_T context = emscripten_create_audio_context(NULL);
+  emscripten_start_wasm_audio_worklet_thread_async(context, workletStack, sizeof workletStack, &initialised, NULL);
+  emscripten_runtime_keepalive_push();
+  return 0;
+}
diff --git a/test/webaudio/audioworklet_2x_in_out_stereo.c b/test/webaudio/audioworklet_2x_in_out_stereo.c
new file mode 100644
index 0000000000000..981fdbc35cac3
--- /dev/null
+++ b/test/webaudio/audioworklet_2x_in_out_stereo.c
@@ -0,0 +1,156 @@
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <emscripten/webaudio.h>
+#include <emscripten/em_js.h>
+
+// Tests two stereo audio inputs being copied to two stereo outputs.
+
+// This needs to be big enough for the 2x stereo outputs, 2x inputs and the worker stack
+#define AUDIO_STACK_SIZE 6144
+
+// Helper for MEMORY64 to cast an audio context or type to a void*
+#define WA_2_VOIDP(ctx) ((void*) (intptr_t) ctx)
+// Helper for MEMORY64 to cast a void* to an audio context or type
+#define VOIDP_2_WA(ptr) ((EMSCRIPTEN_WEBAUDIO_T) (intptr_t) ptr)
+
+// Count the audio callbacks and return after 375 frames (1 second with the default 128 size)
+//
+// *** Remove this in your own code ***
+//
+volatile int audioProcessedCount = 0;
+bool playedAndMixed(double time, void* data) {
+  if (audioProcessedCount >= 375) {
+    emscripten_force_exit(0);
+    return false;
+  }
+  return true;
+}
+
+// IDs for the beat and bass loops
+EMSCRIPTEN_WEBAUDIO_T beatID = 0;
+EMSCRIPTEN_WEBAUDIO_T bassID = 0;
+
+// Creates a MediaElementAudioSourceNode with the supplied URL (which is
+// registered as an internal audio object and the ID returned).
+EM_JS(EMSCRIPTEN_WEBAUDIO_T, createTrack, (EMSCRIPTEN_WEBAUDIO_T ctxID, const char* url, bool looping), {
+  var context = emscriptenGetAudioObject(ctxID);
+  if (context) {
+    var audio = document.createElement('audio');
+    audio.src = UTF8ToString(url);
+    audio.loop = looping;
+    var track = context.createMediaElementSource(audio);
+    return emscriptenRegisterAudioObject(track);
+  }
+  return 0;
+});
+
+// Toggles the play/pause of a MediaElementAudioSourceNode given its ID
+EM_JS(void, toggleTrack, (EMSCRIPTEN_WEBAUDIO_T srcID), {
+  var source = emscriptenGetAudioObject(srcID);
+  if (source) {
+    var audio = source.mediaElement;
+    if (audio) {
+      if (audio.paused) {
+        audio.currentTime = 0;
+        audio.play();
+      } else {
+        audio.pause();
+      }
+    }
+  }
+});
+
+// Callback to process and copy the audio tracks
+bool process(int numInputs, const AudioSampleFrame* inputs, int numOutputs, AudioSampleFrame* outputs, int numParams, const AudioParamFrame* params, void* data) {
+  audioProcessedCount++;
+
+  // Twin stereo in and out
+  assert(numInputs == 2 && numOutputs == 2);
+  assert(inputs[0].numberOfChannels == 2 && inputs[1].numberOfChannels == 2);
+  assert(outputs[0].numberOfChannels == 2 && outputs[1].numberOfChannels == 2);
+  // All with the same number of samples
+  assert(inputs[0].samplesPerChannel == inputs[1].samplesPerChannel);
+  assert(inputs[0].samplesPerChannel == outputs[0].samplesPerChannel);
+  assert(outputs[0].samplesPerChannel == outputs[1].samplesPerChannel);
+  // Now with all known quantities we can memcpy the data (each frame's
+  // planar channels are contiguous, so one memcpy covers both channels)
+  int totalSamples = outputs[0].samplesPerChannel * outputs[0].numberOfChannels;
+  memcpy(outputs[0].data, inputs[0].data, totalSamples * sizeof(float));
+  memcpy(outputs[1].data, inputs[1].data, totalSamples * sizeof(float));
+  return true;
+}
+
+// Registered click event to (1) enable audio playback and (2) toggle playing the tracks
+bool onClick(int type, const EmscriptenMouseEvent* e, void* data) {
+  EMSCRIPTEN_WEBAUDIO_T ctx = VOIDP_2_WA(data);
+  if (emscripten_audio_context_state(ctx) != AUDIO_CONTEXT_STATE_RUNNING) {
+    printf("Resuming playback\n");
+    emscripten_resume_audio_context_sync(ctx);
+  }
+  printf("Toggling audio playback\n");
+  toggleTrack(beatID);
+  toggleTrack(bassID);
+  return false;
+}
+
+// Audio processor created, now register the audio callback
+void processorCreated(EMSCRIPTEN_WEBAUDIO_T context, bool success, void* data) {
+  if (success) {
+    printf("Audio worklet processor created\n");
+    printf("Click to toggle audio playback\n");

+    // Two stereo outputs, two inputs
+    int outputChannelCounts[2] = { 2, 2 };
+    EmscriptenAudioWorkletNodeCreateOptions opts = {
+      .numberOfInputs = 2,
+      .numberOfOutputs = 2,
+      .outputChannelCounts = outputChannelCounts
+    };
+    EMSCRIPTEN_AUDIO_WORKLET_NODE_T worklet = emscripten_create_wasm_audio_worklet_node(context, "mixer", &opts, &process, NULL);
+    // Both outputs connected to the context
+    emscripten_audio_node_connect(worklet, context, 0, 0);
+    emscripten_audio_node_connect(worklet, context, 1, 0);
+
+    // Create the two stereo source nodes and connect them to the two inputs
+    // Note: we can connect the sources to the same input and it'll get mixed for us, but that's not the point
+    beatID = createTrack(context, "audio_files/emscripten-beat.mp3", true);
+    if (beatID) {
+      emscripten_audio_node_connect(beatID, worklet, 0, 0);
+    }
+    bassID = createTrack(context, "audio_files/emscripten-bass.mp3", true);
+    if (bassID) {
+      emscripten_audio_node_connect(bassID, worklet, 0, 1);
+    }
+
+    // Register a click to start playback
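+    // (Browsers only allow an AudioContext to start or resume in response to
+    // a user gesture, hence playback is gated behind this click handler)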
+    emscripten_set_click_callback(EMSCRIPTEN_EVENT_TARGET_DOCUMENT, WA_2_VOIDP(context), false, &onClick);
+
+    // Register the counter that exits the test after one second of mixing
+    emscripten_set_timeout_loop(&playedAndMixed, 16, NULL);
+  } else {
+    printf("Audio worklet node creation failed\n");
+  }
+}
+
+// Worklet thread inited, now create the audio processor
+void initialised(EMSCRIPTEN_WEBAUDIO_T context, bool success, void* data) {
+  if (success) {
+    printf("Audio worklet initialised\n");
+
+    WebAudioWorkletProcessorCreateOptions opts = {
+      .name = "mixer",
+    };
+    emscripten_create_wasm_audio_worklet_processor_async(context, &opts, &processorCreated, NULL);
+  } else {
+    printf("Audio worklet failed to initialise\n");
+  }
+}
+
+int main() {
+  static char workletStack[AUDIO_STACK_SIZE];
+  EMSCRIPTEN_WEBAUDIO_T context = emscripten_create_audio_context(NULL);
+  emscripten_start_wasm_audio_worklet_thread_async(context, workletStack, sizeof workletStack, &initialised, NULL);
+  emscripten_runtime_keepalive_push();
+  return 0;
+}
diff --git a/test/webaudio/audioworklet_in_out_mono.c b/test/webaudio/audioworklet_in_out_mono.c
new file mode 100644
index 0000000000000..fa4df382c80a8
--- /dev/null
+++ b/test/webaudio/audioworklet_in_out_mono.c
@@ -0,0 +1,166 @@
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <emscripten/webaudio.h>
+#include <emscripten/em_js.h>
+
+// Tests processing two mono audio inputs being mixed to a single mono audio
+// output in process() (by adding the inputs together).
+
+// This needs to be big enough for the mono output, 2x inputs and the worker stack
+#define AUDIO_STACK_SIZE 2048
+
+// Helper for MEMORY64 to cast an audio context or type to a void*
+#define WA_2_VOIDP(ctx) ((void*) (intptr_t) ctx)
+// Helper for MEMORY64 to cast a void* to an audio context or type
+#define VOIDP_2_WA(ptr) ((EMSCRIPTEN_WEBAUDIO_T) (intptr_t) ptr)
+
+// Count the audio callbacks and return after 375 frames (1 second with the default 128 size)
+//
+// *** Remove this in your own code ***
+//
+volatile int audioProcessedCount = 0;
+bool playedAndMixed(double time, void* data) {
+  if (audioProcessedCount >= 375) {
+    emscripten_force_exit(0);
+    return false;
+  }
+  return true;
+}
+
+// IDs for the beat and bass loops
+EMSCRIPTEN_WEBAUDIO_T beatID = 0;
+EMSCRIPTEN_WEBAUDIO_T bassID = 0;
+
+// Creates a MediaElementAudioSourceNode with the supplied URL (which is
+// registered as an internal audio object and the ID returned).
+EM_JS(EMSCRIPTEN_WEBAUDIO_T, createTrack, (EMSCRIPTEN_WEBAUDIO_T ctxID, const char* url, bool looping), {
+  var context = emscriptenGetAudioObject(ctxID);
+  if (context) {
+    var audio = document.createElement('audio');
+    audio.src = UTF8ToString(url);
+    audio.loop = looping;
+    var track = context.createMediaElementSource(audio);
+    return emscriptenRegisterAudioObject(track);
+  }
+  return 0;
+});
+
+// Toggles the play/pause of a MediaElementAudioSourceNode given its ID
+EM_JS(void, toggleTrack, (EMSCRIPTEN_WEBAUDIO_T srcID), {
+  var source = emscriptenGetAudioObject(srcID);
+  if (source) {
+    var audio = source.mediaElement;
+    if (audio) {
+      if (audio.paused) {
+        audio.currentTime = 0;
+        audio.play();
+      } else {
+        audio.pause();
+      }
+    }
+  }
+});
+
+// Callback to process and mix the audio tracks
+bool process(int numInputs, const AudioSampleFrame* inputs, int numOutputs, AudioSampleFrame* outputs, int numParams, const AudioParamFrame* params, void* data) {
+  audioProcessedCount++;
+
+  // Single mono output
+  assert(numOutputs == 1 && outputs[0].numberOfChannels == 1);
+  for (int n = 0; n < numInputs; n++) {
+    // And all inputs are also mono
+    assert(inputs[n].numberOfChannels == 1 || inputs[n].numberOfChannels == 0);
+    // This should always be the case
+    assert(inputs[n].samplesPerChannel == outputs[0].samplesPerChannel);
+  }
+  // We can now do a quick mix since we know the layouts
+  if (numInputs > 0) {
+    int totalSamples = outputs[0].samplesPerChannel * outputs[0].numberOfChannels;
+    float* outputData = outputs[0].data;
+    memcpy(outputData, inputs[0].data, totalSamples * sizeof(float));
+    for (int n = 1; n < numInputs; n++) {
+      // It's possible to have an input with no channels
+      if (inputs[n].numberOfChannels == 1) {
+        float* inputData = inputs[n].data;
+        for (int i = totalSamples - 1; i >= 0; i--) {
+          outputData[i] += inputData[i];
+        }
+      }
+    }
+  }
+  return true;
+}
+
+// Registered click event to (1) enable audio playback and (2) toggle playing the tracks
+bool onClick(int type, const EmscriptenMouseEvent* e, void* data) {
+  EMSCRIPTEN_WEBAUDIO_T ctx = VOIDP_2_WA(data);
+  if (emscripten_audio_context_state(ctx) != AUDIO_CONTEXT_STATE_RUNNING) {
+    printf("Resuming playback\n");
+    emscripten_resume_audio_context_sync(ctx);
+  }
+  printf("Toggling audio playback\n");
+  toggleTrack(beatID);
+  toggleTrack(bassID);
+  return false;
+}
+
+// Audio processor created, now register the audio callback
+void processorCreated(EMSCRIPTEN_WEBAUDIO_T context, bool success, void* data) {
+  if (success) {
+    printf("Audio worklet processor created\n");
+    printf("Click to toggle audio playback\n");
+
+    // Mono output, two inputs
+    int outputChannelCounts[1] = { 1 };
+    EmscriptenAudioWorkletNodeCreateOptions opts = {
+      .numberOfInputs = 2,
+      .numberOfOutputs = 1,
+      .outputChannelCounts = outputChannelCounts
+    };
+    EMSCRIPTEN_AUDIO_WORKLET_NODE_T worklet = emscripten_create_wasm_audio_worklet_node(context, "mixer", &opts, &process, NULL);
+    emscripten_audio_node_connect(worklet, context, 0, 0);
+
+    // Create the two mono source nodes and connect them to the two inputs
+    // Note: we can connect the sources to the same input and it'll get mixed for us, but that's not the point
+    beatID = createTrack(context, "audio_files/emscripten-beat-mono.mp3", true);
+    if (beatID) {
+      emscripten_audio_node_connect(beatID, worklet, 0, 0);
+    }
+    bassID = createTrack(context, "audio_files/emscripten-bass-mono.mp3", true);
+    if (bassID) {
+      emscripten_audio_node_connect(bassID, worklet, 0, 1);
+    }
+
+    // Register a click to start playback
+    emscripten_set_click_callback(EMSCRIPTEN_EVENT_TARGET_DOCUMENT, WA_2_VOIDP(context), false, &onClick);
+
+    // Register the counter that exits the test after one second of mixing
+    emscripten_set_timeout_loop(&playedAndMixed, 16, NULL);
+  } else {
+    printf("Audio worklet node creation failed\n");
+  }
+}
+
+// Worklet thread inited, now create the audio processor
+void initialised(EMSCRIPTEN_WEBAUDIO_T context, bool success, void* data) {
+  if (success) {
+    printf("Audio worklet initialised\n");
+
+    WebAudioWorkletProcessorCreateOptions opts = {
+      .name = "mixer",
+    };
+    emscripten_create_wasm_audio_worklet_processor_async(context, &opts, &processorCreated, NULL);
+  } else {
+    printf("Audio worklet failed to initialise\n");
+  }
+}
+
+int main() {
+  static char workletStack[AUDIO_STACK_SIZE];
+  EMSCRIPTEN_WEBAUDIO_T context = emscripten_create_audio_context(NULL);
+  emscripten_start_wasm_audio_worklet_thread_async(context, workletStack, sizeof workletStack, &initialised, NULL);
+  emscripten_runtime_keepalive_push();
+  return 0;
+}
diff --git a/test/webaudio/audioworklet_in_out_stereo.c b/test/webaudio/audioworklet_in_out_stereo.c
new file mode 100644
index 0000000000000..6d153426f0da9
--- /dev/null
+++ b/test/webaudio/audioworklet_in_out_stereo.c
@@ -0,0 +1,169 @@
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <emscripten/webaudio.h>
+#include <emscripten/em_js.h>
+
+// Tests processing two stereo audio inputs being mixed to a single stereo audio
+// output in process() (by adding the inputs together).
+
+// This needs to be big enough for the stereo output, 2x inputs and the worker stack
+#define AUDIO_STACK_SIZE 4096
+
+// Helper for MEMORY64 to cast an audio context or type to a void*
+#define WA_2_VOIDP(ctx) ((void*) (intptr_t) ctx)
+// Helper for MEMORY64 to cast a void* to an audio context or type
+#define VOIDP_2_WA(ptr) ((EMSCRIPTEN_WEBAUDIO_T) (intptr_t) ptr)
+
+// Count the audio callbacks and return after 375 frames (1 second with the default 128 size)
+//
+// *** Remove this in your own code ***
+//
+volatile int audioProcessedCount = 0;
+bool playedAndMixed(double time, void* data) {
+  if (audioProcessedCount >= 375) {
+    emscripten_force_exit(0);
+    return false;
+  }
+  return true;
+}
+
+// IDs for the beat and bass loops
+EMSCRIPTEN_WEBAUDIO_T beatID = 0;
+EMSCRIPTEN_WEBAUDIO_T bassID = 0;
+
+// Creates a MediaElementAudioSourceNode with the supplied URL (which is
+// registered as an internal audio object and the ID returned).
+EM_JS(EMSCRIPTEN_WEBAUDIO_T, createTrack, (EMSCRIPTEN_WEBAUDIO_T ctxID, const char* url, bool looping), {
+  var context = emscriptenGetAudioObject(ctxID);
+  if (context) {
+    var audio = document.createElement('audio');
+    audio.src = UTF8ToString(url);
+    audio.loop = looping;
+    var track = context.createMediaElementSource(audio);
+    return emscriptenRegisterAudioObject(track);
+  }
+  return 0;
+});
+
+// Toggles the play/pause of a MediaElementAudioSourceNode given its ID
+EM_JS(void, toggleTrack, (EMSCRIPTEN_WEBAUDIO_T srcID), {
+  var source = emscriptenGetAudioObject(srcID);
+  if (source) {
+    var audio = source.mediaElement;
+    if (audio) {
+      if (audio.paused) {
+        audio.currentTime = 0;
+        audio.play();
+      } else {
+        audio.pause();
+      }
+    }
+  }
+});
+
+// Callback to process and mix the audio tracks
+bool process(int numInputs, const AudioSampleFrame* inputs, int numOutputs, AudioSampleFrame* outputs, int numParams, const AudioParamFrame* params, void* data) {
+  audioProcessedCount++;
+
+  // Single stereo output
+  assert(numOutputs == 1 && outputs[0].numberOfChannels == 2);
+  for (int n = 0; n < numInputs; n++) {
+    // And all inputs are also stereo
+    assert(inputs[n].numberOfChannels == 2 || inputs[n].numberOfChannels == 0);
+    // This should always be the case
+    assert(inputs[n].samplesPerChannel == outputs[0].samplesPerChannel);
+  }
+  // We can now do a quick mix since we know the layouts
+  if (numInputs > 0) {
+    int totalSamples = outputs[0].samplesPerChannel * outputs[0].numberOfChannels;
+    float* outputData = outputs[0].data;
+    memcpy(outputData, inputs[0].data, totalSamples * sizeof(float));
+    for (int n = 1; n < numInputs; n++) {
+      // It's possible to have an input with no channels
+      if (inputs[n].numberOfChannels == 2) {
+        float* inputData = inputs[n].data;
+        for (int i = totalSamples - 1; i >= 0; i--) {
+          outputData[i] += inputData[i];
+        }
+      }
+    }
+  }
+  return true;
+}
+
+// Registered click event to (1) enable audio playback and (2) toggle playing the tracks
+bool onClick(int type, const EmscriptenMouseEvent* e, void* data) {
+  EMSCRIPTEN_WEBAUDIO_T ctx = VOIDP_2_WA(data);
+  if (emscripten_audio_context_state(ctx) != AUDIO_CONTEXT_STATE_RUNNING) {
+    printf("Resuming playback\n");
+    emscripten_resume_audio_context_sync(ctx);
+  }
+  printf("Toggling audio playback\n");
+  toggleTrack(beatID);
+  toggleTrack(bassID);
+  return false;
+}
+
+// Audio processor created, now register the audio callback
+void processorCreated(EMSCRIPTEN_WEBAUDIO_T context, bool success, void* data) {
+  if (success) {
+    printf("Audio worklet processor created\n");
+    printf("Click to toggle audio playback\n");
+
+    // Stereo output, two inputs
+    int outputChannelCounts[1] = { 2 };
+    EmscriptenAudioWorkletNodeCreateOptions opts = {
+      .numberOfInputs = 2,
+      .numberOfOutputs = 1,
+      .outputChannelCounts = outputChannelCounts
+    };
+    EMSCRIPTEN_AUDIO_WORKLET_NODE_T worklet = emscripten_create_wasm_audio_worklet_node(context, "mixer", &opts, &process, NULL);
+    emscripten_audio_node_connect(worklet, context, 0, 0);
+
+    // Create the two stereo source nodes and connect them to the two inputs
+    // Note: we can connect the sources to the same input and it'll get mixed for us, but that's not the point
+    beatID = createTrack(context, "audio_files/emscripten-beat.mp3", true);
+    if (beatID) {
+      emscripten_audio_node_connect(beatID, worklet, 0, 0);
+    }
+    bassID = createTrack(context, "audio_files/emscripten-bass.mp3", true);
+    if (bassID) {
+      emscripten_audio_node_connect(bassID, worklet, 0, 1);
+    }
+
+    // Register a click to start playback
+    emscripten_set_click_callback(EMSCRIPTEN_EVENT_TARGET_DOCUMENT, WA_2_VOIDP(context), false, &onClick);
+
+    // Register the counter that exits the test after one second of mixing
+    emscripten_set_timeout_loop(&playedAndMixed, 16, NULL);
+  } else {
+    printf("Audio worklet node creation failed\n");
+  }
+}
+
+// Worklet thread inited, now create the audio processor
+void initialised(EMSCRIPTEN_WEBAUDIO_T context, bool success, void* data) {
+  if (success) {
+    printf("Audio worklet initialised\n");
+
+    WebAudioWorkletProcessorCreateOptions opts = {
+      .name = "mixer",
+    };
+    emscripten_create_wasm_audio_worklet_processor_async(context, &opts, &processorCreated, NULL);
+  } else {
+    printf("Audio worklet failed to initialise\n");
+  }
+}
+
+int main() {
+  static char workletStack[AUDIO_STACK_SIZE];
+  EMSCRIPTEN_WEBAUDIO_T context = emscripten_create_audio_context(NULL);
+  emscripten_start_wasm_audio_worklet_thread_async(context, workletStack, sizeof workletStack, &initialised, NULL);
+#ifndef BROWSER_TEST
+  // Special case: the interactive test keeps the runtime alive and plays
+  // forever; the browser test instead exits via emscripten_force_exit()
+  emscripten_runtime_keepalive_push();
+#endif
+  return 0;
+}
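A note on the AUDIO_STACK_SIZE values used in the tests above: each callback needs one render quantum of float samples on the worklet stack for every input and output channel, plus space for the AudioSampleFrame/AudioParamFrame structs, 16-byte alignment padding and the process() callback's own locals. A rough sketch of the arithmetic, assuming the fixed 128-sample render quantum of the Web Audio 1.0 spec (audioDataBytes is an illustrative helper, not part of the API; round the final figure up generously):

    #include <stddef.h>

    // Lower bound for the sample data alone; the tests above leave roughly
    // 0.5-2kB of headroom on top of this for structs, alignment padding and
    // the process() callback's locals.
    static size_t audioDataBytes(int totalChannels) {
      return totalChannels * 128 * sizeof(float); // 512 bytes per channel
    }

    // e.g. audioworklet_in_out_stereo.c processes two stereo inputs and one
    // stereo output: audioDataBytes(6) == 3072, with AUDIO_STACK_SIZE 4096.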