Commit 2b0c525e authored by David Reid

Web Audio: Fix ScriptProcessorNode path when compiling with --closure=1.

Audio Worklets do not work with --closure=1 because the callback passed
to emscripten_create_wasm_audio_worklet_processor_async never fires,
which means miniaudio can never escape from its busy-wait loop.
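
For context, a minimal sketch of the async-creation-plus-busy-wait
pattern that hangs under --closure=1 (assumed names, not miniaudio's
actual code): the completion callback is supposed to flip a flag the
caller spins on, so if the callback never fires, the loop never exits.

    #include <emscripten/emscripten.h>
    #include <emscripten/webaudio.h>

    static volatile int g_workletReady = 0;  /* hypothetical completion flag */

    /* Fired by the browser once the worklet processor is registered. Under
       --closure=1 this was never invoked, leaving g_workletReady at 0. */
    static void on_processor_created(EMSCRIPTEN_WEBAUDIO_T audioContext, EM_BOOL success, void* pUserData)
    {
        (void)audioContext;
        (void)pUserData;
        if (success) {
            g_workletReady = 1;
        }
    }

    static void create_processor_and_wait(EMSCRIPTEN_WEBAUDIO_T audioContext)
    {
        WebAudioWorkletProcessorCreateOptions options = {0};
        options.name = "miniaudio";

        emscripten_create_wasm_audio_worklet_processor_async(audioContext, &options, on_processor_created, NULL);

        /* Busy-wait for the callback; spins forever if it never fires. */
        while (!g_workletReady) {
            emscripten_sleep(1);  /* illustrative; requires -sASYNCIFY */
        }
    }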

Public issue https://github.com/mackron/miniaudio/issues/778
parent 6cba1592
@@ -39791,7 +39791,7 @@ static ma_result ma_device_uninit__webaudio(ma_device* pDevice)
     #if defined(MA_USE_AUDIO_WORKLETS)
     {
         EM_ASM({
-            var device = miniaudio.get_device_by_index($0);
+            var device = window.miniaudio.get_device_by_index($0);
             if (device.streamNode !== undefined) {
                 device.streamNode.disconnect();
@@ -39806,7 +39806,7 @@ static ma_result ma_device_uninit__webaudio(ma_device* pDevice)
     #else
     {
         EM_ASM({
-            var device = miniaudio.get_device_by_index($0);
+            var device = window.miniaudio.get_device_by_index($0);
             /* Make sure all nodes are disconnected and marked for collection. */
             if (device.scriptNode !== undefined) {
@@ -39833,7 +39833,7 @@ static ma_result ma_device_uninit__webaudio(ma_device* pDevice)
     /* Clean up the device on the JS side. */
     EM_ASM({
-        miniaudio.untrack_device_by_index($0);
+        window.miniaudio.untrack_device_by_index($0);
     }, pDevice->webaudio.deviceIndex);

     ma_free(pDevice->webaudio.pIntermediaryBuffer, &pDevice->pContext->allocationCallbacks);
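The recurring change above, qualifying the global with `window.`, is the
heart of the fix: under --closure=1 the Closure Compiler renames or
prunes bare undeclared globals like `miniaudio`, while a property access
on the extern `window` is left intact. A minimal sketch of the
difference (illustrative, not part of the commit):

    #include <emscripten/emscripten.h>

    int main(void)
    {
        /* Set up a global the same way ma_context_init__webaudio does,
           then read it back. */
        EM_ASM({
            window.miniaudio = { answer: 42 };

            /* Bare identifier: Closure may rename `miniaudio`, breaking
               the lookup in a --closure=1 build. */
            /* console.log(miniaudio.answer); */

            /* Property access on the extern `window` survives minification. */
            console.log(window.miniaudio.answer);  /* 42 */
        });
        return 0;
    }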
@@ -39998,7 +39998,6 @@ static void ma_audio_worklet_processor_created__webaudio(EMSCRIPTEN_WEBAUDIO_T a
         return;
     }

     pParameters->pDevice->webaudio.audioWorklet = emscripten_create_wasm_audio_worklet_node(audioContext, "miniaudio", &audioWorkletOptions, &ma_audio_worklet_process_callback__webaudio, pParameters->pDevice);

     /* With the audio worklet initialized we can now attach it to the graph. */
@@ -40138,7 +40137,6 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co
     /* It's not clear if this can return an error. None of the tests in the Emscripten repository check for this, so neither am I for now. */
     pDevice->webaudio.audioContext = emscripten_create_audio_context(&audioContextAttributes);

     /*
     With the context created we can now create the worklet. We can only have a single worklet per audio
     context which means we'll need to craft this appropriately to handle duplex devices correctly.
@@ -40187,7 +40185,7 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co

     /* We need to add an entry to the miniaudio.devices list on the JS side so we can do some JS/C interop. */
     pDevice->webaudio.deviceIndex = EM_ASM_INT({
-        return miniaudio.track_device({
+        return window.miniaudio.track_device({
             webaudio: emscriptenGetAudioObject($0),
             state: 1 /* 1 = ma_device_state_stopped */
         });
@@ -40272,11 +40270,11 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co
         /* The node processing callback. */
         device.scriptNode.onaudioprocess = function(e) {
             if (device.intermediaryBufferView == null || device.intermediaryBufferView.length == 0) {
-                device.intermediaryBufferView = new Float32Array(Module.HEAPF32.buffer, pIntermediaryBuffer, bufferSize * channels);
+                device.intermediaryBufferView = new Float32Array(HEAPF32.buffer, pIntermediaryBuffer, bufferSize * channels);
             }

             /* Do the capture side first. */
-            if (deviceType == miniaudio.device_type.capture || deviceType == miniaudio.device_type.duplex) {
+            if (deviceType == window.miniaudio.device_type.capture || deviceType == window.miniaudio.device_type.duplex) {
                 /* The data must be interleaved before being processed miniaudio. */
                 for (var iChannel = 0; iChannel < channels; iChannel += 1) {
                     var inputBuffer = e.inputBuffer.getChannelData(iChannel);
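The `Module.HEAPF32` to `HEAPF32` change in this hunk follows the same
logic from the other direction: EM_ASM bodies are compiled together with
the module's own JS, so the bare `HEAPF32` symbol is renamed
consistently by Closure, whereas `HEAPF32` is not attached to `Module`
unless explicitly exported. A self-contained sketch (illustrative):

    #include <emscripten/emscripten.h>

    int main(void)
    {
        float samples[4] = {0.0f, 0.25f, 0.5f, 0.75f};

        EM_ASM({
            /* Bare HEAPF32 is part of the compiled JS, so Closure renames
               it together with every other internal reference and it keeps
               working. Module.HEAPF32 would require an explicit export
               (e.g. via EXPORTED_RUNTIME_METHODS). */
            var view = new Float32Array(HEAPF32.buffer, $0, $1);
            console.log(view[1]);  /* 0.25 */
        }, samples, 4);

        return 0;
    }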
@@ -40290,7 +40288,7 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co
                 _ma_device_process_pcm_frames_capture__webaudio(pDevice, bufferSize, pIntermediaryBuffer);
             }

-            if (deviceType == miniaudio.device_type.playback || deviceType == miniaudio.device_type.duplex) {
+            if (deviceType == window.miniaudio.device_type.playback || deviceType == window.miniaudio.device_type.duplex) {
                 _ma_device_process_pcm_frames_playback__webaudio(pDevice, bufferSize, pIntermediaryBuffer);
                 for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) {
@@ -40310,7 +40308,7 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co
         };

         /* Now we need to connect our node to the graph. */
-        if (deviceType == miniaudio.device_type.capture || deviceType == miniaudio.device_type.duplex) {
+        if (deviceType == window.miniaudio.device_type.capture || deviceType == window.miniaudio.device_type.duplex) {
            navigator.mediaDevices.getUserMedia({audio:true, video:false})
                .then(function(stream) {
                    device.streamNode = device.webaudio.createMediaStreamSource(stream);
@@ -40322,13 +40320,13 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co
             });
         }

-        if (deviceType == miniaudio.device_type.playback) {
+        if (deviceType == window.miniaudio.device_type.playback) {
             device.scriptNode.connect(device.webaudio.destination);
         }

         device.pDevice = pDevice;

-        return miniaudio.track_device(device);
+        return window.miniaudio.track_device(device);
     }, pConfig->deviceType, channels, sampleRate, periodSizeInFrames, pDevice->webaudio.pIntermediaryBuffer, pDevice);

     if (deviceIndex < 0) {
@@ -40338,7 +40336,7 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co
     pDevice->webaudio.deviceIndex = deviceIndex;

     /* Grab the sample rate from the audio context directly. */
-    sampleRate = (ma_uint32)EM_ASM_INT({ return miniaudio.get_device_by_index($0).webaudio.sampleRate; }, deviceIndex);
+    sampleRate = (ma_uint32)EM_ASM_INT({ return window.miniaudio.get_device_by_index($0).webaudio.sampleRate; }, deviceIndex);

     if (pDescriptorCapture != NULL) {
         pDescriptorCapture->format = ma_format_f32;
@@ -40368,9 +40366,9 @@ static ma_result ma_device_start__webaudio(ma_device* pDevice)
     MA_ASSERT(pDevice != NULL);

     EM_ASM({
-        var device = miniaudio.get_device_by_index($0);
+        var device = window.miniaudio.get_device_by_index($0);
         device.webaudio.resume();
-        device.state = miniaudio.device_state.started;
+        device.state = window.miniaudio.device_state.started;
     }, pDevice->webaudio.deviceIndex);

     return MA_SUCCESS;
@@ -40390,9 +40388,9 @@ static ma_result ma_device_stop__webaudio(ma_device* pDevice)
     do any kind of explicit draining.
     */
     EM_ASM({
-        var device = miniaudio.get_device_by_index($0);
+        var device = window.miniaudio.get_device_by_index($0);
         device.webaudio.suspend();
-        device.state = miniaudio.device_state.stopped;
+        device.state = window.miniaudio.device_state.stopped;
     }, pDevice->webaudio.deviceIndex);

     ma_device__on_notification_stopped(pDevice);
@@ -40451,6 +40449,7 @@ static ma_result ma_context_init__webaudio(ma_context* pContext, const ma_contex
         window.miniaudio.device_state.started = $4;

         /* Device cache for mapping devices to indexes for JavaScript/C interop. */
+        let miniaudio = window.miniaudio;
         miniaudio.devices = [];
         miniaudio.track_device = function(device) {
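The one added line in this hunk, `let miniaudio = window.miniaudio;`,
makes the many bare `miniaudio` references that follow refer to a
declared local, which Closure can rename consistently instead of
treating as an unknown global. Roughly (illustrative):

    #include <emscripten/emscripten.h>

    int main(void)
    {
        EM_ASM({
            window.miniaudio = { devices: [] };

            /* Declared alias: Closure renames `miniaudio` and every use
               of it in lockstep, so the bare references below stay valid. */
            let miniaudio = window.miniaudio;
            miniaudio.devices.push({ state: 1 });
            console.log(miniaudio.devices.length);  /* 1 */
        });
        return 0;
    }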
@@ -40502,10 +40501,10 @@ static ma_result ma_context_init__webaudio(ma_context* pContext, const ma_contex
                 var device = miniaudio.devices[i];
                 if (device != null &&
                     device.webaudio != null &&
-                    device.state === window.miniaudio.device_state.started) {
+                    device.state === miniaudio.device_state.started) {
                     device.webaudio.resume().then(() => {
-                        Module._ma_device__on_notification_unlocked(device.pDevice);
+                        _ma_device__on_notification_unlocked(device.pDevice);
                     },
                     (error) => {console.error("Failed to resume audiocontext", error);
                     });
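
Likewise, the last hunk drops the `Module.` prefix when calling back
into C: an exported C function is visible inside the compiled JS as a
bare `_name` symbol that Closure renames coherently, while
`Module._name` only exists when the export is explicitly attached to
the Module object. A sketch of the pattern (hypothetical function name):

    #include <emscripten/emscripten.h>
    #include <stdio.h>

    /* Keep the symbol exported so it exists in the compiled JS as _on_unlocked. */
    EMSCRIPTEN_KEEPALIVE
    void on_unlocked(int deviceIndex)
    {
        printf("unlocked: %d\n", deviceIndex);
    }

    int main(void)
    {
        EM_ASM({
            /* The bare _on_unlocked lives in the same Closure-compiled
               scope, so the rename stays consistent; Module._on_unlocked
               would need an explicit export to the Module object. */
            _on_unlocked($0);
        }, 7);
        return 0;
    }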