Commit 810cdc23 authored by David Reid

Improvements to Audio Worklets support for Web Audio.

Public issue https://github.com/mackron/miniaudio/issues/597
parent 53907863
@@ -11,6 +11,10 @@ path like "C:\emsdk\emsdk_env.bat". Note that PowerShell doesn't work for me for
 emcc ../simple_playback_sine.c -o bin/simple_playback_sine.html
 emcc ../simple_playback_sine.c -o bin/simple_playback_sine.html -s WASM=0 -Wall -Wextra
 
+To compile with support for Audio Worklets:
+
+emcc ../simple_playback_sine.c -o bin/simple_playback_sine.html -DMA_ENABLE_AUDIO_WORKLETS -sAUDIO_WORKLET=1 -sWASM_WORKERS=1 -sASYNCIFY
+
 If you output WASM it may not work when running the web page locally. To test you can run with something
 like this:
......
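
For orientation, the build lines above compile an ordinary miniaudio program; the same example source is compiled either way, with only the -DMA_ENABLE_AUDIO_WORKLETS define (plus the Emscripten worklet flags) differing. The following is a rough sketch of such a program, not the repository's simple_playback_sine.c; the include path, tone parameters and callback name are illustrative:

    /* Sketch only. Assumes miniaudio.h is two directories up; adjust the path as needed. */
    #define MINIAUDIO_IMPLEMENTATION
    #include "../../miniaudio.h"

    static void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
    {
        ma_waveform* pSineWave = (ma_waveform*)pDevice->pUserData;
        ma_waveform_read_pcm_frames(pSineWave, pOutput, frameCount, NULL);
        (void)pInput;
    }

    int main(void)
    {
        ma_waveform sineWave;
        ma_waveform_config sineWaveConfig = ma_waveform_config_init(ma_format_f32, 2, 48000, ma_waveform_type_sine, 0.2, 220);
        ma_waveform_init(&sineWaveConfig, &sineWave);

        ma_device_config deviceConfig = ma_device_config_init(ma_device_type_playback);
        deviceConfig.playback.format   = ma_format_f32;
        deviceConfig.playback.channels = 2;
        deviceConfig.sampleRate        = 48000;
        deviceConfig.dataCallback      = data_callback;
        deviceConfig.pUserData         = &sineWave;

        ma_device device;
        if (ma_device_init(NULL, &deviceConfig, &device) != MA_SUCCESS) {
            return -1;
        }

        ma_device_start(&device);   /* Browsers typically keep audio muted until the page gets a user gesture. */

        /* Under Emscripten's default settings main() can simply return; the runtime and the audio callback keep running.
           We deliberately don't uninitialize the device here for that reason. */
        return 0;
    }
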
@@ -39879,6 +39879,11 @@ static EM_BOOL ma_audio_worklet_process_callback__webaudio(int inputCount, const
     (void)paramCount;
     (void)pParams;
 
+    if (ma_device_get_state(pDevice) != ma_device_state_started) {
+        return EM_TRUE;
+    }
+
     /*
     The Emscripten documentation says that it'll always be 128 frames being passed in. Hard coding it like that feels
     like a very bad idea to me. Even if it's hard coded in the backend, the API and documentation should always refer
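
The new early-out above is driven by ma_device_get_state(). The same accessor is usable from application code as well; a minimal sketch (assumes an initialized ma_device named device):

    /* Sketch: poll the device state before deciding whether to (re)start it. */
    ma_device_state state = ma_device_get_state(&device);
    if (state == ma_device_state_started) {
        /* The data callback is currently being fired. */
    } else if (state == ma_device_state_stopped) {
        ma_device_start(&device);   /* returns MA_SUCCESS on success */
    }
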
@@ -39895,8 +39900,8 @@ static EM_BOOL ma_audio_worklet_process_callback__webaudio(int inputCount, const
         ma_uint32 framesToProcessThisIteration = frameCount - framesProcessed;
 
         if (inputCount > 0) {
-            if (framesToProcessThisIteration > pDevice->webaudio.intermediaryBufferSizeInFramesPlayback) {
-                framesToProcessThisIteration = pDevice->webaudio.intermediaryBufferSizeInFramesPlayback;
+            if (framesToProcessThisIteration > pDevice->webaudio.intermediaryBufferSizeInFramesCapture) {
+                framesToProcessThisIteration = pDevice->webaudio.intermediaryBufferSizeInFramesCapture;
             }
 
             /* Input data needs to be interleaved before we hand it to the client. */
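
The clamp being corrected here exists because one process() callback can ask for more frames than the intermediary (staging) buffer holds, so the backend walks the request in chunks. A generic sketch of that pattern, with illustrative names rather than the backend's actual variables:

    /* Sketch: process frameCount frames through a fixed-size staging buffer. */
    static void process_in_chunks(ma_uint32 frameCount, ma_uint32 intermediaryBufferSizeInFrames)
    {
        ma_uint32 framesProcessed = 0;
        while (framesProcessed < frameCount) {
            ma_uint32 framesToProcess = frameCount - framesProcessed;
            if (framesToProcess > intermediaryBufferSizeInFrames) {
                framesToProcess = intermediaryBufferSizeInFrames;   /* never overrun the staging buffer */
            }

            /* ...copy/convert framesToProcess frames through the intermediary buffer here... */

            framesProcessed += framesToProcess;
        }
    }
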
@@ -39907,9 +39912,7 @@ static EM_BOOL ma_audio_worklet_process_callback__webaudio(int inputCount, const
             }
 
             ma_device_process_pcm_frames_capture__webaudio(pDevice, framesToProcessThisIteration, pDevice->webaudio.pIntermediaryBufferCapture);
-        }
-
-        if (outputCount > 0) {
+        } else if (outputCount > 0) {
             ma_device_process_pcm_frames_playback__webaudio(pDevice, framesToProcessThisIteration, pDevice->webaudio.pIntermediaryBufferPlayback);
 
             /* We've read the data from the client. Now we need to deinterleave the buffer and output to the output buffer. */
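
For context, Web Audio hands the callback planar per-channel float arrays while miniaudio's client callback uses interleaved frames, so the capture path interleaves on the way in and the playback path deinterleaves on the way out. A stand-alone sketch of those two copies (names are illustrative; ma_uint32 assumes miniaudio.h has been included):

    /* Sketch: planar -> interleaved, before handing capture frames to the client. */
    static void interleave_f32(float* pInterleaved, const float** ppPlanar, ma_uint32 frameCount, ma_uint32 channels)
    {
        for (ma_uint32 iFrame = 0; iFrame < frameCount; iFrame += 1) {
            for (ma_uint32 iChannel = 0; iChannel < channels; iChannel += 1) {
                pInterleaved[iFrame*channels + iChannel] = ppPlanar[iChannel][iFrame];
            }
        }
    }

    /* Sketch: interleaved -> planar, after the client has written playback frames. */
    static void deinterleave_f32(float** ppPlanar, const float* pInterleaved, ma_uint32 frameCount, ma_uint32 channels)
    {
        for (ma_uint32 iFrame = 0; iFrame < frameCount; iFrame += 1) {
            for (ma_uint32 iChannel = 0; iChannel < channels; iChannel += 1) {
                ppPlanar[iChannel][iFrame] = pInterleaved[iFrame*channels + iChannel];
            }
        }
    }
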
@@ -39979,7 +39982,7 @@ static void ma_audio_worklet_processor_created__webaudio(EMSCRIPTEN_WEBAUDIO_T a
         */
     })
     .catch(function(error) {
+        console.log("navigator.mediaDevices.getUserMedia Failed: " + error);
     });
 }, workletNode, audioContext);
 } else {
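
The added console.log makes getUserMedia() rejections visible instead of failing silently. The general shape of that request, written here as an EM_ASM sketch rather than the backend's exact code:

    #include <emscripten/emscripten.h>

    /* Sketch: request the microphone and log the reason if the request is rejected. */
    static void request_microphone_sketch(void)
    {
        EM_ASM({
            navigator.mediaDevices.getUserMedia({audio:true, video:false})
                .then(function(stream) {
                    /* Success: connect the stream into the Web Audio graph here. */
                })
                .catch(function(error) {
                    /* Permission denied, no input device, insecure origin, etc. Log it so the failure isn't silent. */
                    console.log("navigator.mediaDevices.getUserMedia Failed: " + error);
                });
        });
    }
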
@@ -40046,13 +40049,29 @@ static ma_result ma_device_init_by_type__webaudio(ma_device* pDevice, const ma_d
     ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "periodSizeInFrames = %d\n", (int)periodSizeInFrames);
 
     #if defined(MA_USE_AUDIO_WORKLETS)
     {
         ma_audio_worklet_thread_initialized_data* pInitParameters;
         EmscriptenWebAudioCreateAttributes audioContextAttributes;
 
-        audioContextAttributes.latencyHint = MA_WEBAUDIO_LATENCY_HINT_INTERACTIVE;
-        audioContextAttributes.sampleRate  = sampleRate;
+        if (pConfig->performanceProfile == ma_performance_profile_conservative) {
+            audioContextAttributes.latencyHint = MA_WEBAUDIO_LATENCY_HINT_PLAYBACK;
+        } else {
+            audioContextAttributes.latencyHint = MA_WEBAUDIO_LATENCY_HINT_INTERACTIVE;
+        }
+
+        /*
+        In my testing, Firefox does not seem to capture audio data properly if the sample rate is set
+        to anything other than 48K. This does not seem to be the case for other browsers. For this reason,
+        if the device type is anything other than playback, we'll leave the sample rate as-is and let the
+        browser pick the appropriate rate for us.
+        */
+        if (deviceType == ma_device_type_playback) {
+            audioContextAttributes.sampleRate = sampleRate;
+        } else {
+            audioContextAttributes.sampleRate = 0;
+        }
 
         /* It's not clear if this can return an error. None of the tests in the Emscripten repository check for this, so neither am I for now. */
         audioContext = emscripten_create_audio_context(&audioContextAttributes);
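
With this change the worklet path derives its latency hint from the device config's performance profile. A caller that prefers a larger, safer buffer over low latency would request it roughly like this (a sketch; data_callback and the include path are assumed):

    #include "miniaudio.h"

    extern void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount);

    static ma_result init_conservative_playback_device(ma_device* pDevice)
    {
        ma_device_config config = ma_device_config_init(ma_device_type_playback);
        config.playback.format    = ma_format_f32;
        config.playback.channels  = 2;
        config.sampleRate         = 48000;
        config.dataCallback       = data_callback;
        config.performanceProfile = ma_performance_profile_conservative;   /* on the worklet path this selects the "playback" latency hint above */

        return ma_device_init(NULL, &config, pDevice);
    }
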
@@ -40128,211 +40147,216 @@ static ma_result ma_device_init_by_type__webaudio(ma_device* pDevice, const ma_d
         }, audioContext);
     }
 
-    ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "TRACE: INITIALIZED. channels = %u\n", channels);
-}
-#else
-/* We create the device on the JavaScript side and reference it using an index. We use this to make it possible to reference the device between JavaScript and C. */
-int deviceIndex = EM_ASM_INT({
+    /* Grab the sample rate straight from the context. */
+    sampleRate = (ma_uint32)EM_ASM_INT({
+        return emscriptenGetAudioObject($0).sampleRate;
+    }, audioContext);
+
+    ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "TRACE: INITIALIZED. channels = %u; sampleRate = %u\n", channels, sampleRate);
+
+    if (deviceType == ma_device_type_capture) {
+        pDevice->webaudio.audioContextCapture = audioContext;
+        pDevice->webaudio.pStackBufferCapture = pStackBuffer;
+        pDevice->webaudio.intermediaryBufferSizeInFramesCapture = intermediaryBufferSizeInFrames;
+        pDevice->webaudio.pIntermediaryBufferCapture = pIntermediaryBuffer;
+    } else {
+        pDevice->webaudio.audioContextPlayback = audioContext;
+        pDevice->webaudio.pStackBufferPlayback = pStackBuffer;
+        pDevice->webaudio.intermediaryBufferSizeInFramesPlayback = intermediaryBufferSizeInFrames;
+        pDevice->webaudio.pIntermediaryBufferPlayback = pIntermediaryBuffer;
+    }
+}
+#else
+{
+    /* We create the device on the JavaScript side and reference it using an index. We use this to make it possible to reference the device between JavaScript and C. */
+    int deviceIndex = EM_ASM_INT({
         var channels = $0;
         var sampleRate = $1;
         var bufferSize = $2; /* In PCM frames. */
         var isCapture = $3;
         var pDevice = $4;
         var pAllocationCallbacks = $5;
 
         if (typeof(window.miniaudio) === 'undefined') {
             return -1; /* Context not initialized. */
         }
 
         var device = {};
 
-        /* The AudioContext must be created in a suspended state. */
-        device.webaudio = new (window.AudioContext || window.webkitAudioContext)({sampleRate:sampleRate});
-        device.webaudio.suspend();
+        /*
+        When testing in Firefox, I've seen it where capture mode fails if the sample rate is changed to anything other than it's
+        native rate. For this reason we're leaving the sample rate untouched for capture devices.
+        */
+        var options = {};
+        if (!isCapture) {
+            options.sampleRate = sampleRate;
+        }
+
+        device.webaudio = new (window.AudioContext || window.webkitAudioContext)(options);
+        device.webaudio.suspend(); /* The AudioContext must be created in a suspended state. */
         device.state = 1; /* ma_device_state_stopped */
 
         /* We need an intermediary buffer which we use for JavaScript and C interop. This buffer stores interleaved f32 PCM data. */
         device.intermediaryBufferSizeInBytes = channels * bufferSize * 4;
         device.intermediaryBuffer = _ma_malloc_emscripten(device.intermediaryBufferSizeInBytes, pAllocationCallbacks);
         device.intermediaryBufferView = new Float32Array(Module.HEAPF32.buffer, device.intermediaryBuffer, device.intermediaryBufferSizeInBytes);
 
         /*
-        Both playback and capture devices use a ScriptProcessorNode for performing per-sample operations.
-
-        ScriptProcessorNode is actually deprecated so this is likely to be temporary. The way this works for playback is very simple. You just set a callback
-        that's periodically fired, just like a normal audio callback function. But apparently this design is "flawed" and is now deprecated in favour of
-        something called AudioWorklets which _forces_ you to load a _separate_ .js file at run time... nice... Hopefully ScriptProcessorNode will continue to
-        work for years to come, but this may need to change to use AudioSourceBufferNode instead, which I think is what Emscripten uses for it's built-in SDL
-        implementation. I'll be avoiding that insane AudioWorklet API like the plague...
-
-        For capture it is a bit unintuitive. We use the ScriptProccessorNode _only_ to get the raw PCM data. It is connected to an AudioContext just like the
-        playback case, however we just output silence to the AudioContext instead of passing any real data. It would make more sense to me to use the
-        MediaRecorder API, but unfortunately you need to specify a MIME time (Opus, Vorbis, etc.) for the binary blob that's returned to the client, but I've
-        been unable to figure out how to get this as raw PCM. The closest I can think is to use the MIME type for WAV files and just parse it, but I don't know
-        how well this would work. Although ScriptProccessorNode is deprecated, in practice it seems to have pretty good browser support so I'm leaving it like
-        this for now. If anyone knows how I could get raw PCM data using the MediaRecorder API please let me know!
+        Both playback and capture devices use a ScriptProcessorNode for performing per-sample operations which is deprecated, but
+        thanks to Emscripten it can now work nicely with miniaudio. Therefore, this code will be considered legacy once the
+        AudioWorklet implementation is enabled by default in miniaudio.
+
+        The use of ScriptProcessorNode is quite simple - you simply provide a callback and do your audio processing in there. For
+        capture it's slightly unintuitive because you need to attach your node to the destination in order to capture anything.
+        Therefore, the output channel count needs to be set for capture devices or else you'll get errors by the browser. In the
+        callback we just output silence to ensure nothing comes out of the speakers.
         */
-        device.scriptNode = device.webaudio.createScriptProcessor(bufferSize, (isCapture) ? channels : 0, (isCapture) ? 0 : channels);
+        device.scriptNode = device.webaudio.createScriptProcessor(bufferSize, (isCapture) ? channels : 0, channels); /* Always set the output channel count, even for capture mode. */
 
         if (isCapture) {
             device.scriptNode.onaudioprocess = function(e) {
                 if (device.intermediaryBuffer === undefined) {
                     return; /* This means the device has been uninitialized. */
                 }
 
                 if (device.intermediaryBufferView.length == 0) {
                     /* Recreate intermediaryBufferView when losing reference to the underlying buffer, probably due to emscripten resizing heap. */
                     device.intermediaryBufferView = new Float32Array(Module.HEAPF32.buffer, device.intermediaryBuffer, device.intermediaryBufferSizeInBytes);
                 }
 
                 /* Make sure silence it output to the AudioContext destination. Not doing this will cause sound to come out of the speakers! */
                 for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) {
                     e.outputBuffer.getChannelData(iChannel).fill(0.0);
                 }
 
                 /* There are some situations where we may want to send silence to the client. */
                 var sendSilence = false;
                 if (device.streamNode === undefined) {
                     sendSilence = true;
                 }
 
                 /* Sanity check. This will never happen, right? */
                 if (e.inputBuffer.numberOfChannels != channels) {
                     console.log("Capture: Channel count mismatch. " + e.inputBufer.numberOfChannels + " != " + channels + ". Sending silence.");
                     sendSilence = true;
                 }
 
                 /* This looped design guards against the situation where e.inputBuffer is a different size to the original buffer size. Should never happen in practice. */
                 var totalFramesProcessed = 0;
                 while (totalFramesProcessed < e.inputBuffer.length) {
                     var framesRemaining = e.inputBuffer.length - totalFramesProcessed;
                     var framesToProcess = framesRemaining;
                     if (framesToProcess > (device.intermediaryBufferSizeInBytes/channels/4)) {
                         framesToProcess = (device.intermediaryBufferSizeInBytes/channels/4);
                     }
 
                     /* We need to do the reverse of the playback case. We need to interleave the input data and copy it into the intermediary buffer. Then we send it to the client. */
                     if (sendSilence) {
                         device.intermediaryBufferView.fill(0.0);
                     } else {
                         for (var iFrame = 0; iFrame < framesToProcess; ++iFrame) {
                             for (var iChannel = 0; iChannel < e.inputBuffer.numberOfChannels; ++iChannel) {
                                 device.intermediaryBufferView[iFrame*channels + iChannel] = e.inputBuffer.getChannelData(iChannel)[totalFramesProcessed + iFrame];
                             }
                         }
                     }
 
                     /* Send data to the client from our intermediary buffer. */
                     _ma_device_process_pcm_frames_capture__webaudio(pDevice, framesToProcess, device.intermediaryBuffer);
 
                     totalFramesProcessed += framesToProcess;
                 }
             };
 
             navigator.mediaDevices.getUserMedia({audio:true, video:false})
                 .then(function(stream) {
                     device.streamNode = device.webaudio.createMediaStreamSource(stream);
                     device.streamNode.connect(device.scriptNode);
                     device.scriptNode.connect(device.webaudio.destination);
                 })
                 .catch(function(error) {
                     /* I think this should output silence... */
                     device.scriptNode.connect(device.webaudio.destination);
                 });
         } else {
             device.scriptNode.onaudioprocess = function(e) {
                 if (device.intermediaryBuffer === undefined) {
                     return; /* This means the device has been uninitialized. */
                 }
 
                 if(device.intermediaryBufferView.length == 0) {
                     /* Recreate intermediaryBufferView when losing reference to the underlying buffer, probably due to emscripten resizing heap. */
                     device.intermediaryBufferView = new Float32Array(Module.HEAPF32.buffer, device.intermediaryBuffer, device.intermediaryBufferSizeInBytes);
                 }
 
                 var outputSilence = false;
 
                 /* Sanity check. This will never happen, right? */
                 if (e.outputBuffer.numberOfChannels != channels) {
                     console.log("Playback: Channel count mismatch. " + e.outputBufer.numberOfChannels + " != " + channels + ". Outputting silence.");
                     outputSilence = true;
                     return;
                 }
 
                 /* This looped design guards against the situation where e.outputBuffer is a different size to the original buffer size. Should never happen in practice. */
                 var totalFramesProcessed = 0;
                 while (totalFramesProcessed < e.outputBuffer.length) {
                     var framesRemaining = e.outputBuffer.length - totalFramesProcessed;
                     var framesToProcess = framesRemaining;
                     if (framesToProcess > (device.intermediaryBufferSizeInBytes/channels/4)) {
                         framesToProcess = (device.intermediaryBufferSizeInBytes/channels/4);
                     }
 
                     /* Read data from the client into our intermediary buffer. */
                     _ma_device_process_pcm_frames_playback__webaudio(pDevice, framesToProcess, device.intermediaryBuffer);
 
                     /* At this point we'll have data in our intermediary buffer which we now need to deinterleave and copy over to the output buffers. */
                     if (outputSilence) {
                         for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) {
                             e.outputBuffer.getChannelData(iChannel).fill(0.0);
                         }
                     } else {
                         for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) {
                             var outputBuffer = e.outputBuffer.getChannelData(iChannel);
                             var intermediaryBuffer = device.intermediaryBufferView;
                             for (var iFrame = 0; iFrame < framesToProcess; ++iFrame) {
                                 outputBuffer[totalFramesProcessed + iFrame] = intermediaryBuffer[iFrame*channels + iChannel];
                             }
                         }
                     }
 
                     totalFramesProcessed += framesToProcess;
                 }
             };
 
             device.scriptNode.connect(device.webaudio.destination);
         }
 
         return miniaudio.track_device(device);
     }, channels, sampleRate, periodSizeInFrames, deviceType == ma_device_type_capture, pDevice, &pDevice->pContext->allocationCallbacks);
 
     if (deviceIndex < 0) {
         return MA_FAILED_TO_OPEN_BACKEND_DEVICE;
     }
-    #endif
 
-    #if defined(MA_USE_AUDIO_WORKLETS)
-    if (deviceType == ma_device_type_capture) {
-        pDevice->webaudio.audioContextCapture = audioContext;
-        pDevice->webaudio.pStackBufferCapture = pStackBuffer;
-        pDevice->webaudio.intermediaryBufferSizeInFramesCapture = intermediaryBufferSizeInFrames;
-        pDevice->webaudio.pIntermediaryBufferCapture = pIntermediaryBuffer;
-    } else {
-        pDevice->webaudio.audioContextPlayback = audioContext;
-        pDevice->webaudio.pStackBufferPlayback = pStackBuffer;
-        pDevice->webaudio.intermediaryBufferSizeInFramesPlayback = intermediaryBufferSizeInFrames;
-        pDevice->webaudio.pIntermediaryBufferPlayback = pIntermediaryBuffer;
-    }
-    #else
     if (deviceType == ma_device_type_capture) {
         pDevice->webaudio.indexCapture = deviceIndex;
     } else {
         pDevice->webaudio.indexPlayback = deviceIndex;
     }
+
+    /* Grab the actual sample rate from the context. This will become the internal sample rate for use by miniaudio. */
+    sampleRate = EM_ASM_INT({ return miniaudio.get_device_by_index($0).webaudio.sampleRate; }, deviceIndex);
 }
 #endif
 
     pDescriptor->format = ma_format_f32;
     pDescriptor->channels = channels;
+    pDescriptor->sampleRate = sampleRate;
     ma_channel_map_init_standard(ma_standard_channel_map_webaudio, pDescriptor->channelMap, ma_countof(pDescriptor->channelMap), pDescriptor->channels);
     pDescriptor->periodSizeInFrames = periodSizeInFrames;
     pDescriptor->periodCount = 1;
 
-    #if defined(MA_USE_AUDIO_WORKLETS)
-    pDescriptor->sampleRate = sampleRate; /* Is this good enough to be used in the general case? */
-    #else
-    pDescriptor->sampleRate = EM_ASM_INT({ return miniaudio.get_device_by_index($0).webaudio.sampleRate; }, deviceIndex);
-    #endif
-
     return MA_SUCCESS;
 }
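
Because capture now passes sampleRate = 0 and lets the browser choose, client code should not assume it got the rate it asked for; the negotiated rate lands in the descriptor and can be read back from the initialized device. A sketch (capture_callback and the include path are assumed):

    #include <stdio.h>
    #include "miniaudio.h"

    extern void capture_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount);

    static ma_result init_capture_device_sketch(ma_device* pDevice)
    {
        ma_device_config config = ma_device_config_init(ma_device_type_capture);
        config.capture.format   = ma_format_f32;
        config.capture.channels = 1;
        config.sampleRate       = 0;                /* 0 = let the backend/browser choose the rate */
        config.dataCallback     = capture_callback;

        ma_result result = ma_device_init(NULL, &config, pDevice);
        if (result == MA_SUCCESS) {
            /* pDevice->sampleRate now holds the rate that was actually negotiated (often 44100 or 48000). */
            printf("Capture device running at %u Hz\n", pDevice->sampleRate);
        }

        return result;
    }
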
@@ -74184,7 +74208,7 @@ static void ma_engine_node_process_pcm_frames__general(ma_engine_node* pEngineNo
     float fadeVolumeBeg = ma_atomic_float_get(&pEngineNode->fadeSettings.volumeBeg);
     float fadeVolumeEnd = ma_atomic_float_get(&pEngineNode->fadeSettings.volumeEnd);
     ma_int64 fadeStartOffsetInFrames = (ma_int64)ma_atomic_uint64_get(&pEngineNode->fadeSettings.absoluteGlobalTimeInFrames);
-    if (fadeStartOffsetInFrames == ~(ma_uint64)0) {
+    if (fadeStartOffsetInFrames == (ma_int64)(~(ma_uint64)0)) {
         fadeStartOffsetInFrames = 0;
     } else {
         fadeStartOffsetInFrames -= ma_engine_get_time(pEngineNode->pEngine);
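
The added cast does not change the result of the comparison; it only keeps the all-bits-set sentinel check in signed space so compilers don't warn about a signed/unsigned comparison. A self-contained sketch of the pattern using stdint types in place of miniaudio's ma_int64/ma_uint64 typedefs:

    #include <stdint.h>

    #define NOT_SET (~(uint64_t)0)   /* all-bits-set sentinel, mirrors ~(ma_uint64)0 */

    static int is_not_set(int64_t value)
    {
        /* Without the cast the right-hand side is unsigned, so the signed value would be converted
           to unsigned for the comparison. The result is the same, but some compilers emit a
           sign-mismatch warning; casting the sentinel to int64_t keeps the comparison quiet. */
        return value == (int64_t)NOT_SET;
    }
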
+#define MA_DEBUG_OUTPUT
 #define MA_NO_DECODING
 #define MA_NO_ENCODING
 #define MINIAUDIO_IMPLEMENTATION
@@ -73,12 +74,13 @@ static void do_duplex()
     deviceConfig = ma_device_config_init(ma_device_type_duplex);
     deviceConfig.capture.pDeviceID  = NULL;
-    deviceConfig.capture.format     = ma_format_s16;
+    deviceConfig.capture.format     = DEVICE_FORMAT;
     deviceConfig.capture.channels   = 2;
     deviceConfig.capture.shareMode  = ma_share_mode_shared;
     deviceConfig.playback.pDeviceID = NULL;
-    deviceConfig.playback.format    = ma_format_s16;
+    deviceConfig.playback.format    = DEVICE_FORMAT;
     deviceConfig.playback.channels  = 2;
+    deviceConfig.sampleRate         = DEVICE_SAMPLE_RATE;
     deviceConfig.dataCallback       = data_callback_duplex;
     result = ma_device_init(NULL, &deviceConfig, &device);
     if (result != MA_SUCCESS) {
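
For context, a duplex device like the one configured above drives a single callback that receives capture frames and writes playback frames in the same call. A typical pass-through body, sketched here with ma_copy_pcm_frames (modelled on miniaudio's duplex examples, not necessarily this test's data_callback_duplex):

    static void duplex_passthrough_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
    {
        /* In duplex mode both pInput and pOutput are valid. The config above requests the same
           format and channel count for capture and playback, so frames can be copied straight across. */
        MA_ASSERT(pDevice->capture.format   == pDevice->playback.format);
        MA_ASSERT(pDevice->capture.channels == pDevice->playback.channels);

        ma_copy_pcm_frames(pOutput, pInput, frameCount, pDevice->capture.format, pDevice->capture.channels);
    }
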
......