    /* Grab the sample rate straight from the context. */
    sampleRate = (ma_uint32)EM_ASM_INT({
        return emscriptenGetAudioObject($0).sampleRate;
    }, pDevice->webaudio.audioContext); /* Assumption: $0 is the device's AudioContext handle. */
}
#else
    /* We create the device on the JavaScript side and reference it using an index. We use this to make it possible to reference the device between JavaScript and C. */
    int deviceIndex = EM_ASM_INT({
        var channels             = $0;
        var sampleRate           = $1;
        var bufferSize           = $2; /* In PCM frames. */
        var isCapture            = $3;
        var pDevice              = $4;
        var pAllocationCallbacks = $5;
        if (typeof(window.miniaudio) === 'undefined') {
            return -1; /* Context not initialized. */
        }

        var device = {};

        /*
        The AudioContext must be created in a suspended state. Also, when testing in Firefox, I've seen capture mode fail if the sample rate is
        changed to anything other than its native rate. For this reason we're leaving the sample rate untouched for capture devices.
        */
        var options = {};
        if (!isCapture) {
            options.sampleRate = sampleRate;
        }

        device.webaudio = new (window.AudioContext || window.webkitAudioContext)(options);
        device.webaudio.suspend();
        device.state = 1; /* ma_device_state_stopped */
        /* We need an intermediary buffer which we use for JavaScript and C interop. This buffer stores interleaved f32 PCM data. */
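        /*
        A minimal sketch of the allocation the view below depends on: channels * bufferSize frames at 4 bytes per f32 sample, allocated on the
        C heap so both sides can address it. The helper name is an assumption; it stands in for an exported C-side allocator that respects
        pAllocationCallbacks.
        */
        device.intermediaryBufferSizeInBytes = channels * bufferSize * 4;
        device.intermediaryBuffer = _ma_malloc_emscripten(device.intermediaryBufferSizeInBytes, pAllocationCallbacks);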
        device.intermediaryBufferView = new Float32Array(Module.HEAPF32.buffer, device.intermediaryBuffer, device.intermediaryBufferSizeInBytes / 4); /* Length is in f32 elements, not bytes. */
        /*
        Both playback and capture devices use a ScriptProcessorNode for performing per-sample operations.

        ScriptProcessorNode is actually deprecated so this is likely to be temporary. The way this works for playback is very simple. You just set a callback
        that's periodically fired, just like a normal audio callback function. But apparently this design is "flawed" and is now deprecated in favour of
        something called AudioWorklets which _forces_ you to load a _separate_ .js file at run time... nice... Hopefully ScriptProcessorNode will continue to
        work for years to come, but this may need to change to use AudioBufferSourceNode instead, which I think is what Emscripten uses for its built-in SDL
        implementation. I'll be avoiding that insane AudioWorklet API like the plague...

        For capture it is a bit unintuitive. We use the ScriptProcessorNode _only_ to get the raw PCM data. It is connected to an AudioContext just like the
        playback case, however we just output silence to the AudioContext instead of passing any real data. It would make more sense to me to use the
        MediaRecorder API, but unfortunately you need to specify a MIME type (Opus, Vorbis, etc.) for the binary blob that's returned to the client, and I've
        been unable to figure out how to get this as raw PCM. The closest I can think of is to use the MIME type for WAV files and just parse it, but I don't
        know how well this would work. Although ScriptProcessorNode is deprecated, in practice it seems to have pretty good browser support so I'm leaving it
        like this for now. If anyone knows how I could get raw PCM data using the MediaRecorder API please let me know!
        */
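        /*
        A sketch of the node setup described above, using the (deprecated) createScriptProcessor() API. The capture handler reads e.inputBuffer
        and fills e.outputBuffer with silence, while the playback handler only writes e.outputBuffer, hence the channel counts below; the exact
        counts and the connection to the destination are assumptions based on the comments above. A ScriptProcessorNode must be connected to the
        destination for onaudioprocess to fire at all. For capture, a MediaStreamAudioSourceNode (device.streamNode) is assumed to be attached
        separately once getUserMedia() completes.
        */
        device.scriptNode = device.webaudio.createScriptProcessor(bufferSize, (isCapture) ? channels : 0, channels);
        device.scriptNode.connect(device.webaudio.destination);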
        if (isCapture) {
            device.scriptNode.onaudioprocess = function(e) {
                if (device.intermediaryBuffer === undefined) {
                    return; /* This means the device has been uninitialized. */
                }
                if (device.intermediaryBufferView.length == 0) {
                    /* Recreate intermediaryBufferView when losing reference to the underlying buffer, probably due to emscripten resizing the heap. */
                    device.intermediaryBufferView = new Float32Array(Module.HEAPF32.buffer, device.intermediaryBuffer, device.intermediaryBufferSizeInBytes / 4);
                }

                /* Make sure silence is output to the AudioContext destination. Not doing this will cause sound to come out of the speakers! */
                for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) {
                    e.outputBuffer.getChannelData(iChannel).fill(0.0);
                }

                /* There are some situations where we may want to send silence to the client. */
                var sendSilence = false;
                if (device.streamNode === undefined) {
                    sendSilence = true;
                }

                /* This looped design guards against the situation where e.inputBuffer is a different size to the original buffer size. Should never happen in practice. */
                var totalFramesProcessed = 0;
                while (totalFramesProcessed < e.inputBuffer.length) {
                    var framesRemaining = e.inputBuffer.length - totalFramesProcessed;
                    var framesToProcess = framesRemaining;
                    if (framesToProcess > (device.intermediaryBufferSizeInBytes/channels/4)) {
                        framesToProcess = (device.intermediaryBufferSizeInBytes/channels/4);
                    }

                    /* We need to do the reverse of the playback case. We need to interleave the input data and copy it into the intermediary buffer. Then we send it to the client. */
                    if (sendSilence) {
                        device.intermediaryBufferView.fill(0.0);
                    } else {
                        for (var iFrame = 0; iFrame < framesToProcess; ++iFrame) {
                            for (var iChannel = 0; iChannel < e.inputBuffer.numberOfChannels; ++iChannel) {
                                device.intermediaryBufferView[iFrame*channels + iChannel] = e.inputBuffer.getChannelData(iChannel)[totalFramesProcessed + iFrame];
                            }
                        }
                    }
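                    /*
                    Hand the interleaved frames to the client, as the comment above describes. The exported C function name below is an
                    assumption; it stands in for whatever reads framesToProcess frames of interleaved f32 data from the intermediary buffer
                    and passes them to the device's data callback.
                    */
                    _ma_device_process_pcm_frames_capture__webaudio(pDevice, framesToProcess, device.intermediaryBuffer);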
                    totalFramesProcessed += framesToProcess;
                }
            };
        } else {
            device.scriptNode.onaudioprocess = function(e) {
                if (device.intermediaryBuffer === undefined) {
                    return; /* This means the device has been uninitialized. */
                }

                if (device.intermediaryBufferView.length == 0) {
                    /* Recreate intermediaryBufferView when losing reference to the underlying buffer, probably due to emscripten resizing the heap. */
                    device.intermediaryBufferView = new Float32Array(Module.HEAPF32.buffer, device.intermediaryBuffer, device.intermediaryBufferSizeInBytes / 4);
                }

                /* This looped design guards against the situation where e.outputBuffer is a different size to the original buffer size. Should never happen in practice. */
                var totalFramesProcessed = 0;
                while (totalFramesProcessed < e.outputBuffer.length) {
                    var framesRemaining = e.outputBuffer.length - totalFramesProcessed;
                    var framesToProcess = framesRemaining;
                    if (framesToProcess > (device.intermediaryBufferSizeInBytes/channels/4)) {
                        framesToProcess = (device.intermediaryBufferSizeInBytes/channels/4);
                    }
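                    /*
                    A sketch of the playback body, mirroring the capture path above: first request framesToProcess frames of interleaved f32
                    data from the client (the exported C function name is an assumption, like its capture counterpart), then deinterleave into
                    the output buffer's per-channel data.
                    */
                    _ma_device_process_pcm_frames_playback__webaudio(pDevice, framesToProcess, device.intermediaryBuffer);

                    for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) {
                        var outputData = e.outputBuffer.getChannelData(iChannel);
                        for (var iFrame = 0; iFrame < framesToProcess; ++iFrame) {
                            outputData[totalFramesProcessed + iFrame] = device.intermediaryBufferView[iFrame*channels + iChannel];
                        }
                    }

                    totalFramesProcessed += framesToProcess;
                }
            };
        }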