Commit 95982470 authored by David Reid

Remove old files and update examples.

parent ed22ebbf
......@@ -8,9 +8,9 @@ example (via libopus).
#define MA_NO_VORBIS /* Disable the built-in Vorbis decoder to ensure the libvorbis decoder is picked. */
#define MA_NO_OPUS /* Disable the (not yet implemented) built-in Opus decoder to ensure the libopus decoder is picked. */
#define MINIAUDIO_IMPLEMENTATION
#include "../../miniaudio.h"
#include "../../extras/miniaudio_libvorbis.h"
#include "../../extras/miniaudio_libopus.h"
#include "../miniaudio.h"
#include "../extras/miniaudio_libvorbis.h"
#include "../extras/miniaudio_libopus.h"
#include <stdio.h>
......
......@@ -7,9 +7,8 @@ the miniaudio repository. The vocoder node uses https://github.com/blastbay/vocl
effect.
*/
#define MINIAUDIO_IMPLEMENTATION
#include "../../miniaudio.h"
#include "../miniaudio_engine.h"
#include "../_extras/nodes/ma_vocoder_node/ma_vocoder_node.c"
#include "../miniaudio.h"
#include "../extras/nodes/ma_vocoder_node/ma_vocoder_node.c"
#include <stdio.h>
......
......@@ -14,8 +14,7 @@ If you were wanting to support multiple listeners, this example will show you ho
initializing one `ma_engine` object for each listener, each of which shares a single self-managed resource manager.
*/
#define MINIAUDIO_IMPLEMENTATION
#include "../../miniaudio.h"
#include "../miniaudio_engine.h"
#include "../miniaudio.h"
#define MAX_DEVICES 2
#define MAX_SOUNDS 32
......
......@@ -25,8 +25,7 @@ set, each sound will have their own formats and you'll need to do the necessary
*/
#define MA_NO_ENGINE /* We're intentionally not using the ma_engine API here. */
#define MINIAUDIO_IMPLEMENTATION
#include "../../miniaudio.h"
#include "../miniaudio_engine.h"
#include "../miniaudio.h"
#ifdef __EMSCRIPTEN__
#include <emscripten.h>
......
......@@ -16,8 +16,7 @@ threads to manage internally and how to implement your own custom job thread.
*/
#define MA_NO_ENGINE /* We're intentionally not using the ma_engine API here. */
#define MINIAUDIO_IMPLEMENTATION
#include "../../miniaudio.h"
#include "../miniaudio_engine.h"
#include "../miniaudio.h"
static ma_resource_manager_data_source g_dataSources[16];
static ma_uint32 g_dataSourceCount;
......
#define MINIAUDIO_IMPLEMENTATION
#include "../../../../miniaudio.h"
#include "../../../miniaudio_engine.h"
#include "../../../miniaudio.h"
#include "ma_channel_separator_node.c"
#include "../ma_channel_combiner_node/ma_channel_combiner_node.c"
......
#define MINIAUDIO_IMPLEMENTATION
#include "../../../../miniaudio.h"
#include "../../../miniaudio_engine.h"
#include "../../../miniaudio.h"
#include <stdio.h>
......
#define MINIAUDIO_IMPLEMENTATION
#include "../../../../miniaudio.h"
#include "../../../miniaudio_engine.h"
#include "../../../miniaudio.h"
#include "ma_reverb_node.c"
#include <stdio.h>
......
......@@ -7,8 +7,7 @@ the miniaudio repository. The vocoder node uses https://github.com/blastbay/vocl
effect.
*/
#define MINIAUDIO_IMPLEMENTATION
#include "../../../../miniaudio.h"
#include "../../../miniaudio_engine.h"
#include "../../../miniaudio.h"
#include "ma_vocoder_node.c"
#include <stdio.h>
......
#include "../../examples/custom_decoder_engine.c"
\ No newline at end of file
#include "../../examples/duplex_effect.c"
\ No newline at end of file
#include "../../examples/engine_hello_world.c"
\ No newline at end of file
#include "../../examples/resource_manager.c"
\ No newline at end of file
#include "../../examples/resource_manager_advanced.c"
\ No newline at end of file
#include "../../../../extras/nodes/ma_channel_combiner_node/ma_channel_combiner_node.c"
\ No newline at end of file
#include "../../../../extras/nodes/ma_channel_combiner_node/ma_channel_combiner_node.h"
\ No newline at end of file
#include "../../../../extras/nodes/ma_channel_combiner_node/ma_channel_combiner_node_example.c"
#include "../../../../extras/nodes/ma_channel_separator_node/ma_channel_separator_node.c"
\ No newline at end of file
#include "../../../../extras/nodes/ma_channel_separator_node/ma_channel_separator_node.h"
\ No newline at end of file
#include "../../../../extras/nodes/ma_channel_separator_node/ma_channel_separator_node_example.c"
\ No newline at end of file
#include "../../../../extras/nodes/ma_delay_node/ma_delay_node_example.c"
\ No newline at end of file
#include "../../../../extras/nodes/ma_reverb_node/ma_reverb_node.c"
\ No newline at end of file
#include "../../../../extras/nodes/ma_reverb_node/ma_reverb_node.h"
\ No newline at end of file
#include "../../../../extras/nodes/ma_reverb_node/ma_reverb_node_example.c"
\ No newline at end of file
/* Reverb Library
* Verblib version 0.4 - 2021-01-23
*
* Philip Bennefall - philip@blastbay.com
*
* See the end of this file for licensing terms.
* This reverb is based on Freeverb, a public domain reverb written by Jezar at Dreampoint.
*
* IMPORTANT: The reverb currently only works with 1 or 2 channels, at sample rates of 22050 Hz and above.
* These restrictions may be lifted in a future version.
*
* USAGE
*
* This is a single-file library. To use it, do something like the following in one .c file.
* #define VERBLIB_IMPLEMENTATION
* #include "verblib.h"
*
* You can then #include this file in other parts of the program as you would with any other header file.
*/
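/*
 * A minimal usage sketch based on the API declared below; the sample rate, channel count
 * and parameter values are illustrative assumptions only.
 *
 *     #define VERBLIB_IMPLEMENTATION
 *     #include "verblib.h"
 *
 *     static float frames[512 * 2]; // 512 interleaved stereo frames, processed in place below.
 *
 *     int apply_reverb(void)
 *     {
 *         static verblib verb; // Static because the struct embeds all of its delay buffers and is large.
 *
 *         if (!verblib_initialize(&verb, 44100, 2)) {
 *             return 0; // Invalid sample rate or channel count.
 *         }
 *
 *         verblib_set_room_size(&verb, 0.75f);
 *         verblib_set_wet(&verb, 0.3f);
 *         verblib_set_dry(&verb, 1.0f);
 *
 *         verblib_process(&verb, frames, frames, 512); // In-place processing is allowed.
 *         return 1;
 *     }
 */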
#ifndef VERBLIB_H
#define VERBLIB_H
#ifdef __cplusplus
extern "C" {
#endif
/* COMPILE-TIME OPTIONS */
/* The maximum sample rate that should be supported, specified as a multiple of 44100. */
#ifndef verblib_max_sample_rate_multiplier
#define verblib_max_sample_rate_multiplier 4
#endif
/* The silence threshold which is used when calculating decay time. */
#ifndef verblib_silence_threshold
#define verblib_silence_threshold 80.0 /* In dB (absolute). */
#endif
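/*
 * Both options above are wrapped in #ifndef, so they can be overridden before this file is
 * included. The values below are arbitrary examples, not recommendations.
 *
 *     #define verblib_max_sample_rate_multiplier 8
 *     #define verblib_silence_threshold 60.0
 *     #include "verblib.h"
 */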
/* PUBLIC API */
typedef struct verblib verblib;
/* Initialize a verblib structure.
*
* Call this function to initialize the verblib structure.
* Returns nonzero (true) on success or 0 (false) on failure.
* The function will only fail if one or more of the parameters are invalid.
*/
int verblib_initialize ( verblib* verb, unsigned long sample_rate, unsigned int channels );
/* Run the reverb.
*
* Call this function continuously to generate your output.
* output_buffer may be the same pointer as input_buffer if in-place processing is desired.
* frames specifies the number of sample frames that should be processed.
*/
void verblib_process ( verblib* verb, const float* input_buffer, float* output_buffer, unsigned long frames );
/* Set the size of the room, between 0.0 and 1.0. */
void verblib_set_room_size ( verblib* verb, float value );
/* Get the size of the room. */
float verblib_get_room_size ( const verblib* verb );
/* Set the amount of damping, between 0.0 and 1.0. */
void verblib_set_damping ( verblib* verb, float value );
/* Get the amount of damping. */
float verblib_get_damping ( const verblib* verb );
/* Set the stereo width of the reverb, between 0.0 and 1.0. */
void verblib_set_width ( verblib* verb, float value );
/* Get the stereo width of the reverb. */
float verblib_get_width ( const verblib* verb );
/* Set the volume of the wet signal, between 0.0 and 1.0. */
void verblib_set_wet ( verblib* verb, float value );
/* Get the volume of the wet signal. */
float verblib_get_wet ( const verblib* verb );
/* Set the volume of the dry signal, between 0.0 and 1.0. */
void verblib_set_dry ( verblib* verb, float value );
/* Get the volume of the dry signal. */
float verblib_get_dry ( const verblib* verb );
/* Set the mode of the reverb, where values below 0.5 mean normal and values above mean frozen. */
void verblib_set_mode ( verblib* verb, float value );
/* Get the mode of the reverb. */
float verblib_get_mode ( const verblib* verb );
/* Get the decay time in sample frames based on the current room size setting. */
/* If freeze mode is active, the decay time is infinite and this function returns 0. */
unsigned long verblib_get_decay_time_in_frames ( const verblib* verb );
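/*
 * For example, the value returned here can be used to render the reverb tail after the input
 * has ended. This is a sketch only; "verb" is assumed to be an already-initialized stereo instance.
 *
 *     static float silence[512 * 2]; // Zero-filled input (static arrays are zero-initialized).
 *     static float tail[512 * 2];
 *
 *     unsigned long remaining = verblib_get_decay_time_in_frames(&verb);
 *     while (remaining > 0) {
 *         unsigned long count = remaining < 512 ? remaining : 512;
 *         verblib_process(&verb, silence, tail, count);
 *         // ... write "tail" to the output here ...
 *         remaining -= count;
 *     }
 */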
/* INTERNAL STRUCTURES */
/* Allpass filter */
typedef struct verblib_allpass verblib_allpass;
struct verblib_allpass
{
float* buffer;
float feedback;
int bufsize;
int bufidx;
};
/* Comb filter */
typedef struct verblib_comb verblib_comb;
struct verblib_comb
{
float* buffer;
float feedback;
float filterstore;
float damp1;
float damp2;
int bufsize;
int bufidx;
};
/* Reverb model tuning values */
#define verblib_numcombs 8
#define verblib_numallpasses 4
#define verblib_muted 0.0f
#define verblib_fixedgain 0.015f
#define verblib_scalewet 3.0f
#define verblib_scaledry 2.0f
#define verblib_scaledamp 0.8f
#define verblib_scaleroom 0.28f
#define verblib_offsetroom 0.7f
#define verblib_initialroom 0.5f
#define verblib_initialdamp 0.25f
#define verblib_initialwet 1.0f/verblib_scalewet
#define verblib_initialdry 0.0f
#define verblib_initialwidth 1.0f
#define verblib_initialmode 0.0f
#define verblib_freezemode 0.5f
#define verblib_stereospread 23
/*
* These values assume a 44.1 kHz sample rate, but will be scaled appropriately.
* The values were obtained by listening tests.
*/
#define verblib_combtuningL1 1116
#define verblib_combtuningR1 (1116+verblib_stereospread)
#define verblib_combtuningL2 1188
#define verblib_combtuningR2 (1188+verblib_stereospread)
#define verblib_combtuningL3 1277
#define verblib_combtuningR3 (1277+verblib_stereospread)
#define verblib_combtuningL4 1356
#define verblib_combtuningR4 (1356+verblib_stereospread)
#define verblib_combtuningL5 1422
#define verblib_combtuningR5 (1422+verblib_stereospread)
#define verblib_combtuningL6 1491
#define verblib_combtuningR6 (1491+verblib_stereospread)
#define verblib_combtuningL7 1557
#define verblib_combtuningR7 (1557+verblib_stereospread)
#define verblib_combtuningL8 1617
#define verblib_combtuningR8 (1617+verblib_stereospread)
#define verblib_allpasstuningL1 556
#define verblib_allpasstuningR1 (556+verblib_stereospread)
#define verblib_allpasstuningL2 441
#define verblib_allpasstuningR2 (441+verblib_stereospread)
#define verblib_allpasstuningL3 341
#define verblib_allpasstuningR3 (341+verblib_stereospread)
#define verblib_allpasstuningL4 225
#define verblib_allpasstuningR4 (225+verblib_stereospread)
/* The main reverb structure. This is the structure that you will create an instance of when using the reverb. */
struct verblib
{
unsigned int channels;
float gain;
float roomsize, roomsize1;
float damp, damp1;
float wet, wet1, wet2;
float dry;
float width;
float mode;
/*
* The following are all declared inline
* to remove the need for dynamic allocation.
*/
/* Comb filters */
verblib_comb combL[verblib_numcombs];
verblib_comb combR[verblib_numcombs];
/* Allpass filters */
verblib_allpass allpassL[verblib_numallpasses];
verblib_allpass allpassR[verblib_numallpasses];
/* Buffers for the combs */
float bufcombL1[verblib_combtuningL1* verblib_max_sample_rate_multiplier];
float bufcombR1[verblib_combtuningR1* verblib_max_sample_rate_multiplier];
float bufcombL2[verblib_combtuningL2* verblib_max_sample_rate_multiplier];
float bufcombR2[verblib_combtuningR2* verblib_max_sample_rate_multiplier];
float bufcombL3[verblib_combtuningL3* verblib_max_sample_rate_multiplier];
float bufcombR3[verblib_combtuningR3* verblib_max_sample_rate_multiplier];
float bufcombL4[verblib_combtuningL4* verblib_max_sample_rate_multiplier];
float bufcombR4[verblib_combtuningR4* verblib_max_sample_rate_multiplier];
float bufcombL5[verblib_combtuningL5* verblib_max_sample_rate_multiplier];
float bufcombR5[verblib_combtuningR5* verblib_max_sample_rate_multiplier];
float bufcombL6[verblib_combtuningL6* verblib_max_sample_rate_multiplier];
float bufcombR6[verblib_combtuningR6* verblib_max_sample_rate_multiplier];
float bufcombL7[verblib_combtuningL7* verblib_max_sample_rate_multiplier];
float bufcombR7[verblib_combtuningR7* verblib_max_sample_rate_multiplier];
float bufcombL8[verblib_combtuningL8* verblib_max_sample_rate_multiplier];
float bufcombR8[verblib_combtuningR8* verblib_max_sample_rate_multiplier];
/* Buffers for the allpasses */
float bufallpassL1[verblib_allpasstuningL1* verblib_max_sample_rate_multiplier];
float bufallpassR1[verblib_allpasstuningR1* verblib_max_sample_rate_multiplier];
float bufallpassL2[verblib_allpasstuningL2* verblib_max_sample_rate_multiplier];
float bufallpassR2[verblib_allpasstuningR2* verblib_max_sample_rate_multiplier];
float bufallpassL3[verblib_allpasstuningL3* verblib_max_sample_rate_multiplier];
float bufallpassR3[verblib_allpasstuningR3* verblib_max_sample_rate_multiplier];
float bufallpassL4[verblib_allpasstuningL4* verblib_max_sample_rate_multiplier];
float bufallpassR4[verblib_allpasstuningR4* verblib_max_sample_rate_multiplier];
};
#ifdef __cplusplus
}
#endif
#endif /* VERBLIB_H */
/* IMPLEMENTATION */
#ifdef VERBLIB_IMPLEMENTATION
#include <stddef.h>
#include <math.h>
#ifdef _MSC_VER
#define VERBLIB_INLINE __forceinline
#else
#ifdef __GNUC__
#define VERBLIB_INLINE inline __attribute__((always_inline))
#else
#define VERBLIB_INLINE inline
#endif
#endif
#define undenormalise(sample) sample+=1.0f; sample-=1.0f;
/* Allpass filter */
static void verblib_allpass_initialize ( verblib_allpass* allpass, float* buf, int size )
{
allpass->buffer = buf;
allpass->bufsize = size;
allpass->bufidx = 0;
}
static VERBLIB_INLINE float verblib_allpass_process ( verblib_allpass* allpass, float input )
{
float output;
float bufout;
bufout = allpass->buffer[allpass->bufidx];
undenormalise ( bufout );
output = -input + bufout;
allpass->buffer[allpass->bufidx] = input + ( bufout * allpass->feedback );
if ( ++allpass->bufidx >= allpass->bufsize )
{
allpass->bufidx = 0;
}
return output;
}
static void verblib_allpass_mute ( verblib_allpass* allpass )
{
int i;
for ( i = 0; i < allpass->bufsize; i++ )
{
allpass->buffer[i] = 0.0f;
}
}
/* Comb filter */
static void verblib_comb_initialize ( verblib_comb* comb, float* buf, int size )
{
comb->buffer = buf;
comb->bufsize = size;
comb->filterstore = 0.0f;
comb->bufidx = 0;
}
static void verblib_comb_mute ( verblib_comb* comb )
{
int i;
for ( i = 0; i < comb->bufsize; i++ )
{
comb->buffer[i] = 0.0f;
}
}
static void verblib_comb_set_damp ( verblib_comb* comb, float val )
{
comb->damp1 = val;
comb->damp2 = 1.0f - val;
}
static VERBLIB_INLINE float verblib_comb_process ( verblib_comb* comb, float input )
{
float output;
output = comb->buffer[comb->bufidx];
undenormalise ( output );
comb->filterstore = ( output * comb->damp2 ) + ( comb->filterstore * comb->damp1 );
undenormalise ( comb->filterstore );
comb->buffer[comb->bufidx] = input + ( comb->filterstore * comb->feedback );
if ( ++comb->bufidx >= comb->bufsize )
{
comb->bufidx = 0;
}
return output;
}
static void verblib_update ( verblib* verb )
{
/* Recalculate internal values after parameter change. */
int i;
verb->wet1 = verb->wet * ( verb->width / 2.0f + 0.5f );
verb->wet2 = verb->wet * ( ( 1.0f - verb->width ) / 2.0f );
if ( verb->mode >= verblib_freezemode )
{
verb->roomsize1 = 1.0f;
verb->damp1 = 0.0f;
verb->gain = verblib_muted;
}
else
{
verb->roomsize1 = verb->roomsize;
verb->damp1 = verb->damp;
verb->gain = verblib_fixedgain;
}
for ( i = 0; i < verblib_numcombs; i++ )
{
verb->combL[i].feedback = verb->roomsize1;
verb->combR[i].feedback = verb->roomsize1;
verblib_comb_set_damp ( &verb->combL[i], verb->damp1 );
verblib_comb_set_damp ( &verb->combR[i], verb->damp1 );
}
}
static void verblib_mute ( verblib* verb )
{
int i;
if ( verblib_get_mode ( verb ) >= verblib_freezemode )
{
return;
}
for ( i = 0; i < verblib_numcombs; i++ )
{
verblib_comb_mute ( &verb->combL[i] );
verblib_comb_mute ( &verb->combR[i] );
}
for ( i = 0; i < verblib_numallpasses; i++ )
{
verblib_allpass_mute ( &verb->allpassL[i] );
verblib_allpass_mute ( &verb->allpassR[i] );
}
}
static int verblib_get_verblib_scaled_buffer_size ( unsigned long sample_rate, unsigned long value )
{
long double result = ( long double ) sample_rate;
result /= 44100.0;
result = ( ( long double ) value ) * result;
if ( result < 1.0 )
{
result = 1.0;
}
return ( int ) result;
}
int verblib_initialize ( verblib* verb, unsigned long sample_rate, unsigned int channels )
{
int i;
if ( channels != 1 && channels != 2 )
{
return 0; /* Currently supports only 1 or 2 channels. */
}
if ( sample_rate < 22050 )
{
return 0; /* The minimum supported sample rate is 22050 Hz. */
}
else if ( sample_rate > 44100 * verblib_max_sample_rate_multiplier )
{
return 0; /* The sample rate is too high. */
}
verb->channels = channels;
/* Tie the components to their buffers. */
verblib_comb_initialize ( &verb->combL[0], verb->bufcombL1, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningL1 ) );
verblib_comb_initialize ( &verb->combR[0], verb->bufcombR1, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningR1 ) );
verblib_comb_initialize ( &verb->combL[1], verb->bufcombL2, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningL2 ) );
verblib_comb_initialize ( &verb->combR[1], verb->bufcombR2, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningR2 ) );
verblib_comb_initialize ( &verb->combL[2], verb->bufcombL3, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningL3 ) );
verblib_comb_initialize ( &verb->combR[2], verb->bufcombR3, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningR3 ) );
verblib_comb_initialize ( &verb->combL[3], verb->bufcombL4, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningL4 ) );
verblib_comb_initialize ( &verb->combR[3], verb->bufcombR4, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningR4 ) );
verblib_comb_initialize ( &verb->combL[4], verb->bufcombL5, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningL5 ) );
verblib_comb_initialize ( &verb->combR[4], verb->bufcombR5, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningR5 ) );
verblib_comb_initialize ( &verb->combL[5], verb->bufcombL6, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningL6 ) );
verblib_comb_initialize ( &verb->combR[5], verb->bufcombR6, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningR6 ) );
verblib_comb_initialize ( &verb->combL[6], verb->bufcombL7, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningL7 ) );
verblib_comb_initialize ( &verb->combR[6], verb->bufcombR7, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningR7 ) );
verblib_comb_initialize ( &verb->combL[7], verb->bufcombL8, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningL8 ) );
verblib_comb_initialize ( &verb->combR[7], verb->bufcombR8, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_combtuningR8 ) );
verblib_allpass_initialize ( &verb->allpassL[0], verb->bufallpassL1, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_allpasstuningL1 ) );
verblib_allpass_initialize ( &verb->allpassR[0], verb->bufallpassR1, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_allpasstuningR1 ) );
verblib_allpass_initialize ( &verb->allpassL[1], verb->bufallpassL2, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_allpasstuningL2 ) );
verblib_allpass_initialize ( &verb->allpassR[1], verb->bufallpassR2, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_allpasstuningR2 ) );
verblib_allpass_initialize ( &verb->allpassL[2], verb->bufallpassL3, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_allpasstuningL3 ) );
verblib_allpass_initialize ( &verb->allpassR[2], verb->bufallpassR3, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_allpasstuningR3 ) );
verblib_allpass_initialize ( &verb->allpassL[3], verb->bufallpassL4, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_allpasstuningL4 ) );
verblib_allpass_initialize ( &verb->allpassR[3], verb->bufallpassR4, verblib_get_verblib_scaled_buffer_size ( sample_rate, verblib_allpasstuningR4 ) );
/* Set default values. */
for ( i = 0; i < verblib_numallpasses; i++ )
{
verb->allpassL[i].feedback = 0.5f;
verb->allpassR[i].feedback = 0.5f;
}
verblib_set_wet ( verb, verblib_initialwet );
verblib_set_room_size ( verb, verblib_initialroom );
verblib_set_dry ( verb, verblib_initialdry );
verblib_set_damping ( verb, verblib_initialdamp );
verblib_set_width ( verb, verblib_initialwidth );
verblib_set_mode ( verb, verblib_initialmode );
/* The buffers will be full of rubbish - so we MUST mute them. */
verblib_mute ( verb );
return 1;
}
void verblib_process ( verblib* verb, const float* input_buffer, float* output_buffer, unsigned long frames )
{
int i;
float outL, outR, input;
if ( verb->channels == 1 )
{
while ( frames-- > 0 )
{
outL = 0.0f;
input = ( input_buffer[0] * 2.0f ) * verb->gain;
/* Accumulate comb filters in parallel. */
for ( i = 0; i < verblib_numcombs; i++ )
{
outL += verblib_comb_process ( &verb->combL[i], input );
}
/* Feed through allpasses in series. */
for ( i = 0; i < verblib_numallpasses; i++ )
{
outL = verblib_allpass_process ( &verb->allpassL[i], outL );
}
/* Calculate output REPLACING anything already there. */
output_buffer[0] = outL * verb->wet1 + input_buffer[0] * verb->dry;
/* Increment sample pointers. */
++input_buffer;
++output_buffer;
}
}
else if ( verb->channels == 2 )
{
while ( frames-- > 0 )
{
outL = outR = 0.0f;
input = ( input_buffer[0] + input_buffer[1] ) * verb->gain;
/* Accumulate comb filters in parallel. */
for ( i = 0; i < verblib_numcombs; i++ )
{
outL += verblib_comb_process ( &verb->combL[i], input );
outR += verblib_comb_process ( &verb->combR[i], input );
}
/* Feed through allpasses in series. */
for ( i = 0; i < verblib_numallpasses; i++ )
{
outL = verblib_allpass_process ( &verb->allpassL[i], outL );
outR = verblib_allpass_process ( &verb->allpassR[i], outR );
}
/* Calculate output REPLACING anything already there. */
output_buffer[0] = outL * verb->wet1 + outR * verb->wet2 + input_buffer[0] * verb->dry;
output_buffer[1] = outR * verb->wet1 + outL * verb->wet2 + input_buffer[1] * verb->dry;
/* Increment sample pointers. */
input_buffer += 2;
output_buffer += 2;
}
}
}
void verblib_set_room_size ( verblib* verb, float value )
{
verb->roomsize = ( value * verblib_scaleroom ) + verblib_offsetroom;
verblib_update ( verb );
}
float verblib_get_room_size ( const verblib* verb )
{
return ( verb->roomsize - verblib_offsetroom ) / verblib_scaleroom;
}
void verblib_set_damping ( verblib* verb, float value )
{
verb->damp = value * verblib_scaledamp;
verblib_update ( verb );
}
float verblib_get_damping ( const verblib* verb )
{
return verb->damp / verblib_scaledamp;
}
void verblib_set_wet ( verblib* verb, float value )
{
verb->wet = value * verblib_scalewet;
verblib_update ( verb );
}
float verblib_get_wet ( const verblib* verb )
{
return verb->wet / verblib_scalewet;
}
void verblib_set_dry ( verblib* verb, float value )
{
verb->dry = value * verblib_scaledry;
}
float verblib_get_dry ( const verblib* verb )
{
return verb->dry / verblib_scaledry;
}
void verblib_set_width ( verblib* verb, float value )
{
verb->width = value;
verblib_update ( verb );
}
float verblib_get_width ( const verblib* verb )
{
return verb->width;
}
void verblib_set_mode ( verblib* verb, float value )
{
verb->mode = value;
verblib_update ( verb );
}
float verblib_get_mode ( const verblib* verb )
{
if ( verb->mode >= verblib_freezemode )
{
return 1.0f;
}
return 0.0f;
}
unsigned long verblib_get_decay_time_in_frames ( const verblib* verb )
{
double decay;
if ( verb->mode >= verblib_freezemode )
{
return 0; /* Freeze mode creates an infinite decay. */
}
decay = verblib_silence_threshold / fabs ( -20.0 * log ( 1.0 / verb->roomsize1 ) );
decay *= ( double ) ( verb->combR[7].bufsize * 2 );
return ( unsigned long ) decay;
}
#endif /* VERBLIB_IMPLEMENTATION */
/* REVISION HISTORY
*
* Version 0.4 - 2021-01-23
* Added a function called verblib_get_decay_time_in_frames.
*
* Version 0.3 - 2021-01-18
* Added support for sample rates of 22050 and above.
*
* Version 0.2 - 2021-01-17
* Added support for processing mono audio.
*
* Version 0.1 - 2021-01-17
* Initial release.
*/
/* LICENSE
This software is available under 2 licenses -- choose whichever you prefer.
------------------------------------------------------------------------------
ALTERNATIVE A - MIT No Attribution License
Copyright (c) 2021 Philip Bennefall
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
------------------------------------------------------------------------------
ALTERNATIVE B - Public Domain (www.unlicense.org)
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
software, either in source code form or as a compiled binary, for any purpose,
commercial or non-commercial, and by any means.
In jurisdictions that recognize copyright laws, the author or authors of this
software dedicate any and all copyright interest in the software to the public
domain. We make this dedication for the benefit of the public at large and to
the detriment of our heirs and successors. We intend this dedication to be an
overt act of relinquishment in perpetuity of all present and future rights to
this software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------
*/
#include "../../../../extras/nodes/ma_vocoder_node/ma_vocoder_node.c"
\ No newline at end of file
#include "../../../../extras/nodes/ma_vocoder_node/ma_vocoder_node.h"
\ No newline at end of file
#include "../../../../extras/nodes/ma_vocoder_node/ma_vocoder_node_example.c"
\ No newline at end of file
/* Vocoder Library
* Voclib version 1.1 - 2019-02-16
*
* Philip Bennefall - philip@blastbay.com
*
* See the end of this file for licensing terms.
* The filter implementation was derived from public domain code found on musicdsp.org (see the section called "Filters" for more details).
*
* USAGE
*
* This is a single-file library. To use it, do something like the following in one .c file.
* #define VOCLIB_IMPLEMENTATION
* #include "voclib.h"
*
* You can then #include this file in other parts of the program as you would with any other header file.
*/
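/*
 * A minimal usage sketch based on the API declared below; the band count, sample rate and
 * buffer length are illustrative assumptions only.
 *
 *     #define VOCLIB_IMPLEMENTATION
 *     #include "voclib.h"
 *
 *     static float carrier[512];   // The carrier signal (e.g. a synth pad), mono in this sketch.
 *     static float modulator[512]; // The modulator signal (e.g. speech), always mono.
 *     static float output[512];    // Must be able to hold as many channels as the carrier.
 *
 *     int run_vocoder(void)
 *     {
 *         static voclib_instance voc; // Static because the instance embeds all of its filter state.
 *
 *         if (!voclib_initialize(&voc, 24, 6, 44100, 1)) {
 *             return 0; // One of the parameters was out of range.
 *         }
 *
 *         voclib_set_reaction_time(&voc, 0.03f); // ~30 milliseconds is recommended for intelligible speech.
 *         voclib_set_formant_shift(&voc, 1.0f);  // 1.0 leaves the head size unmodified.
 *
 *         return voclib_process(&voc, carrier, modulator, output, 512);
 *     }
 */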
#ifndef VOCLIB_H
#define VOCLIB_H
#ifdef __cplusplus
extern "C" {
#endif
/* COMPILE-TIME OPTIONS */
/* The maximum number of bands that the vocoder can be initialized with (lower this number to save memory). */
#define VOCLIB_MAX_BANDS 96
/* The maximum number of filters per vocoder band (lower this number to save memory). */
#define VOCLIB_MAX_FILTERS_PER_BAND 8
/* PUBLIC API */
typedef struct voclib_instance voclib_instance;
/* Initialize a voclib_instance structure.
*
* Call this function to initialize the voclib_instance structure.
* bands is the number of bands that the vocoder should use; recommended values are between 12 and 64.
* bands must be between 4 and VOCLIB_MAX_BANDS (inclusive).
* filters_per_band determines the steepness with which the filterbank divides the signal; a value of 6 is recommended.
* filters_per_band must be between 1 and VOCLIB_MAX_FILTERS_PER_BAND (inclusive).
* sample_rate is the number of samples per second in hertz, and should be between 8000 and 192000 (inclusive).
* carrier_channels is the number of channels that the carrier has, and should be between 1 and 2 (inclusive).
* Note: The modulator must always have only one channel.
* Returns nonzero (true) on success or 0 (false) on failure.
* The function will only fail if one or more of the parameters are invalid.
*/
int voclib_initialize ( voclib_instance* instance, unsigned char bands, unsigned char filters_per_band, unsigned int sample_rate, unsigned char carrier_channels );
/* Run the vocoder.
*
* Call this function continuously to generate your output.
* carrier_buffer and modulator_buffer should contain the carrier and modulator signals respectively.
* The modulator must always have one channel.
* If the carrier has two channels, the samples in carrier_buffer must be interleaved.
* output_buffer will be filled with the result, and must be able to hold as many channels as the carrier.
* If the carrier has two channels, the output buffer will be filled with interleaved samples.
* output_buffer may be the same pointer as either carrier_buffer or modulator_buffer as long as it can hold the same number of channels as the carrier.
* The processing is performed in place.
* frames specifies the number of sample frames that should be processed.
* Returns nonzero (true) on success or 0 (false) on failure.
* The function will only fail if one or more of the parameters are invalid.
*/
int voclib_process ( voclib_instance* instance, const float* carrier_buffer, const float* modulator_buffer, float* output_buffer, unsigned int frames );
/* Reset the vocoder sample history.
*
* In order to run smoothly, the vocoder needs to store a few recent samples internally.
* This function resets that internal history. This should only be done if you are processing a new stream.
* Resetting the history in the middle of a stream will cause clicks.
*/
void voclib_reset_history ( voclib_instance* instance );
/* Set the reaction time of the vocoder in seconds.
*
* The reaction time is the time it takes for the vocoder to respond to a volume change in the modulator.
* A value of 0.03 (AKA 30 milliseconds) is recommended for intelligible speech.
* Values lower than about 0.02 will make the output sound raspy and unpleasant.
* Values above 0.2 or so will make the speech hard to understand, but can be used for special effects.
* The value must be between 0.002 and 2.0 (inclusive).
* Returns nonzero (true) on success or 0 (false) on failure.
* The function will only fail if the parameter is invalid.
*/
int voclib_set_reaction_time ( voclib_instance* instance, float reaction_time );
/* Get the current reaction time of the vocoder in seconds. */
float voclib_get_reaction_time ( const voclib_instance* instance );
/* Set the formant shift of the vocoder in octaves.
*
* Formant shifting changes the size of the speaker's head.
* A value of 1.0 leaves the head size unmodified.
* Values lower than 1.0 make the head larger, and values above 1.0 make it smaller.
* The value must be between 0.25 and 4.0 (inclusive).
* Returns nonzero (true) on success or 0 (false) on failure.
* The function will only fail if the parameter is invalid.
*/
int voclib_set_formant_shift ( voclib_instance* instance, float formant_shift );
/* Get the current formant shift of the vocoder in octaves. */
float voclib_get_formant_shift ( const voclib_instance* instance );
/* INTERNAL STRUCTURES */
/* This holds the data required to update samples through a filter. */
typedef struct
{
float a0, a1, a2, a3, a4;
float x1, x2, y1, y2;
} voclib_biquad;
/* Stores the state required for our envelope follower. */
typedef struct
{
float coef;
float history[4];
} voclib_envelope;
/* Holds a set of filters required for one vocoder band. */
typedef struct
{
voclib_biquad filters[VOCLIB_MAX_FILTERS_PER_BAND];
} voclib_band;
/* The main instance structure. This is the structure that you will create an instance of when using the vocoder. */
struct voclib_instance
{
voclib_band analysis_bands[VOCLIB_MAX_BANDS]; /* The filterbank used for analysis (these are applied to the modulator). */
voclib_envelope analysis_envelopes[VOCLIB_MAX_BANDS]; /* The envelopes used to smooth the analysis bands. */
voclib_band synthesis_bands[VOCLIB_MAX_BANDS * 2]; /* The filterbank used for synthesis (these are applied to the carrier). The second half of the array is only used for stereo carriers. */
float reaction_time; /* In seconds. Higher values make the vocoder respond more slowly to changes in the modulator. */
float formant_shift; /* In octaves. 1.0 is unchanged. */
unsigned int sample_rate; /* In hertz. */
unsigned char bands;
unsigned char filters_per_band;
unsigned char carrier_channels;
};
#ifdef __cplusplus
}
#endif
#endif /* VOCLIB_H */
/* IMPLEMENTATION */
#ifdef VOCLIB_IMPLEMENTATION
#include <math.h>
#include <assert.h>
#ifdef _MSC_VER
#define VOCLIB_INLINE __forceinline
#else
#ifdef __GNUC__
#define VOCLIB_INLINE inline __attribute__((always_inline))
#else
#define VOCLIB_INLINE inline
#endif
#endif
/* Filters
*
* The filter code below was derived from http://www.musicdsp.org/files/biquad.c. The comment at the top of biquad.c file reads:
*
* Simple implementation of Biquad filters -- Tom St Denis
*
* Based on the work
Cookbook formulae for audio EQ biquad filter coefficients
---------------------------------------------------------
by Robert Bristow-Johnson, pbjrbj@viconet.com a.k.a. robert@audioheads.com
* Available on the web at
http://www.smartelectronix.com/musicdsp/text/filters005.txt
* Enjoy.
*
* This work is hereby placed in the public domain for all purposes, whether
* commercial, free [as in speech] or educational, etc. Use the code and please
* give me credit if you wish.
*
* Tom St Denis -- http://tomstdenis.home.dhs.org
*/
#ifndef VOCLIB_M_LN2
#define VOCLIB_M_LN2 0.69314718055994530942
#endif
#ifndef VOCLIB_M_PI
#define VOCLIB_M_PI 3.14159265358979323846
#endif
/* Computes a BiQuad filter on a sample. */
static VOCLIB_INLINE float voclib_BiQuad ( float sample, voclib_biquad* b )
{
float result;
/* compute the result. */
result = b->a0 * sample + b->a1 * b->x1 + b->a2 * b->x2 -
b->a3 * b->y1 - b->a4 * b->y2;
/* shift x1 to x2, sample to x1. */
b->x2 = b->x1;
b->x1 = sample;
/* shift y1 to y2, result to y1. */
b->y2 = b->y1;
b->y1 = result;
return result;
}
/* filter types. */
enum
{
VOCLIB_LPF, /* low pass filter */
VOCLIB_HPF, /* High pass filter */
VOCLIB_BPF, /* band pass filter */
VOCLIB_NOTCH, /* Notch Filter */
VOCLIB_PEQ, /* Peaking band EQ filter */
VOCLIB_LSH, /* Low shelf filter */
VOCLIB_HSH /* High shelf filter */
};
/* sets up a BiQuad Filter. */
static void voclib_BiQuad_new ( voclib_biquad* b, int type, float dbGain, /* gain of filter */
float freq, /* center frequency */
float srate, /* sampling rate */
float bandwidth ) /* bandwidth in octaves */
{
float A, omega, sn, cs, alpha, beta;
float a0, a1, a2, b0, b1, b2;
/* setup variables. */
A = ( float ) pow ( 10, dbGain / 40.0f );
omega = ( float ) ( 2.0 * VOCLIB_M_PI * freq / srate );
sn = ( float ) sin ( omega );
cs = ( float ) cos ( omega );
alpha = sn * ( float ) sinh ( VOCLIB_M_LN2 / 2 * bandwidth * omega / sn );
beta = ( float ) sqrt ( A + A );
switch ( type )
{
case VOCLIB_LPF:
b0 = ( 1 - cs ) / 2;
b1 = 1 - cs;
b2 = ( 1 - cs ) / 2;
a0 = 1 + alpha;
a1 = -2 * cs;
a2 = 1 - alpha;
break;
case VOCLIB_HPF:
b0 = ( 1 + cs ) / 2;
b1 = - ( 1 + cs );
b2 = ( 1 + cs ) / 2;
a0 = 1 + alpha;
a1 = -2 * cs;
a2 = 1 - alpha;
break;
case VOCLIB_BPF:
b0 = alpha;
b1 = 0;
b2 = -alpha;
a0 = 1 + alpha;
a1 = -2 * cs;
a2 = 1 - alpha;
break;
case VOCLIB_NOTCH:
b0 = 1;
b1 = -2 * cs;
b2 = 1;
a0 = 1 + alpha;
a1 = -2 * cs;
a2 = 1 - alpha;
break;
case VOCLIB_PEQ:
b0 = 1 + ( alpha * A );
b1 = -2 * cs;
b2 = 1 - ( alpha * A );
a0 = 1 + ( alpha / A );
a1 = -2 * cs;
a2 = 1 - ( alpha / A );
break;
case VOCLIB_LSH:
b0 = A * ( ( A + 1 ) - ( A - 1 ) * cs + beta * sn );
b1 = 2 * A * ( ( A - 1 ) - ( A + 1 ) * cs );
b2 = A * ( ( A + 1 ) - ( A - 1 ) * cs - beta * sn );
a0 = ( A + 1 ) + ( A - 1 ) * cs + beta * sn;
a1 = -2 * ( ( A - 1 ) + ( A + 1 ) * cs );
a2 = ( A + 1 ) + ( A - 1 ) * cs - beta * sn;
break;
case VOCLIB_HSH:
b0 = A * ( ( A + 1 ) + ( A - 1 ) * cs + beta * sn );
b1 = -2 * A * ( ( A - 1 ) + ( A + 1 ) * cs );
b2 = A * ( ( A + 1 ) + ( A - 1 ) * cs - beta * sn );
a0 = ( A + 1 ) - ( A - 1 ) * cs + beta * sn;
a1 = 2 * ( ( A - 1 ) - ( A + 1 ) * cs );
a2 = ( A + 1 ) - ( A - 1 ) * cs - beta * sn;
break;
default:
assert ( 0 ); /* Misuse. */
return;
}
/* precompute the coefficients. */
b->a0 = b0 / a0;
b->a1 = b1 / a0;
b->a2 = b2 / a0;
b->a3 = a1 / a0;
b->a4 = a2 / a0;
}
/* Reset the filter history. */
static void voclib_BiQuad_reset ( voclib_biquad* b )
{
b->x1 = b->x2 = 0.0f;
b->y1 = b->y2 = 0.0f;
}
/* Envelope follower. */
static void voclib_envelope_configure ( voclib_envelope* envelope, double time_in_seconds, double sample_rate )
{
envelope->coef = ( float ) ( pow ( 0.01, 1.0 / ( time_in_seconds * sample_rate ) ) );
}
/* Reset the envelope history. */
static void voclib_envelope_reset ( voclib_envelope* envelope )
{
envelope->history[0] = 0.0f;
envelope->history[1] = 0.0f;
envelope->history[2] = 0.0f;
envelope->history[3] = 0.0f;
}
static VOCLIB_INLINE float voclib_envelope_tick ( voclib_envelope* envelope, float sample )
{
const float coef = envelope->coef;
envelope->history[0] = ( float ) ( ( 1.0f - coef ) * fabs ( sample ) ) + ( coef * envelope->history[0] );
envelope->history[1] = ( ( 1.0f - coef ) * envelope->history[0] ) + ( coef * envelope->history[1] );
envelope->history[2] = ( ( 1.0f - coef ) * envelope->history[1] ) + ( coef * envelope->history[2] );
envelope->history[3] = ( ( 1.0f - coef ) * envelope->history[2] ) + ( coef * envelope->history[3] );
return envelope->history[3];
}
/* Initialize the vocoder filterbank. */
static void voclib_initialize_filterbank ( voclib_instance* instance, int carrier_only )
{
unsigned char i;
double step;
double lastfreq = 0.0;
double minfreq = 80.0;
double maxfreq = instance->sample_rate;
if ( maxfreq > 12000.0 )
{
maxfreq = 12000.0;
}
step = pow ( ( maxfreq / minfreq ), ( 1.0 / instance->bands ) );
for ( i = 0; i < instance->bands; ++i )
{
unsigned char i2;
double bandwidth, nextfreq;
double priorfreq = lastfreq;
if ( lastfreq > 0.0 )
{
lastfreq *= step;
}
else
{
lastfreq = minfreq;
}
nextfreq = lastfreq * step;
bandwidth = ( nextfreq - priorfreq ) / lastfreq;
if ( !carrier_only )
{
voclib_BiQuad_new ( &instance->analysis_bands[i].filters[0], VOCLIB_BPF, 0.0f, ( float ) lastfreq, ( float ) instance->sample_rate, ( float ) bandwidth );
for ( i2 = 1; i2 < instance->filters_per_band; ++i2 )
{
instance->analysis_bands[i].filters[i2].a0 = instance->analysis_bands[i].filters[0].a0;
instance->analysis_bands[i].filters[i2].a1 = instance->analysis_bands[i].filters[0].a1;
instance->analysis_bands[i].filters[i2].a2 = instance->analysis_bands[i].filters[0].a2;
instance->analysis_bands[i].filters[i2].a3 = instance->analysis_bands[i].filters[0].a3;
instance->analysis_bands[i].filters[i2].a4 = instance->analysis_bands[i].filters[0].a4;
}
}
if ( instance->formant_shift != 1.0f )
{
voclib_BiQuad_new ( &instance->synthesis_bands[i].filters[0], VOCLIB_BPF, 0.0f, ( float ) ( lastfreq * instance->formant_shift ), ( float ) instance->sample_rate, ( float ) bandwidth );
}
else
{
instance->synthesis_bands[i].filters[0].a0 = instance->analysis_bands[i].filters[0].a0;
instance->synthesis_bands[i].filters[0].a1 = instance->analysis_bands[i].filters[0].a1;
instance->synthesis_bands[i].filters[0].a2 = instance->analysis_bands[i].filters[0].a2;
instance->synthesis_bands[i].filters[0].a3 = instance->analysis_bands[i].filters[0].a3;
instance->synthesis_bands[i].filters[0].a4 = instance->analysis_bands[i].filters[0].a4;
}
instance->synthesis_bands[i + VOCLIB_MAX_BANDS].filters[0].a0 = instance->synthesis_bands[i].filters[0].a0;
instance->synthesis_bands[i + VOCLIB_MAX_BANDS].filters[0].a1 = instance->synthesis_bands[i].filters[0].a1;
instance->synthesis_bands[i + VOCLIB_MAX_BANDS].filters[0].a2 = instance->synthesis_bands[i].filters[0].a2;
instance->synthesis_bands[i + VOCLIB_MAX_BANDS].filters[0].a3 = instance->synthesis_bands[i].filters[0].a3;
instance->synthesis_bands[i + VOCLIB_MAX_BANDS].filters[0].a4 = instance->synthesis_bands[i].filters[0].a4;
for ( i2 = 1; i2 < instance->filters_per_band; ++i2 )
{
instance->synthesis_bands[i].filters[i2].a0 = instance->synthesis_bands[i].filters[0].a0;
instance->synthesis_bands[i].filters[i2].a1 = instance->synthesis_bands[i].filters[0].a1;
instance->synthesis_bands[i].filters[i2].a2 = instance->synthesis_bands[i].filters[0].a2;
instance->synthesis_bands[i].filters[i2].a3 = instance->synthesis_bands[i].filters[0].a3;
instance->synthesis_bands[i].filters[i2].a4 = instance->synthesis_bands[i].filters[0].a4;
instance->synthesis_bands[i + VOCLIB_MAX_BANDS].filters[i2].a0 = instance->synthesis_bands[i].filters[0].a0;
instance->synthesis_bands[i + VOCLIB_MAX_BANDS].filters[i2].a1 = instance->synthesis_bands[i].filters[0].a1;
instance->synthesis_bands[i + VOCLIB_MAX_BANDS].filters[i2].a2 = instance->synthesis_bands[i].filters[0].a2;
instance->synthesis_bands[i + VOCLIB_MAX_BANDS].filters[i2].a3 = instance->synthesis_bands[i].filters[0].a3;
instance->synthesis_bands[i + VOCLIB_MAX_BANDS].filters[i2].a4 = instance->synthesis_bands[i].filters[0].a4;
}
}
}
/* Initialize the vocoder envelopes. */
static void voclib_initialize_envelopes ( voclib_instance* instance )
{
unsigned char i;
voclib_envelope_configure ( &instance->analysis_envelopes[0], instance->reaction_time, ( double ) instance->sample_rate );
for ( i = 1; i < instance->bands; ++i )
{
instance->analysis_envelopes[i].coef = instance->analysis_envelopes[0].coef;
}
}
int voclib_initialize ( voclib_instance* instance, unsigned char bands, unsigned char filters_per_band, unsigned int sample_rate, unsigned char carrier_channels )
{
if ( !instance )
{
return 0;
}
if ( bands < 4 || bands > VOCLIB_MAX_BANDS )
{
return 0;
}
if ( filters_per_band < 1 || filters_per_band > VOCLIB_MAX_FILTERS_PER_BAND )
{
return 0;
}
if ( sample_rate < 8000 || sample_rate > 192000 )
{
return 0;
}
if ( carrier_channels < 1 || carrier_channels > 2 )
{
return 0;
}
instance->reaction_time = 0.03f;
instance->formant_shift = 1.0f;
instance->sample_rate = sample_rate;
instance->bands = bands;
instance->filters_per_band = filters_per_band;
instance->carrier_channels = carrier_channels;
voclib_reset_history ( instance );
voclib_initialize_filterbank ( instance, 0 );
voclib_initialize_envelopes ( instance );
return 1;
}
void voclib_reset_history ( voclib_instance* instance )
{
unsigned char i;
for ( i = 0; i < instance->bands; ++i )
{
unsigned char i2;
for ( i2 = 0; i2 < instance->filters_per_band; ++i2 )
{
voclib_BiQuad_reset ( &instance->analysis_bands[i].filters[i2] );
voclib_BiQuad_reset ( &instance->synthesis_bands[i].filters[i2] );
voclib_BiQuad_reset ( &instance->synthesis_bands[i + VOCLIB_MAX_BANDS].filters[i2] );
}
voclib_envelope_reset ( &instance->analysis_envelopes[i] );
}
}
int voclib_process ( voclib_instance* instance, const float* carrier_buffer, const float* modulator_buffer, float* output_buffer, unsigned int frames )
{
unsigned int i;
const unsigned char bands = instance->bands;
const unsigned char filters_per_band = instance->filters_per_band;
if ( !carrier_buffer )
{
return 0;
}
if ( !modulator_buffer )
{
return 0;
}
if ( !output_buffer )
{
return 0;
}
if ( frames == 0 )
{
return 0;
}
if ( instance->carrier_channels == 2 )
{
/* The carrier has two channels and the modulator has 1. */
for ( i = 0; i < frames * 2; i += 2, ++modulator_buffer )
{
unsigned char i2;
float out_left = 0.0f;
float out_right = 0.0f;
/* Run the bands in parallel and accumulate the output. */
for ( i2 = 0; i2 < bands; ++i2 )
{
unsigned char i3;
float analysis_band = voclib_BiQuad ( *modulator_buffer, &instance->analysis_bands[i2].filters[0] );
float synthesis_band_left = voclib_BiQuad ( carrier_buffer[i], &instance->synthesis_bands[i2].filters[0] );
float synthesis_band_right = voclib_BiQuad ( carrier_buffer[i + 1], &instance->synthesis_bands[i2 + VOCLIB_MAX_BANDS].filters[0] );
for ( i3 = 1; i3 < filters_per_band; ++i3 )
{
analysis_band = voclib_BiQuad ( analysis_band, &instance->analysis_bands[i2].filters[i3] );
synthesis_band_left = voclib_BiQuad ( synthesis_band_left, &instance->synthesis_bands[i2].filters[i3] );
synthesis_band_right = voclib_BiQuad ( synthesis_band_right, &instance->synthesis_bands[i2 + VOCLIB_MAX_BANDS].filters[i3] );
}
analysis_band = voclib_envelope_tick ( &instance->analysis_envelopes[i2], analysis_band );
out_left += synthesis_band_left * analysis_band;
out_right += synthesis_band_right * analysis_band;
}
output_buffer[i] = out_left;
output_buffer[i + 1] = out_right;
}
}
else
{
/* Both the carrier and the modulator have a single channel. */
for ( i = 0; i < frames; ++i )
{
unsigned char i2;
float out = 0.0f;
/* Run the bands in parallel and accumulate the output. */
for ( i2 = 0; i2 < bands; ++i2 )
{
unsigned char i3;
float analysis_band = voclib_BiQuad ( modulator_buffer[i], &instance->analysis_bands[i2].filters[0] );
float synthesis_band = voclib_BiQuad ( carrier_buffer[i], &instance->synthesis_bands[i2].filters[0] );
for ( i3 = 1; i3 < filters_per_band; ++i3 )
{
analysis_band = voclib_BiQuad ( analysis_band, &instance->analysis_bands[i2].filters[i3] );
synthesis_band = voclib_BiQuad ( synthesis_band, &instance->synthesis_bands[i2].filters[i3] );
}
analysis_band = voclib_envelope_tick ( &instance->analysis_envelopes[i2], analysis_band );
out += synthesis_band * analysis_band;
}
output_buffer[i] = out;
}
}
return 1;
}
int voclib_set_reaction_time ( voclib_instance* instance, float reaction_time )
{
if ( reaction_time < 0.002f || reaction_time > 2.0f )
{
return 0;
}
instance->reaction_time = reaction_time;
voclib_initialize_envelopes ( instance );
return 1;
}
float voclib_get_reaction_time ( const voclib_instance* instance )
{
return instance->reaction_time;
}
int voclib_set_formant_shift ( voclib_instance* instance, float formant_shift )
{
if ( formant_shift < 0.25f || formant_shift > 4.0f )
{
return 0;
}
instance->formant_shift = formant_shift;
voclib_initialize_filterbank ( instance, 1 );
return 1;
}
float voclib_get_formant_shift ( const voclib_instance* instance )
{
return instance->formant_shift;
}
#endif /* VOCLIB_IMPLEMENTATION */
/* REVISION HISTORY
*
* Version 1.1 - 2019-02-16
* Breaking change: Introduced a new argument to voclib_initialize called carrier_channels. This allows the vocoder to output stereo natively.
* Better assignment of band frequencies when using lower sample rates.
* The shell now automatically normalizes the output file to match the peak amplitude in the carrier.
* Fixed a memory corruption bug in the shell which would occur in response to an error condition.
*
* Version 1.0 - 2019-01-27
* Initial release.
*/
/* LICENSE
This software is available under 2 licenses -- choose whichever you prefer.
------------------------------------------------------------------------------
ALTERNATIVE A - MIT No Attribution License
Copyright (c) 2019 Philip Bennefall
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
------------------------------------------------------------------------------
ALTERNATIVE B - Public Domain (www.unlicense.org)
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
software, either in source code form or as a compiled binary, for any purpose,
commercial or non-commercial, and by any means.
In jurisdictions that recognize copyright laws, the author or authors of this
software dedicate any and all copyright interest in the software to the public
domain. We make this dedication for the benefit of the public at large and to
the detriment of our heirs and successors. We intend this dedication to be an
overt act of relinquishment in perpetuity of all present and future rights to
this software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------
*/
#define STB_VORBIS_HEADER_ONLY
#include "../extras/stb_vorbis.c" /* Enables Vorbis decoding. */
#define MA_EXPERIMENTAL__DATA_LOOPING_AND_CHAINING
#define MA_DEBUG_OUTPUT
#define MA_IMPLEMENTATION
#include "../miniaudio.h"
#include "miniaudio_engine.h"
typedef struct
{
ma_async_notification_callbacks cb;
ma_sound* pSound;
} sound_loaded_notification;
void on_sound_loaded(ma_async_notification* pNotification)
{
//sound_loaded_notification* pLoadedNotification = (sound_loaded_notification*)pNotification;
//ma_uint64 lengthInPCMFrames;
(void)pNotification;
/*
This will be fired when the sound has finished loading. We should be able to retrieve the length of the sound at this point. Here we'll just set
the fade out time.
*/
//ma_sound_get_length_in_pcm_frames(pLoadedNotification->pSound, &lengthInPCMFrames);
//ma_sound_set_fade_point_in_frames(pLoadedNotification->pSound, 1, 1, 0, lengthInPCMFrames - 192000, lengthInPCMFrames);
}
int main(int argc, char** argv)
{
ma_result result;
ma_resource_manager resourceManager;
ma_resource_manager_config resourceManagerConfig;
ma_engine engine;
ma_engine_config engineConfig;
ma_sound baseSound;
ma_sound sound;
ma_sound sound2;
sound_loaded_notification loadNotification;
ma_sound_group group;
if (argc < 2) {
printf("No input file.\n");
return -1;
}
resourceManagerConfig = ma_resource_manager_config_init();
//resourceManagerConfig.decodedFormat = ma_format_f32;
//resourceManagerConfig.decodedChannels = 2;
resourceManagerConfig.decodedSampleRate = 48000;
//resourceManagerConfig.flags |= MA_RESOURCE_MANAGER_FLAG_NO_THREADING;
resourceManagerConfig.jobThreadCount = 1;
resourceManagerConfig.jobQueueCapacity = 8;
result = ma_resource_manager_init(&resourceManagerConfig, &resourceManager);
if (result != MA_SUCCESS) {
printf("Failed to initialize resource manager.\n");
return -1;
}
engineConfig = ma_engine_config_init();
engineConfig.pResourceManager = &resourceManager;
result = ma_engine_init(&engineConfig, &engine);
if (result != MA_SUCCESS) {
printf("Failed to initialize audio engine.\n");
return -1;
}
result = ma_sound_group_init(&engine, 0, NULL, &group);
if (result != MA_SUCCESS) {
printf("Failed to initialize sound group.");
return -1;
}
#if 1
loadNotification.cb.onSignal = on_sound_loaded;
loadNotification.pSound = &sound;
ma_sound_config soundConfig = ma_sound_config_init();
soundConfig.pFilePath = argv[1];
soundConfig.flags = MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE | MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC | MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM;
soundConfig.pInitialAttachment = &group;
soundConfig.loopPointBegInPCMFrames = 0;
soundConfig.loopPointEndInPCMFrames = 48000;
soundConfig.isLooping = MA_TRUE;
result = ma_sound_init_ex(&engine, &soundConfig, &sound);
if (result != MA_SUCCESS) {
printf("Failed to load sound: %s\n", argv[1]);
ma_engine_uninit(&engine);
return -1;
}
/*result = ma_sound_init_copy(&engine, &baseSound, 0, &group, &sound);
if (result != MA_SUCCESS) {
printf("Failed to copy sound.\n");
return -1;
}*/
#if 0
result = ma_sound_init_from_file(&engine, argv[1], MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_DECODE /*| MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_ASYNC | MA_RESOURCE_MANAGER_DATA_SOURCE_FLAG_STREAM*/, NULL, &sound2);
if (result != MA_SUCCESS) {
printf("Failed to load sound: %s\n", argv[1]);
ma_engine_uninit(&engine);
return -1;
}
#endif
/*ma_data_source_seek_to_pcm_frame(sound.pDataSource, 5000000);*/
//ma_sound_group_set_pan(ma_engine_get_master_sound_group(&engine), -1);
ma_sound_group_set_pitch(&group, 1.5f);
//ma_sound_group_set_start_time(ma_engine_get_master_sound_group(&engine), 2000);
//ma_sound_group_set_fade_in_milliseconds(&group, 0, 1, 5000);
//ma_sound_group_stop(&group);
//ma_engine_listener_set_enabled(&engine, 0, MA_FALSE);
//ma_sound_set_fade_in_milliseconds(&sound, 0, 1, 5000);
/*ma_sound_set_volume(&sound, 0.25f);*/
/*ma_sound_set_pitch(&sound, 1.1f);*/
/*ma_sound_set_pan(&sound, 0.0f);*/
ma_sound_set_looping(&sound, MA_TRUE);
//ma_data_source_set_range_in_pcm_frames(ma_sound_get_data_source(&sound), 0, 48000);
//ma_data_source_set_loop_point_in_pcm_frames(ma_sound_get_data_source(&sound), 0, 48000);
//ma_sound_seek_to_pcm_frame(&sound, 6000000);
//ma_sound_set_start_time(&sound, 1110);
//ma_sound_set_volume(&sound, 0.5f);
//ma_sound_set_fade_point_in_milliseconds(&sound, 0, 0, 1, 0, 2000);
//ma_sound_set_fade_point_auto_reset(&sound, 0, MA_FALSE); /* Enable fading around loop transitions. */
//ma_sound_set_fade_point_auto_reset(&sound, 1, MA_FALSE);
//ma_sound_set_stop_time(&sound, 1000);
//ma_sound_set_volume(&sound, 1);
//ma_sound_set_start_time(&sound, 48000);
ma_sound_set_position(&sound, 0, 0, -1);
//ma_sound_set_spatialization_enabled(&sound, MA_FALSE);
ma_sound_start(&sound);
/*ma_sound_uninit(&sound);*/
//ma_sleep(1000);
//ma_sound_set_looping(&sound2, MA_TRUE);
//ma_sound_set_volume(&sound2, 0.5f);
//ma_sound_start(&sound2);
//ma_sleep(2000);
//printf("Stopping...\n");
//ma_sound_stop(&sound);
//ma_sound_group_stop(ma_engine_get_master_sound_group(&engine));
#endif
#if 1
/*ma_engine_play_sound(&engine, argv[1], NULL);*/
/*ma_engine_play_sound(&engine, argv[2], NULL);
ma_engine_play_sound(&engine, argv[3], NULL);*/
#endif
#if 0
for (;;) {
ma_resource_manager_process_next_job(&resourceManager);
ma_sleep(5);
}
#endif
#if 1
float maxX = +1;
float minX = -1;
float posX = 0;
float posZ = -1.0f;
float step = 0.1f;
float stepAngle = 0.02f;
float angle = 0;
float pitch = 1;
float pitchStep = 0.01f;
float pitchMin = 0.125f;
float pitchMax = 2;
for (;;) {
pitch += pitchStep;
if (pitch < pitchMin) {
pitch = pitchMin;
pitchStep = -pitchStep;
}
if (pitch > pitchMax) {
pitch = pitchMax;
pitchStep = -pitchStep;
}
//ma_sound_group_set_pitch(ma_engine_get_master_sound_group(&engine), pitch);
//ma_sound_set_pitch(&sound, pitch);
//ma_sound_set_volume(&sound, pitch);
//ma_sound_set_pan(&sound, pitch);
//printf("Pitch: %f\n", pitch);
posX += step;
if (posX > maxX) {
posX = maxX;
step = -step;
} else if (posX < minX) {
posX = minX;
step = -step;
}
//ma_spatializer_set_position(&g_spatializer, ma_vec3f_init_3f(posX, 0, posZ));
ma_sound_set_position(&sound, 0, 0, -2);
ma_engine_listener_set_position(&engine, 0, 0, 0, -10);
ma_engine_listener_set_direction(&engine, 0, -1, 0, 0);
//ma_sound_set_velocity(&sound, step*1000, 0, 0);
ma_engine_listener_set_direction(&engine, 0, (float)ma_cosd(angle), 0, (float)ma_sind(angle));
angle += stepAngle;
ma_sleep(1);
}
#endif
printf("Press Enter to quit...");
getchar();
ma_sound_uninit(&sound);
ma_engine_uninit(&engine);
return 0;
}
/* stb_vorbis implementation must come after the implementation of miniaudio. */
#if defined(_MSC_VER) && !defined(__clang__)
#pragma warning(push)
#pragma warning(disable:4100) /* unreferenced formal parameter */
#pragma warning(disable:4244) /* '=': conversion from '' to '', possible loss of data */
#pragma warning(disable:4245) /* '=': conversion from '' to '', signed/unsigned mismatch */
#pragma warning(disable:4456) /* declaration of '' hides previous local declaration */
#pragma warning(disable:4457) /* declaration of '' hides function parameter */
#pragma warning(disable:4701) /* potentially uninitialized local variable '' used */
#else
#endif
#undef STB_VORBIS_HEADER_ONLY
#include "../extras/stb_vorbis.c"
#if defined(_MSC_VER) && !defined(__clang__)
#pragma warning(pop)
#else
#endif
/* !!! THIS FILE WILL BE MERGED INTO miniaudio.h WHEN COMPLETE !!! */
/*
EXPERIMENTAL
============
Everything in this file is experimental and subject to change. Some stuff isn't yet implemented, in particular spatialization. I've noted some ideas that are
basically straight off the top of my head - many of these are probably outright wrong or just generally bad ideas.
Very simple APIs for spatialization are declared but not yet implemented. They're just placeholders to give myself an idea of some of the API design.
The idea is that you have an `ma_engine` object - one per listener. Decoupled from that is the `ma_resource_manager` object. You can have one `ma_resource_manager`
object to many `ma_engine` objects. This will allow you to share resources between each listener. The `ma_engine` is responsible for the playback of audio from a
list of data sources. The `ma_resource_manager` is responsible for the actual loading, caching and unloading of those data sources. This decoupling is
something that I'm really liking right now and will likely stay in place for the final version.
You create "sounds" from the engine which represent a sound/voice in the world. You first need to create a sound, and then you need to start it. Sounds do not
start by default. You can use `ma_engine_play_sound()` to "fire and forget" sounds.
Sounds can be allocated to groups called `ma_sound_group`. This is how you can support submixing and is one way you could achieve the kinds of groupings you see
in games for things like SFX, Music and Voices. Unlike sounds, groups are started by default. When you stop a group, all sounds within that group will be
stopped atomically. When the group is started again, all sounds attached to the group will also be started, so long as the sound is also marked as started.
The creation and deletion of sounds and groups should be thread safe.
The engine runs on top of a node graph, and sounds and groups are just nodes within that graph. The output of a sound can be attached to the input of any node
on the graph. To apply an effect to a sound or group, attach its output to the input of an effect node. See the Routing Infrastructure section below for
details on this.
The best resource to use when understanding the API is the function declarations for `ma_engine`. I expect you should be able to figure it out! :)
*/
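/*
A minimal usage sketch of the intended API. This is illustrative only - the function names and
signatures are taken from the `ma_engine` declarations and may change while this file is
experimental, and the file paths are just placeholders:

    ma_engine engine;
    ma_sound sound;

    if (ma_engine_init(NULL, &engine) != MA_SUCCESS) {
        return -1;  // Failed to initialize the engine.
    }

    // Sounds are created from the engine and do not start by default.
    if (ma_sound_init_from_file(&engine, "my_sound.wav", 0, NULL, NULL, &sound) == MA_SUCCESS) {
        ma_sound_start(&sound);
    }

    // Fire and forget. No ma_sound object for the caller to manage.
    ma_engine_play_sound(&engine, "my_other_sound.wav", NULL);

    // ...

    ma_sound_uninit(&sound);
    ma_engine_uninit(&engine);
*/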
#ifndef miniaudio_engine_h
#define miniaudio_engine_h
#ifdef __cplusplus
extern "C" {
#endif
/*
Engine
======
The `ma_engine` API is a high-level API for audio playback. Internally it contains sounds (`ma_sound`) with resources managed via a resource manager
(`ma_resource_manager`).
Within the world there is the concept of a "listener". Each `ma_engine` instance has a single listener, but you can instantiate multiple `ma_engine` instances
if you need more than one listener. In this case you will want to share a resource manager which you can do by initializing one manually and passing it into
`ma_engine_config`. Using this method will require your application to manage groups and sounds on a per `ma_engine` basis.
*/
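/*
A sketch of sharing one resource manager between two listeners (two `ma_engine` instances). This
assumes `ma_engine_config` exposes a `pResourceManager` member as described above and is intended
as a guide only while the API is experimental:

    ma_resource_manager_config resourceManagerConfig;
    ma_resource_manager resourceManager;
    ma_engine_config engineConfig;
    ma_engine engines[2];

    resourceManagerConfig = ma_resource_manager_config_init();
    ma_resource_manager_init(&resourceManagerConfig, &resourceManager);

    engineConfig = ma_engine_config_init();
    engineConfig.pResourceManager = &resourceManager;  // Shared by both engines/listeners.

    ma_engine_init(&engineConfig, &engines[0]);
    ma_engine_init(&engineConfig, &engines[1]);

    // Each engine manages its own sounds and groups, but file data is loaded and cached once.

    ma_engine_uninit(&engines[1]);
    ma_engine_uninit(&engines[0]);
    ma_resource_manager_uninit(&resourceManager);      // Uninitialize last; the engines reference it.
*/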
#ifdef __cplusplus
}
#endif
#endif /* miniaudio_engine_h */
#if defined(MA_IMPLEMENTATION) || defined(MINIAUDIO_IMPLEMENTATION)
#endif
#define MINIAUDIO_IMPLEMENTATION
#include "../miniaudio.h"
#include "miniaudio_engine.h"
ma_node_graph g_nodeGraph;
ma_data_source_node g_dataSourceNode;
ma_splitter_node g_splitterNode;
ma_splitter_node g_loopNode; /* For testing loop detection. One of this node's output buses is routed back to g_splitterNode to form a loop. */
void data_callback(ma_device* pDevice, void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount)
{
/* Read straight from our node graph. */
ma_node_graph_read_pcm_frames(&g_nodeGraph, pFramesOut, frameCount, NULL);
(void)pDevice; /* Unused. */
(void)pFramesIn; /* Unused. */
}
int main(int argc, char** argv)
{
ma_result result;
ma_device_config deviceConfig;
ma_device device;
ma_decoder_config decoderConfig;
ma_decoder decoder;
ma_node_graph_config nodeGraphConfig;
ma_data_source_node_config dataSourceNodeConfig;
ma_splitter_node_config splitterNodeConfig;
if (argc <= 1) {
printf("No input file.");
return -1;
}
deviceConfig = ma_device_config_init(ma_device_type_playback);
deviceConfig.playback.format = ma_format_f32; /* The node graph API only supports f32. */
deviceConfig.playback.channels = 2;
deviceConfig.sampleRate = 48000;
deviceConfig.dataCallback = data_callback;
deviceConfig.pUserData = NULL;
result = ma_device_init(NULL, &deviceConfig, &device);
if (result != MA_SUCCESS) {
printf("Failed to initialize device.");
return -1;
}
/*
Set up the new graph before starting the device so that we have something to read from as soon
as the device requests data. It doesn't matter what order we do this, but I'm starting with the
data source node since it makes more logical sense to me to start with the start of the chain.
*/
nodeGraphConfig = ma_node_graph_config_init(device.playback.channels);
result = ma_node_graph_init(&nodeGraphConfig, NULL, &g_nodeGraph);
if (result != MA_SUCCESS) {
printf("Failed to initialize node graph.");
return -1;
}
/*
We want the decoder to use the same format as the device. This way we can keep the entire node
graph using the same format/channels/rate to avoid the need to do data conversion.
*/
decoderConfig = ma_decoder_config_init(device.playback.format, device.playback.channels, device.sampleRate);
result = ma_decoder_init_file(argv[1], &decoderConfig, &decoder);
if (result != MA_SUCCESS) {
printf("Failed to initialize decoder.");
ma_device_uninit(&device);
return -1;
}
dataSourceNodeConfig = ma_data_source_node_config_init(&decoder, MA_TRUE);
result = ma_data_source_node_init(&g_nodeGraph, &dataSourceNodeConfig, NULL, &g_dataSourceNode);
if (result != MA_SUCCESS) {
printf("Failed to initialize data source node.");
ma_decoder_uninit(&decoder);
ma_device_uninit(&device);
return -1;
}
/*result = ma_node_attach_output_bus(&g_dataSourceNode, 0, ma_node_graph_get_endpoint(&g_nodeGraph), 0);
if (result != MA_SUCCESS) {
printf("Failed to attach node.");
return -1;
}*/
/*ma_node_set_state_time(&g_dataSourceNode, ma_node_state_started, 48000*1);
ma_node_set_state_time(&g_dataSourceNode, ma_node_state_stopped, 48000*5);*/
#if 1
/*
Splitter node. The data source node gets attached to this further down, so this section also
tests that attaching and re-attaching output buses works as expected.
*/
splitterNodeConfig = ma_splitter_node_config_init(device.playback.channels);
/* Loop detection testing. */
result = ma_splitter_node_init(&g_nodeGraph, &splitterNodeConfig, NULL, &g_loopNode);
if (result != MA_SUCCESS) {
printf("Failed to initialize loop node.");
return -1;
}
    /* Set the volume of the loop node's output buses. Both are left at full volume for this test. */
ma_node_set_output_bus_volume(&g_loopNode, 0, 1.0f);
ma_node_set_output_bus_volume(&g_loopNode, 1, 1.0f);
result = ma_splitter_node_init(&g_nodeGraph, &splitterNodeConfig, NULL, &g_splitterNode);
if (result != MA_SUCCESS) {
printf("Failed to initialize splitter node.");
return -1;
}
#if 0
/* Connect both outputs of the splitter to the endpoint for now. Later on we'll test effects and whatnot. */
ma_node_attach_output_bus(&g_splitterNode, 0, ma_node_graph_get_endpoint(&g_nodeGraph), 0);
ma_node_attach_output_bus(&g_splitterNode, 1, ma_node_graph_get_endpoint(&g_nodeGraph), 0);
/* Adjust the volume of the splitter node's endpoints. We'll just do it 50/50 so that both of them combine to reproduce the original signal at the endpoint. */
ma_node_set_output_bus_volume(&g_splitterNode, 0, 0.5f);
ma_node_set_output_bus_volume(&g_splitterNode, 1, 0.5f);
    /* The data source needs to have its connection changed from the endpoint to the splitter. */
ma_node_attach_output_bus(&g_dataSourceNode, 0, &g_splitterNode, 0);
#else
/* Connect the loop node directly to the output. */
ma_node_attach_output_bus(&g_loopNode, 0, ma_node_graph_get_endpoint(&g_nodeGraph), 0);
ma_node_attach_output_bus(&g_loopNode, 1, ma_node_graph_get_endpoint(&g_nodeGraph), 0);
/* Connect the splitter node directly to the loop node. */
ma_node_attach_output_bus(&g_splitterNode, 0, &g_loopNode, 0);
ma_node_attach_output_bus(&g_splitterNode, 1, &g_loopNode, 1);
/* Connect the data source node to the splitter node. */
ma_node_attach_output_bus(&g_dataSourceNode, 0, &g_splitterNode, 0);
/* Now loop back to the splitter node to form a loop. */
ma_node_attach_output_bus(&g_loopNode, 1, &g_splitterNode, 0);
#endif
#endif
/* Stop the splitter node for testing. */
/*ma_node_set_state(&g_splitterNode, ma_node_state_stopped);*/
/*
    Only start the device after our nodes have been set up. The data callback reads straight from
    the global node graph, so everything needs to be initialized before the device starts pulling data.
*/
result = ma_device_start(&device);
if (result != MA_SUCCESS) {
ma_device_uninit(&device);
return -1;
}
printf("Press Enter to quit...");
getchar();
/* Teardown. These are uninitialized in a weird order here just for demonstration. */
/* We should be able to safely destroy the node while the device is still running. */
ma_data_source_node_uninit(&g_dataSourceNode, NULL);
/* The device needs to be stopped before we uninitialize the node graph or else the device's callback will try referencing the node graph. */
ma_device_uninit(&device);
    /* The node graph is referenced by the device's data callback, so it needs to be uninitialized after the device has stopped. */
ma_node_graph_uninit(&g_nodeGraph, NULL);
return 0;
}