Skip to content

Commit

Permalink
Remove native logging, reducing overhead and memory leaks from native callbacks
Browse files Browse the repository at this point in the history
  • Loading branch information
fgnm committed Dec 7, 2023
1 parent 8da19f4 commit a9fe9d4
Show file tree
Hide file tree
Showing 4 changed files with 120 additions and 83 deletions.
3 changes: 2 additions & 1 deletion CHANGES
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
[0.4]
- Update MiniAudio to 0.11.21
- [BREAK] Remove native logs and forward them to libGDX Logging
- Update MiniAudio to 0.11.22 [TBD]
- Update libGDX to 1.12.1
- Support MASound end callback
- Allow to disable low latency mode
Expand Down
10 changes: 10 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -158,6 +158,16 @@ miniAudio.setupAndroid(Context#getAssets());

You can safely pass `null` on other platforms.

### Proguard Rules
If your Java code is obfuscated (e.g. by ProGuard or R8), be sure to keep the JNI callback methods below, since they are invoked from native code by name and must not be renamed or stripped.

```
-keepclassmembers class games.rednblack.miniaudio.MiniAudio {
public void on_native_sound_end(long);
public void on_native_log(int, String);
}
```

### Effects Graph

MiniAudio comes with a powerful effects system based on graph design.
Expand Down
113 changes: 68 additions & 45 deletions jni/miniaudio.h
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
/*
Audio playback and capture library. Choice of public domain or MIT-0. See license statements at the end of this file.
miniaudio - v0.11.21 - 2023-11-15
miniaudio - v0.11.22 - TBD

David Reid - mackron@gmail.com

Expand Down Expand Up @@ -3723,7 +3723,7 @@ extern "C" {

#define MA_VERSION_MAJOR 0
#define MA_VERSION_MINOR 11
#define MA_VERSION_REVISION 21
#define MA_VERSION_REVISION 22
#define MA_VERSION_STRING MA_XSTRINGIFY(MA_VERSION_MAJOR) "." MA_XSTRINGIFY(MA_VERSION_MINOR) "." MA_XSTRINGIFY(MA_VERSION_REVISION)

#if defined(_MSC_VER) && !defined(__clang__)
Expand Down Expand Up @@ -7248,6 +7248,10 @@ struct ma_context_config
void* pUserData;
ma_allocation_callbacks allocationCallbacks;
struct
{
ma_handle hWnd; /* HWND. Optional window handle to pass into SetCooperativeLevel(). Will default to the foreground window, and if that fails, the desktop window. */
} dsound;
struct
{
ma_bool32 useVerboseDeviceEnumeration;
} alsa;
Expand Down Expand Up @@ -7336,6 +7340,7 @@ struct ma_context
#ifdef MA_SUPPORT_DSOUND
struct
{
ma_handle hWnd; /* Can be null. */
ma_handle hDSoundDLL;
ma_proc DirectSoundCreate;
ma_proc DirectSoundEnumerateA;
Expand Down Expand Up @@ -17939,6 +17944,10 @@ DEVICE I/O
#endif
#endif

/* This must be set to at least 26. */
#ifndef MA_AAUDIO_MIN_ANDROID_SDK_VERSION
#define MA_AAUDIO_MIN_ANDROID_SDK_VERSION 26
#endif


MA_API void ma_device_info_add_native_data_format(ma_device_info* pDeviceInfo, ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_uint32 flags)
Expand Down Expand Up @@ -18085,7 +18094,7 @@ MA_API ma_bool32 ma_is_backend_enabled(ma_backend backend)
#if defined(MA_HAS_AAUDIO)
#if defined(MA_ANDROID)
{
return ma_android_sdk_version() >= 31;
return ma_android_sdk_version() >= MA_AAUDIO_MIN_ANDROID_SDK_VERSION;
}
#else
return MA_FALSE;
Expand Down Expand Up @@ -18670,11 +18679,16 @@ static void ma_device__on_notification_rerouted(ma_device* pDevice)
#endif

#if defined(MA_EMSCRIPTEN)
EMSCRIPTEN_KEEPALIVE
void ma_device__on_notification_unlocked(ma_device* pDevice)
#ifdef __cplusplus
extern "C" {
#endif
void EMSCRIPTEN_KEEPALIVE ma_device__on_notification_unlocked(ma_device* pDevice)
{
ma_device__on_notification(ma_device_notification_init(pDevice, ma_device_notification_type_unlocked));
}
#ifdef __cplusplus
}
#endif
#endif


Expand Down Expand Up @@ -24024,9 +24038,12 @@ static ma_result ma_context_create_IDirectSound__dsound(ma_context* pContext, ma
}

/* The cooperative level must be set before doing anything else. */
hWnd = ((MA_PFN_GetForegroundWindow)pContext->win32.GetForegroundWindow)();
hWnd = (HWND)pContext->dsound.hWnd;
if (hWnd == 0) {
hWnd = ((MA_PFN_GetDesktopWindow)pContext->win32.GetDesktopWindow)();
hWnd = ((MA_PFN_GetForegroundWindow)pContext->win32.GetForegroundWindow)();
if (hWnd == 0) {
hWnd = ((MA_PFN_GetDesktopWindow)pContext->win32.GetDesktopWindow)();
}
}

hr = ma_IDirectSound_SetCooperativeLevel(pDirectSound, hWnd, (shareMode == ma_share_mode_exclusive) ? MA_DSSCL_EXCLUSIVE : MA_DSSCL_PRIORITY);
Expand Down Expand Up @@ -25343,6 +25360,8 @@ static ma_result ma_context_init__dsound(ma_context* pContext, const ma_context_
return MA_API_NOT_FOUND;
}

pContext->dsound.hWnd = pConfig->dsound.hWnd;

pCallbacks->onContextInit = ma_context_init__dsound;
pCallbacks->onContextUninit = ma_context_uninit__dsound;
pCallbacks->onContextEnumerateDevices = ma_context_enumerate_devices__dsound;
Expand Down Expand Up @@ -28063,6 +28082,7 @@ static ma_result ma_device_stop__alsa(ma_device* pDevice)
a small chance that our wakeupfd has not been cleared. We'll clear that out now if applicable.
*/
int resultPoll;
int resultRead;

if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Dropping capture device...\n");
Expand All @@ -28077,12 +28097,15 @@ static ma_result ma_device_stop__alsa(ma_device* pDevice)
ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Preparing capture device successful.\n");
}

/* Clear the wakeupfd. */
resultPoll = poll((struct pollfd*)pDevice->alsa.pPollDescriptorsCapture, 1, 0);
if (resultPoll > 0) {
ma_uint64 t;
read(((struct pollfd*)pDevice->alsa.pPollDescriptorsCapture)[0].fd, &t, sizeof(t));
}
/* Clear the wakeupfd. */
resultPoll = poll((struct pollfd*)pDevice->alsa.pPollDescriptorsCapture, 1, 0);
if (resultPoll > 0) {
ma_uint64 t;
resultRead = read(((struct pollfd*)pDevice->alsa.pPollDescriptorsCapture)[0].fd, &t, sizeof(t));
if (resultRead != sizeof(t)) {
ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Failed to read from capture wakeupfd. read() = %d\n", resultRead);
}
}
}

if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
Expand All @@ -28099,11 +28122,14 @@ static ma_result ma_device_stop__alsa(ma_device* pDevice)
}

/* Clear the wakeupfd. */
resultPoll = poll((struct pollfd*)pDevice->alsa.pPollDescriptorsPlayback, 1, 0);
if (resultPoll > 0) {
ma_uint64 t;
read(((struct pollfd*)pDevice->alsa.pPollDescriptorsPlayback)[0].fd, &t, sizeof(t));
}
resultPoll = poll((struct pollfd*)pDevice->alsa.pPollDescriptorsPlayback, 1, 0);
if (resultPoll > 0) {
ma_uint64 t;
resultRead = read(((struct pollfd*)pDevice->alsa.pPollDescriptorsPlayback)[0].fd, &t, sizeof(t));
if (resultRead != sizeof(t)) {
ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_DEBUG, "[ALSA] Failed to read from playback wakeupfd. read() = %d\n", resultRead);
}
}

}

Expand Down Expand Up @@ -39774,7 +39800,7 @@ static ma_result ma_device_uninit__webaudio(ma_device* pDevice)
#if defined(MA_USE_AUDIO_WORKLETS)
{
EM_ASM({
var device = miniaudio.get_device_by_index($0);
var device = window.miniaudio.get_device_by_index($0);

if (device.streamNode !== undefined) {
device.streamNode.disconnect();
Expand All @@ -39789,7 +39815,7 @@ static ma_result ma_device_uninit__webaudio(ma_device* pDevice)
#else
{
EM_ASM({
var device = miniaudio.get_device_by_index($0);
var device = window.miniaudio.get_device_by_index($0);

/* Make sure all nodes are disconnected and marked for collection. */
if (device.scriptNode !== undefined) {
Expand All @@ -39816,7 +39842,7 @@ static ma_result ma_device_uninit__webaudio(ma_device* pDevice)

/* Clean up the device on the JS side. */
EM_ASM({
miniaudio.untrack_device_by_index($0);
window.miniaudio.untrack_device_by_index($0);
}, pDevice->webaudio.deviceIndex);

ma_free(pDevice->webaudio.pIntermediaryBuffer, &pDevice->pContext->allocationCallbacks);
Expand Down Expand Up @@ -39981,7 +40007,6 @@ static void ma_audio_worklet_processor_created__webaudio(EMSCRIPTEN_WEBAUDIO_T a
return;
}


pParameters->pDevice->webaudio.audioWorklet = emscripten_create_wasm_audio_worklet_node(audioContext, "miniaudio", &audioWorkletOptions, &ma_audio_worklet_process_callback__webaudio, pParameters->pDevice);

/* With the audio worklet initialized we can now attach it to the graph. */
Expand Down Expand Up @@ -40121,7 +40146,6 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co
/* It's not clear if this can return an error. None of the tests in the Emscripten repository check for this, so neither am I for now. */
pDevice->webaudio.audioContext = emscripten_create_audio_context(&audioContextAttributes);


/*
With the context created we can now create the worklet. We can only have a single worklet per audio
context which means we'll need to craft this appropriately to handle duplex devices correctly.
Expand Down Expand Up @@ -40170,7 +40194,7 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co

/* We need to add an entry to the miniaudio.devices list on the JS side so we can do some JS/C interop. */
pDevice->webaudio.deviceIndex = EM_ASM_INT({
return miniaudio.track_device({
return window.miniaudio.track_device({
webaudio: emscriptenGetAudioObject($0),
state: 1 /* 1 = ma_device_state_stopped */
});
Expand Down Expand Up @@ -40255,11 +40279,11 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co
/* The node processing callback. */
device.scriptNode.onaudioprocess = function(e) {
if (device.intermediaryBufferView == null || device.intermediaryBufferView.length == 0) {
device.intermediaryBufferView = new Float32Array(Module.HEAPF32.buffer, pIntermediaryBuffer, bufferSize * channels);
device.intermediaryBufferView = new Float32Array(HEAPF32.buffer, pIntermediaryBuffer, bufferSize * channels);
}

/* Do the capture side first. */
if (deviceType == miniaudio.device_type.capture || deviceType == miniaudio.device_type.duplex) {
if (deviceType == window.miniaudio.device_type.capture || deviceType == window.miniaudio.device_type.duplex) {
/* The data must be interleaved before being processed miniaudio. */
for (var iChannel = 0; iChannel < channels; iChannel += 1) {
var inputBuffer = e.inputBuffer.getChannelData(iChannel);
Expand All @@ -40273,7 +40297,7 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co
_ma_device_process_pcm_frames_capture__webaudio(pDevice, bufferSize, pIntermediaryBuffer);
}

if (deviceType == miniaudio.device_type.playback || deviceType == miniaudio.device_type.duplex) {
if (deviceType == window.miniaudio.device_type.playback || deviceType == window.miniaudio.device_type.duplex) {
_ma_device_process_pcm_frames_playback__webaudio(pDevice, bufferSize, pIntermediaryBuffer);

for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) {
Expand All @@ -40293,7 +40317,7 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co
};

/* Now we need to connect our node to the graph. */
if (deviceType == miniaudio.device_type.capture || deviceType == miniaudio.device_type.duplex) {
if (deviceType == window.miniaudio.device_type.capture || deviceType == window.miniaudio.device_type.duplex) {
navigator.mediaDevices.getUserMedia({audio:true, video:false})
.then(function(stream) {
device.streamNode = device.webaudio.createMediaStreamSource(stream);
Expand All @@ -40305,13 +40329,13 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co
});
}

if (deviceType == miniaudio.device_type.playback) {
if (deviceType == window.miniaudio.device_type.playback) {
device.scriptNode.connect(device.webaudio.destination);
}

device.pDevice = pDevice;

return miniaudio.track_device(device);
return window.miniaudio.track_device(device);
}, pConfig->deviceType, channels, sampleRate, periodSizeInFrames, pDevice->webaudio.pIntermediaryBuffer, pDevice);

if (deviceIndex < 0) {
Expand All @@ -40321,7 +40345,7 @@ static ma_result ma_device_init__webaudio(ma_device* pDevice, const ma_device_co
pDevice->webaudio.deviceIndex = deviceIndex;

/* Grab the sample rate from the audio context directly. */
sampleRate = (ma_uint32)EM_ASM_INT({ return miniaudio.get_device_by_index($0).webaudio.sampleRate; }, deviceIndex);
sampleRate = (ma_uint32)EM_ASM_INT({ return window.miniaudio.get_device_by_index($0).webaudio.sampleRate; }, deviceIndex);

if (pDescriptorCapture != NULL) {
pDescriptorCapture->format = ma_format_f32;
Expand Down Expand Up @@ -40351,9 +40375,9 @@ static ma_result ma_device_start__webaudio(ma_device* pDevice)
MA_ASSERT(pDevice != NULL);

EM_ASM({
var device = miniaudio.get_device_by_index($0);
var device = window.miniaudio.get_device_by_index($0);
device.webaudio.resume();
device.state = miniaudio.device_state.started;
device.state = window.miniaudio.device_state.started;
}, pDevice->webaudio.deviceIndex);

return MA_SUCCESS;
Expand All @@ -40373,9 +40397,9 @@ static ma_result ma_device_stop__webaudio(ma_device* pDevice)
do any kind of explicit draining.
*/
EM_ASM({
var device = miniaudio.get_device_by_index($0);
var device = window.miniaudio.get_device_by_index($0);
device.webaudio.suspend();
device.state = miniaudio.device_state.stopped;
device.state = window.miniaudio.device_state.stopped;
}, pDevice->webaudio.deviceIndex);

ma_device__on_notification_stopped(pDevice);
Expand Down Expand Up @@ -40434,6 +40458,7 @@ static ma_result ma_context_init__webaudio(ma_context* pContext, const ma_contex
window.miniaudio.device_state.started = $4;

/* Device cache for mapping devices to indexes for JavaScript/C interop. */
let miniaudio = window.miniaudio;
miniaudio.devices = [];

miniaudio.track_device = function(device) {
Expand Down Expand Up @@ -40485,13 +40510,13 @@ static ma_result ma_context_init__webaudio(ma_context* pContext, const ma_contex
var device = miniaudio.devices[i];
if (device != null &&
device.webaudio != null &&
device.state === window.miniaudio.device_state.started) {
device.state === miniaudio.device_state.started) {

device.webaudio.resume().then(() => {
Module._ma_device__on_notification_unlocked(device.pDevice);
},
(error) => {console.error("Failed to resume audiocontext", error);
});
_ma_device__on_notification_unlocked(device.pDevice);
},
(error) => {console.error("Failed to resume audiocontext", error);
});
}
}
miniaudio.unlock_event_types.map(function(event_type) {
Expand Down Expand Up @@ -59758,7 +59783,7 @@ extern "C" {
#define MA_DR_WAV_XSTRINGIFY(x) MA_DR_WAV_STRINGIFY(x)
#define MA_DR_WAV_VERSION_MAJOR 0
#define MA_DR_WAV_VERSION_MINOR 13
#define MA_DR_WAV_VERSION_REVISION 13
#define MA_DR_WAV_VERSION_REVISION 14
#define MA_DR_WAV_VERSION_STRING MA_DR_WAV_XSTRINGIFY(MA_DR_WAV_VERSION_MAJOR) "." MA_DR_WAV_XSTRINGIFY(MA_DR_WAV_VERSION_MINOR) "." MA_DR_WAV_XSTRINGIFY(MA_DR_WAV_VERSION_REVISION)
#include <stddef.h>
#define MA_DR_WAVE_FORMAT_PCM 0x1
Expand Down Expand Up @@ -72275,7 +72300,7 @@ static ma_result ma_node_detach_full(ma_node* pNode)
linked list logic. We don't need to worry about the audio thread referencing these because the step
above severed the connection to the graph.
*/
for (pOutputBus = (ma_node_output_bus*)ma_atomic_load_ptr(&pInputBus->head.pNext); pOutputBus != NULL; pOutputBus = (ma_node_output_bus*)ma_atomic_load_ptr(&pOutputBus->pNext)) {
for (pOutputBus = (ma_node_output_bus*)ma_atomic_load_ptr(&pInputBus->head.pNext); pOutputBus != NULL; pOutputBus = (ma_node_output_bus*)ma_atomic_load_ptr(&pInputBus->head.pNext)) {
ma_node_detach_output_bus(pOutputBus->pNode, pOutputBus->outputBusIndex); /* This won't do any waiting in practice and should be efficient. */
}
}
Expand Down Expand Up @@ -78882,9 +78907,7 @@ MA_PRIVATE ma_bool32 ma_dr_wav_init__internal(ma_dr_wav* pWav, ma_dr_wav_chunk_p
}
}
if (isProcessingMetadata) {
ma_uint64 metadataBytesRead;
metadataBytesRead = ma_dr_wav__metadata_process_chunk(&metadataParser, &header, ma_dr_wav_metadata_type_all_including_unknown);
MA_DR_WAV_ASSERT(metadataBytesRead <= header.sizeInBytes);
ma_dr_wav__metadata_process_chunk(&metadataParser, &header, ma_dr_wav_metadata_type_all_including_unknown);
if (ma_dr_wav__seek_from_start(pWav->onSeek, cursor, pWav->pUserData) == MA_FALSE) {
break;
}
Expand Down
Loading

0 comments on commit a9fe9d4

Please sign in to comment.