SDL_emscriptenaudio.c (15319B)
/*
  Simple DirectMedia Layer
  Copyright (C) 1997-2020 Sam Lantinga <slouken@libsdl.org>

  This software is provided 'as-is', without any express or implied
  warranty. In no event will the authors be held liable for any damages
  arising from the use of this software.

  Permission is granted to anyone to use this software for any purpose,
  including commercial applications, and to alter it and redistribute it
  freely, subject to the following restrictions:

  1. The origin of this software must not be misrepresented; you must not
     claim that you wrote the original software. If you use this software
     in a product, an acknowledgment in the product documentation would be
     appreciated but is not required.
  2. Altered source versions must be plainly marked as such, and must not be
     misrepresented as being the original software.
  3. This notice may not be removed or altered from any source distribution.
*/
#include "../../SDL_internal.h"

#if SDL_AUDIO_DRIVER_EMSCRIPTEN

#include "SDL_audio.h"
#include "../SDL_audio_c.h"
#include "SDL_emscriptenaudio.h"

#include <emscripten/emscripten.h>

static void
FeedAudioDevice(_THIS, const void *buf, const int buflen)
{
    const int framelen = (SDL_AUDIO_BITSIZE(this->spec.format) / 8) * this->spec.channels;
    EM_ASM_ARGS({
        var SDL2 = Module['SDL2'];
        var numChannels = SDL2.audio.currentOutputBuffer['numberOfChannels'];
        for (var c = 0; c < numChannels; ++c) {
            var channelData = SDL2.audio.currentOutputBuffer['getChannelData'](c);
            if (channelData.length != $1) {
                throw 'Web Audio output buffer length mismatch! Destination size: ' + channelData.length + ' samples vs expected ' + $1 + ' samples!';
            }

            for (var j = 0; j < $1; ++j) {
                channelData[j] = HEAPF32[$0 + ((j*numChannels + c) << 2) >> 2];  /* !!! FIXME: why are these shifts here? */
            }
        }
    }, buf, buflen / framelen);
}
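
/* A note on the FIXME above: $0 is the byte address of the interleaved
   float buffer, while HEAPF32 is a Float32Array view of the Emscripten
   heap and is therefore indexed in 4-byte units. The "<< 2" turns the
   sample index (j*numChannels + c) into a byte offset, and the ">> 2"
   turns the resulting byte address back into a HEAPF32 index; assuming
   the buffer is 4-byte aligned, this appears to be equivalent to the
   plainer HEAPF32[($0 >> 2) + (j*numChannels + c)]. */

/* Output path: called from the output ScriptProcessorNode's onaudioprocess
   handler. Pulls one device buffer's worth of audio from the app callback
   (converting through an SDL_AudioStream if the app's format differs from
   the device format) and pushes it to Web Audio via FeedAudioDevice(). */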
static void
HandleAudioProcess(_THIS)
{
    SDL_AudioCallback callback = this->callbackspec.callback;
    const int stream_len = this->callbackspec.size;

    /* Only do something if audio is enabled */
    if (!SDL_AtomicGet(&this->enabled) || SDL_AtomicGet(&this->paused)) {
        if (this->stream) {
            SDL_AudioStreamClear(this->stream);
        }
        return;
    }

    if (this->stream == NULL) {  /* no conversion necessary. */
        SDL_assert(this->spec.size == stream_len);
        callback(this->callbackspec.userdata, this->work_buffer, stream_len);
    } else {  /* streaming/converting */
        int got;
        while (SDL_AudioStreamAvailable(this->stream) < ((int) this->spec.size)) {
            callback(this->callbackspec.userdata, this->work_buffer, stream_len);
            if (SDL_AudioStreamPut(this->stream, this->work_buffer, stream_len) == -1) {
                SDL_AudioStreamClear(this->stream);
                SDL_AtomicSet(&this->enabled, 0);
                break;
            }
        }

        got = SDL_AudioStreamGet(this->stream, this->work_buffer, this->spec.size);
        SDL_assert((got < 0) || (got == this->spec.size));
        if (got != this->spec.size) {
            SDL_memset(this->work_buffer, this->spec.silence, this->spec.size);
        }
    }

    FeedAudioDevice(this, this->work_buffer, this->spec.size);
}
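
/* Capture path: called from the capture ScriptProcessorNode's onaudioprocess
   handler (or from the silence timer while waiting for microphone access).
   Copies the planar Web Audio capture buffer into work_buffer as interleaved
   float32 samples, then feeds the app callback, converting through an
   SDL_AudioStream first if the app requested a different format. */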
static void
HandleCaptureProcess(_THIS)
{
    SDL_AudioCallback callback = this->callbackspec.callback;
    const int stream_len = this->callbackspec.size;

    /* Only do something if audio is enabled */
    if (!SDL_AtomicGet(&this->enabled) || SDL_AtomicGet(&this->paused)) {
        SDL_AudioStreamClear(this->stream);
        return;
    }

    EM_ASM_ARGS({
        var SDL2 = Module['SDL2'];
        var numChannels = SDL2.capture.currentCaptureBuffer.numberOfChannels;
        for (var c = 0; c < numChannels; ++c) {
            var channelData = SDL2.capture.currentCaptureBuffer.getChannelData(c);
            if (channelData.length != $1) {
                throw 'Web Audio capture buffer length mismatch! Destination size: ' + channelData.length + ' samples vs expected ' + $1 + ' samples!';
            }

            if (numChannels == 1) {  /* fastpath this a little for the common (mono) case. */
                for (var j = 0; j < $1; ++j) {
                    setValue($0 + (j * 4), channelData[j], 'float');
                }
            } else {
                for (var j = 0; j < $1; ++j) {
                    setValue($0 + (((j * numChannels) + c) * 4), channelData[j], 'float');
                }
            }
        }
    }, this->work_buffer, (this->spec.size / sizeof (float)) / this->spec.channels);

    /* okay, we've got an interleaved float32 array in C now. */

    if (this->stream == NULL) {  /* no conversion necessary. */
        SDL_assert(this->spec.size == stream_len);
        callback(this->callbackspec.userdata, this->work_buffer, stream_len);
    } else {  /* streaming/converting */
        if (SDL_AudioStreamPut(this->stream, this->work_buffer, this->spec.size) == -1) {
            SDL_AtomicSet(&this->enabled, 0);
        }

        while (SDL_AudioStreamAvailable(this->stream) >= stream_len) {
            const int got = SDL_AudioStreamGet(this->stream, this->work_buffer, stream_len);
            SDL_assert((got < 0) || (got == stream_len));
            if (got != stream_len) {
                SDL_memset(this->work_buffer, this->callbackspec.silence, stream_len);
            }
            callback(this->callbackspec.userdata, this->work_buffer, stream_len);  /* Send it to the app. */
        }
    }
}


static void
EMSCRIPTENAUDIO_CloseDevice(_THIS)
{
    EM_ASM_({
        var SDL2 = Module['SDL2'];
        if ($0) {
            if (SDL2.capture.silenceTimer !== undefined) {
                clearTimeout(SDL2.capture.silenceTimer);
            }
            if (SDL2.capture.stream !== undefined) {
                var tracks = SDL2.capture.stream.getAudioTracks();
                for (var i = 0; i < tracks.length; i++) {
                    SDL2.capture.stream.removeTrack(tracks[i]);
                }
                SDL2.capture.stream = undefined;
            }
            if (SDL2.capture.scriptProcessorNode !== undefined) {
                SDL2.capture.scriptProcessorNode.onaudioprocess = function(audioProcessingEvent) {};
                SDL2.capture.scriptProcessorNode.disconnect();
                SDL2.capture.scriptProcessorNode = undefined;
            }
            if (SDL2.capture.mediaStreamNode !== undefined) {
                SDL2.capture.mediaStreamNode.disconnect();
                SDL2.capture.mediaStreamNode = undefined;
            }
            if (SDL2.capture.silenceBuffer !== undefined) {
                SDL2.capture.silenceBuffer = undefined;
            }
            SDL2.capture = undefined;
        } else {
            if (SDL2.audio.scriptProcessorNode != undefined) {
                SDL2.audio.scriptProcessorNode.disconnect();
                SDL2.audio.scriptProcessorNode = undefined;
            }
            SDL2.audio = undefined;
        }
        if ((SDL2.audioContext !== undefined) && (SDL2.audio === undefined) && (SDL2.capture === undefined)) {
            SDL2.audioContext.close();
            SDL2.audioContext = undefined;
        }
    }, this->iscapture);

#if 0  /* !!! FIXME: currently not used. Can we move some stuff off the SDL2 namespace? --ryan. */
    SDL_free(this->hidden);
#endif
}
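
/* Note that the per-device state lives on the JavaScript side, in
   Module['SDL2'].audio and Module['SDL2'].capture, rather than in
   this->hidden (see the FIXMEs above and below); OpenDevice only stores a
   dummy non-NULL value in this->hidden. */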
static int
EMSCRIPTENAUDIO_OpenDevice(_THIS, void *handle, const char *devname, int iscapture)
{
    SDL_bool valid_format = SDL_FALSE;
    SDL_AudioFormat test_format;
    int result;

    /* based on parts of library_sdl.js */

    /* create context */
    result = EM_ASM_INT({
        if (typeof(Module['SDL2']) === 'undefined') {
            Module['SDL2'] = {};
        }
        var SDL2 = Module['SDL2'];
        if (!$0) {
            SDL2.audio = {};
        } else {
            SDL2.capture = {};
        }

        if (!SDL2.audioContext) {
            if (typeof(AudioContext) !== 'undefined') {
                SDL2.audioContext = new AudioContext();
            } else if (typeof(webkitAudioContext) !== 'undefined') {
                SDL2.audioContext = new webkitAudioContext();
            }
        }
        return SDL2.audioContext === undefined ? -1 : 0;
    }, iscapture);
    if (result < 0) {
        return SDL_SetError("Web Audio API is not available!");
    }

    test_format = SDL_FirstAudioFormat(this->spec.format);
    while ((!valid_format) && (test_format)) {
        switch (test_format) {
        case AUDIO_F32: /* web audio only supports floats */
            this->spec.format = test_format;

            valid_format = SDL_TRUE;
            break;
        }
        test_format = SDL_NextAudioFormat();
    }

    if (!valid_format) {
        /* Didn't find a compatible format :( */
        return SDL_SetError("No compatible audio format!");
    }

    /* Initialize all variables that we clean on shutdown */
#if 0  /* !!! FIXME: currently not used. Can we move some stuff off the SDL2 namespace? --ryan. */
    this->hidden = (struct SDL_PrivateAudioData *)
        SDL_malloc((sizeof *this->hidden));
    if (this->hidden == NULL) {
        return SDL_OutOfMemory();
    }
    SDL_zerop(this->hidden);
#endif
    this->hidden = (struct SDL_PrivateAudioData *)0x1;

    /* limit to native freq */
    this->spec.freq = EM_ASM_INT_V({
        var SDL2 = Module['SDL2'];
        return SDL2.audioContext.sampleRate;
    });

    SDL_CalculateAudioSpec(&this->spec);

    if (iscapture) {
        /* The idea is to take the capture media stream, hook it up to an
           audio graph where we can pass it through a ScriptProcessorNode
           to access the raw PCM samples and push them to the SDL app's
           callback. From there, we "process" the audio data into silence
           and forget about it. */

        /* This should, strictly speaking, use MediaRecorder for capture, but
           this API is cleaner to use and better supported, and fires a
           callback whenever there's enough data to fire down into the app.
           The downside is that we are spending CPU time silencing a buffer
           that the audiocontext uselessly mixes into any output. On the
           upside, both of those things are not only run in native code in
           the browser, they're probably SIMD code, too. MediaRecorder
           feels like it's a pretty inefficient tapdance in similar ways,
           to be honest. */
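
        /* Roughly, the graph built below once the microphone is granted is:

               getUserMedia stream -> MediaStreamSource -> ScriptProcessorNode -> destination

           where the ScriptProcessorNode's onaudioprocess handler zeroes its
           (mono) output and hands the input buffer to HandleCaptureProcess
           via dynCall. Until the user grants access, a setTimeout-driven
           silence callback feeds a silent buffer through the same C handler. */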
        EM_ASM_({
            var SDL2 = Module['SDL2'];
            var have_microphone = function(stream) {
                //console.log('SDL audio capture: we have a microphone! Replacing silence callback.');
                if (SDL2.capture.silenceTimer !== undefined) {
                    clearTimeout(SDL2.capture.silenceTimer);
                    SDL2.capture.silenceTimer = undefined;
                }
                SDL2.capture.mediaStreamNode = SDL2.audioContext.createMediaStreamSource(stream);
                SDL2.capture.scriptProcessorNode = SDL2.audioContext.createScriptProcessor($1, $0, 1);
                SDL2.capture.scriptProcessorNode.onaudioprocess = function(audioProcessingEvent) {
                    if ((SDL2 === undefined) || (SDL2.capture === undefined)) { return; }
                    audioProcessingEvent.outputBuffer.getChannelData(0).fill(0.0);
                    SDL2.capture.currentCaptureBuffer = audioProcessingEvent.inputBuffer;
                    dynCall('vi', $2, [$3]);
                };
                SDL2.capture.mediaStreamNode.connect(SDL2.capture.scriptProcessorNode);
                SDL2.capture.scriptProcessorNode.connect(SDL2.audioContext.destination);
                SDL2.capture.stream = stream;
            };

            var no_microphone = function(error) {
                //console.log('SDL audio capture: we DO NOT have a microphone! (' + error.name + ')...leaving silence callback running.');
            };

            /* we write silence to the audio callback until the microphone is available (user approves use, etc). */
            SDL2.capture.silenceBuffer = SDL2.audioContext.createBuffer($0, $1, SDL2.audioContext.sampleRate);
            SDL2.capture.silenceBuffer.getChannelData(0).fill(0.0);
            var silence_callback = function() {
                SDL2.capture.currentCaptureBuffer = SDL2.capture.silenceBuffer;
                dynCall('vi', $2, [$3]);
            };

            SDL2.capture.silenceTimer = setTimeout(silence_callback, ($1 / SDL2.audioContext.sampleRate) * 1000);

            if ((navigator.mediaDevices !== undefined) && (navigator.mediaDevices.getUserMedia !== undefined)) {
                navigator.mediaDevices.getUserMedia({ audio: true, video: false }).then(have_microphone).catch(no_microphone);
            } else if (navigator.webkitGetUserMedia !== undefined) {
                navigator.webkitGetUserMedia({ audio: true, video: false }, have_microphone, no_microphone);
            }
        }, this->spec.channels, this->spec.samples, HandleCaptureProcess, this);
    } else {
        /* setup a ScriptProcessorNode */
        EM_ASM_ARGS({
            var SDL2 = Module['SDL2'];
            SDL2.audio.scriptProcessorNode = SDL2.audioContext['createScriptProcessor']($1, 0, $0);
            SDL2.audio.scriptProcessorNode['onaudioprocess'] = function (e) {
                if ((SDL2 === undefined) || (SDL2.audio === undefined)) { return; }
                SDL2.audio.currentOutputBuffer = e['outputBuffer'];
                dynCall('vi', $2, [$3]);
            };
            SDL2.audio.scriptProcessorNode['connect'](SDL2.audioContext['destination']);
        }, this->spec.channels, this->spec.samples, HandleAudioProcess, this);
    }

    return 0;
}

static int
EMSCRIPTENAUDIO_Init(SDL_AudioDriverImpl * impl)
{
    int available;
    int capture_available;

    /* Set the function pointers */
    impl->OpenDevice = EMSCRIPTENAUDIO_OpenDevice;
    impl->CloseDevice = EMSCRIPTENAUDIO_CloseDevice;

    impl->OnlyHasDefaultOutputDevice = 1;

    /* no threads here */
    impl->SkipMixerLock = 1;
    impl->ProvidesOwnCallbackThread = 1;

    /* check availability */
    available = EM_ASM_INT_V({
        if (typeof(AudioContext) !== 'undefined') {
            return 1;
        } else if (typeof(webkitAudioContext) !== 'undefined') {
            return 1;
        }
        return 0;
    });

    if (!available) {
        SDL_SetError("No audio context available");
    }

    capture_available = available && EM_ASM_INT_V({
        if ((typeof(navigator.mediaDevices) !== 'undefined') && (typeof(navigator.mediaDevices.getUserMedia) !== 'undefined')) {
            return 1;
        } else if (typeof(navigator.webkitGetUserMedia) !== 'undefined') {
            return 1;
        }
        return 0;
    });

    impl->HasCaptureSupport = capture_available ? SDL_TRUE : SDL_FALSE;
    impl->OnlyHasDefaultCaptureDevice = capture_available ? SDL_TRUE : SDL_FALSE;

    return available;
}

AudioBootStrap EMSCRIPTENAUDIO_bootstrap = {
    "emscripten", "SDL emscripten audio driver", EMSCRIPTENAUDIO_Init, 0
};

#endif /* SDL_AUDIO_DRIVER_EMSCRIPTEN */

/* vi: set ts=4 sw=4 expandtab: */