/*
 * soundwave.cm - Standalone audio playback system
 *
 * Creates an audio player instance that manages voices and provides
 * mixed audio output. Platform-agnostic - the caller is responsible for
 * feeding the output to the audio device.
 *
 * USAGE:
 *
 *   var soundwave = use('soundwave/soundwave')
 *   var player = soundwave.create({
 *       sample_rate: 44100,
 *       channels: 2,
 *       frames_per_chunk: 1024
 *   })
 *
 *   // Load and decode audio (caller provides bytes and file path)
 *   var pcm = player.decode(bytes, "mysound.mp3")
 *
 *   // Play a sound
 *   var voice = player.play(pcm, { loop: true, vol: 0.5 })
 *   voice.stopped = true            // stop it
 *
 *   // Pull mixed audio frames for output
 *   var blob = player.pull(1024)    // returns stoned blob of f32 stereo samples
 *
 * OBJECTS:
 *
 *   Player - Audio player instance
 *     .sample_rate          - output sample rate (default 44100)
 *     .channels             - output channels (default 2)
 *     .frames_per_chunk     - default frames per pull (default 1024)
 *     .decode(bytes, path)  - decode audio bytes, returns PCM object
 *     .play(pcm, opts)      - play a PCM, returns Voice object
 *     .pull(frames)         - pull mixed audio, returns stoned blob
 *     .cleanup()            - remove finished voices
 *
 *   PCM - Decoded audio data
 *     .pcm          - stoned blob of f32 stereo samples at player's sample_rate
 *     .channels     - channel count (after conversion)
 *     .sample_rate  - sample rate (after conversion)
 *     .frames       - total frames in pcm blob
 *     .path         - source file path
 *
 *   Voice - A playing instance of a PCM
 *     .source       - reference to PCM object
 *     .pos          - current frame position (0-indexed)
 *     .vol          - volume 0.0-1.0 (default 1.0)
 *     .loop         - if true, loops when reaching end
 *     .stopped      - set to true to stop playback
 *     .finish_hook  - optional callback when voice finishes
 */

var wav = use('soundwave/wav')
var mp3 = use('soundwave/mp3')
var flac = use('soundwave/flac')
var dsp = use('soundwave/dsp')
var samplerate = use('libsamplerate/convert')
var Blob = use('blob')

var soundwave = {}

// Create a new audio player instance
soundwave.create = function(opts) {
    opts = opts || {}

    var player = {
        sample_rate: opts.sample_rate || 44100,
        channels: opts.channels || 2,
        frames_per_chunk: opts.frames_per_chunk || 1024,
        voices: [],
        pcm_cache: {}
    }

    var BYTES_PER_SAMPLE = 4    // f32

    // Normalize decoded audio to the player's output format
    function normalize_pcm(decoded, path) {
        var pcm = decoded.pcm
        var channels = decoded.channels || 1
        var rate = decoded.sample_rate || player.sample_rate

        // Resample if needed
        if (rate != player.sample_rate) {
            pcm = samplerate.resample(pcm, rate, player.sample_rate, channels)
        }

        // Convert mono to stereo if needed
        if (channels == 1 && player.channels == 2) {
            pcm = dsp.mono_to_stereo(pcm)
            channels = 2
        }

        // Calculate frames
        var bytes = pcm.length / 8    // blob.length is in bits
        var frames = bytes / (player.channels * BYTES_PER_SAMPLE)

        return {
            pcm: pcm,
            channels: player.channels,
            sample_rate: player.sample_rate,
            frames: frames,
            path: path
        }
    }

    // Decode audio bytes into PCM
    // bytes: blob of encoded audio data
    // path: file path (used to determine format and for caching)
    player.decode = function(bytes, path) {
        if (!bytes || !path) return null

        // Check cache
        if (player.pcm_cache[path]) return player.pcm_cache[path]

        var decoded = null
        if (path.endsWith('.wav')) {
            decoded = wav.decode(bytes)
        } else if (path.endsWith('.mp3')) {
            decoded = mp3.decode(bytes)
        } else if (path.endsWith('.flac')) {
            decoded = flac.decode(bytes)
        }

        if (decoded && decoded.pcm) {
            var normalized = normalize_pcm(decoded, path)
            player.pcm_cache[path] = normalized
            return normalized
        }

        return null
    }
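    // Example (sketch): decode() caches by path, so loading the same file
    // twice reuses the already-normalized PCM instead of re-decoding.
    // The file name below is illustrative only:
    //
    //   var a = player.decode(bytes, "boom.wav")
    //   var b = player.decode(bytes, "boom.wav")   // same cached PCM object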
    // Pull a chunk of audio from a voice, handling looping
    function pull_voice_chunk(voice, frames) {
        if (voice.stopped) return null

        var source = voice.source
        var total_frames = source.frames
        var pos = voice.pos
        var bytes_per_frame = player.channels * BYTES_PER_SAMPLE
        var bits_per_frame = bytes_per_frame * 8

        var out = new Blob()
        var frames_written = 0

        while (frames_written < frames) {
            if (pos >= total_frames) {
                if (voice.loop) {
                    pos = 0
                } else {
                    break
                }
            }

            var frames_available = total_frames - pos
            var frames_needed = frames - frames_written
            var frames_to_read = frames_available < frames_needed ? frames_available : frames_needed

            var start_bit = pos * bits_per_frame
            var end_bit = (pos + frames_to_read) * bits_per_frame
            var chunk = source.pcm.read_blob(start_bit, end_bit)
            out.write_blob(chunk)

            pos += frames_to_read
            frames_written += frames_to_read
        }

        voice.pos = pos

        // Pad with silence if needed
        if (frames_written < frames) {
            var silence_frames = frames - frames_written
            var silence = dsp.silence(silence_frames, player.channels)
            out.write_blob(silence)
        }

        stone(out)
        return out
    }

    // Remove finished voices
    player.cleanup = function() {
        var active = []
        for (var i = 0; i < player.voices.length; i++) {
            var v = player.voices[i]
            var done = v.stopped || (!v.loop && v.pos >= v.source.frames)
            if (!done) {
                active.push(v)
            } else if (v.finish_hook) {
                v.finish_hook()
            }
        }
        player.voices = active
    }

    // Play a PCM, returns a Voice object
    player.play = function(pcm, opts) {
        if (!pcm) return null

        var voice = {
            source: pcm,
            pos: 0,
            vol: 1.0,
            loop: false,
            stopped: false,
            finish_hook: null
        }

        if (opts) {
            if (opts.loop != null) voice.loop = opts.loop
            if (opts.vol != null) voice.vol = opts.vol
        }

        player.voices.push(voice)
        return voice
    }

    // Pull mixed audio frames
    // Returns a stoned blob of f32 samples (channels * frames * 4 bytes)
    player.pull = function(frames) {
        frames = frames || player.frames_per_chunk

        var blobs = []
        var vols = []
        for (var i = 0; i < player.voices.length; i++) {
            var v = player.voices[i]
            if (v.stopped) continue
            var chunk = pull_voice_chunk(v, frames)
            if (chunk) {
                blobs.push(chunk)
                vols.push(v.vol)
            }
        }

        var mixed
        if (blobs.length == 0) {
            mixed = dsp.silence(frames, player.channels)
        } else {
            mixed = dsp.mix_blobs(blobs, vols)
        }

        player.cleanup()
        return mixed
    }

    // Convenience: get number of active voices
    player.voice_count = function() {
        return player.voices.length
    }

    return player
}

return soundwave
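// Example (sketch): a simple playback loop built on pull()/voice_count().
// The output device API shown here (read_file, audio.open, dev.write) is
// hypothetical - soundwave only produces mixed frames, and feeding them to
// an audio device is the caller's job.
//
//   var soundwave = use('soundwave/soundwave')
//   var player = soundwave.create({ sample_rate: 44100, channels: 2 })
//
//   var pcm = player.decode(read_file("music.mp3"), "music.mp3")
//   player.play(pcm, { vol: 0.8 })
//
//   var dev = audio.open(player.sample_rate, player.channels)   // hypothetical device
//   while (player.voice_count() > 0) {
//       dev.write(player.pull(1024))    // stoned blob of f32 stereo samples
//   }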