razorxan

Reputation: 516

Web Audio API stream audio tags output via http as they are played

I'm working on an Electron app that takes song/voice requests via the Telegram Bot API and plays them through audio objects, jukebox/radio style.

What I'd like to achieve is live-streaming my app's audio output via HTTP to the clients that connect to the local (Node.js) server.

So basically I need to capture the PCM of all audio tags as they are played, mix them (and maybe encode the result to MP3?), and pipe the result to the clients. At least that's my idea for now.
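For context, here is roughly how I picture the mixing stage (untested, and the names are my own):

var audioContext = new AudioContext();

// one shared mix bus that every <audio> tag feeds into
var mixBus = audioContext.createGain();
mixBus.connect(audioContext.destination); // keep local playback

function addAudioTagToMix(audioElement) {
    // wrap the <audio> element as a Web Audio source node
    var source = audioContext.createMediaElementSource(audioElement);
    source.connect(mixBus); // sources connected to the same node are summed
}

// the mixed PCM would then be tapped from mixBus for encoding/streaming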

Unfortunately I'm stuck on capturing the audio objects' output. I read about RecordJs and how it can record audio from an AudioNode object, but I haven't yet found an example of streaming the mixed output of multiple audio tags.

Can you help me with this?

Upvotes: 1

Views: 1394

Answers (1)

Scott Stensland

Reputation: 28305

While the Web Audio API is rendering, the audio is raw PCM (uncompressed), held in a memory buffer which gets emptied and reloaded based on the allocated buffer size. You can intercept and copy this buffer into a process for downstream publication to clients.

Save the code below as an HTML file, then serve it from the same directory using

python -m SimpleHTTPServer

Point a browser at http://localhost:8000/ and pick your new HTML file ... the browser will prompt you to allow use of the microphone ... then view your JavaScript console (ctrl-shift-i) ... there you will see the first three elements of the FFT and time-domain audio array buffers ... in the code, search for

array_time_domain

which is your raw PCM audio (copying and sending it to subscribed clients is left as an exercise for the reader ;-)) ... comment out the FFT-related code if it is not needed, to lower CPU/battery drain
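One possible shape for that copy-and-send step (just a sketch; the WebSocket endpoint ws://localhost:8080 and the Node relay behind it are assumptions, not part of this page):

var pcm_socket = new WebSocket('ws://localhost:8080'); // assumed relay server

function forward_pcm(typed_array_chunk) {

    // slice() copies, so Web Audio can safely reuse the underlying buffer
    var copy = typed_array_chunk.slice();

    if (pcm_socket.readyState === WebSocket.OPEN) {
        pcm_socket.send(copy.buffer); // ships the raw bytes of the chunk
    }
}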

NOTE - the onaudioprocess callback is called repeatedly as audio is pumped through, so make sure the copy process mentioned above is very efficient and completes faster than the cycle period between audio buffer refreshes (hint: Web Worker)
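To act on that hint, the copy could be handed off to a Web Worker so onaudioprocess returns immediately (a sketch, assuming a worker file pcm_worker.js that you write yourself):

// main thread: hand each chunk to the worker
var pcm_worker = new Worker('pcm_worker.js'); // hypothetical worker file

function queue_pcm(typed_array_chunk) {
    var copy = typed_array_chunk.slice();
    // transfer the underlying ArrayBuffer - zero-copy handoff to the worker
    pcm_worker.postMessage(copy.buffer, [copy.buffer]);
}

// pcm_worker.js - relays each received ArrayBuffer to the server
var socket = new WebSocket('ws://localhost:8080'); // assumed relay endpoint
self.onmessage = function (event) {
    if (socket.readyState === WebSocket.OPEN) {
        socket.send(event.data);
    }
};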

Here I use the microphone as the input audio source. This inner callback rendering event loop is the same irrespective of the source audio.
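For your case, the microphone source in the full example below could be swapped for an audio tag like this (a sketch; the element id song_player is my invention):

var audio_tag = document.getElementById('song_player'); // assumed <audio> element
var element_stream = audioContext.createMediaElementSource(audio_tag);
element_stream.connect(gain_node);            // local playback
element_stream.connect(script_processor_node); // PCM capture
element_stream.connect(analyser_node);         // FFT, if wanted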

<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>capture microphone then show time & frequency domain output</title>

<script type="text/javascript">

var webaudio_tooling_obj = function () {

    var audioContext = new AudioContext();

    console.log("audio is starting up ...");

    var BUFF_SIZE_RENDERER = 16384;
    var SIZE_SHOW = 3; // number of array elements to show in console output

    var microphone_stream = null,
    gain_node = null,
    script_processor_node = null,
    script_processor_analysis_node = null,
    analyser_node = null;

    navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
        navigator.mozGetUserMedia || navigator.msGetUserMedia;

    if (navigator.getUserMedia){

        navigator.getUserMedia({audio:true}, 
            function(stream) {
                start_microphone(stream);
            },
            function(e) {
                alert('Error capturing audio.');
            }
            );

    } else { alert('getUserMedia not supported in this browser.'); }

    // ---

    function show_some_data(given_typed_array, num_row_to_display, label) {

        var size_buffer = given_typed_array.length;
        var index = 0;

        console.log("__________ " + label);

        if (label === "time") {

            for (; index < num_row_to_display && index < size_buffer; index += 1) {

                // map unsigned byte (0..255, 128 == silence) onto [-1.0, 1.0)
                var curr_value_time = (given_typed_array[index] / 128) - 1.0;

                console.log(curr_value_time);
            }

        } else if (label === "frequency") {

            for (; index < num_row_to_display && index < size_buffer; index += 1) {

                console.log(given_typed_array[index]);
            }

        } else {

            throw new Error("ERROR - must pass time or frequency");
        }
    }

    function process_microphone_buffer(event) {

        // raw float32 PCM for this render quantum - just mono (channel 0) for now
        var microphone_output_buffer = event.inputBuffer.getChannelData(0);
    }

    function start_microphone(stream){

        gain_node = audioContext.createGain();
        gain_node.connect( audioContext.destination );

        microphone_stream = audioContext.createMediaStreamSource(stream);
        microphone_stream.connect(gain_node); 

        script_processor_node = audioContext.createScriptProcessor(BUFF_SIZE_RENDERER, 1, 1);
        script_processor_node.onaudioprocess = process_microphone_buffer;

        microphone_stream.connect(script_processor_node);

        // a ScriptProcessorNode must be connected downstream or some browsers
        // never fire onaudioprocess; its output buffer stays silent, so this is safe
        script_processor_node.connect(audioContext.destination);

        // --- enable volume control for output speakers

        document.getElementById('volume').addEventListener('change', function() {

            var curr_volume = this.value;
            gain_node.gain.value = curr_volume;

            console.log("curr_volume ", curr_volume);
        });

        // --- setup FFT

        script_processor_analysis_node = audioContext.createScriptProcessor(2048, 1, 1);
        script_processor_analysis_node.connect(gain_node);

        analyser_node = audioContext.createAnalyser();
        analyser_node.smoothingTimeConstant = 0;
        analyser_node.fftSize = 2048;

        microphone_stream.connect(analyser_node);

        analyser_node.connect(script_processor_analysis_node);

        var buffer_length = analyser_node.frequencyBinCount;

        var array_freq_domain = new Uint8Array(buffer_length);
        var array_time_domain = new Uint8Array(buffer_length);

        console.log("buffer_length " + buffer_length);

        script_processor_analysis_node.onaudioprocess = function() {

            // snapshot the current frequency- and time-domain data for channel 0
            analyser_node.getByteFrequencyData(array_freq_domain);
            analyser_node.getByteTimeDomainData(array_time_domain);

            show_some_data(array_freq_domain, SIZE_SHOW, "frequency");
            show_some_data(array_time_domain, SIZE_SHOW, "time"); // store this to record to an aggregate buffer/file
        };
    }

}(); //  webaudio_tooling_obj = function()

</script>

</head>
<body>

    <p>Volume</p>
    <input id="volume" type="range" min="0" max="1" step="0.1" value="0.0"/>

</body>
</html>
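On the Node side, one way to fan those chunks out to HTTP listeners could look like this (a sketch only, using the ws npm package; the ports and raw-bytes framing are assumptions, and real clients would want an encoded container such as MP3 rather than raw PCM):

// relay.js - receive PCM chunks over WebSocket, fan them out over HTTP
var http = require('http');
var WebSocket = require('ws'); // npm install ws

var listeners = new Set();

// each HTTP client gets an endless chunked stream of the raw bytes
http.createServer(function (req, res) {
    res.writeHead(200, { 'Content-Type': 'application/octet-stream' });
    listeners.add(res);
    req.on('close', function () { listeners.delete(res); });
}).listen(8090);

// the browser page pushes PCM chunks into this WebSocket endpoint
var wss = new WebSocket.Server({ port: 8080 });
wss.on('connection', function (socket) {
    socket.on('message', function (chunk) {
        listeners.forEach(function (res) { res.write(chunk); });
    });
});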

Upvotes: 0
