user5182503
user5182503

Reputation:

HTML 5: AudioContext AudioBuffer

I need to understand how the audio buffer works, and to do that I want to build the following chain: Microphone -> Auto -> Processor -> Manual -> Buffer -> Auto -> Speakers. "Auto" means automatic data transfer, while the "manual" step I perform myself via the code in processor.onaudioprocess. So I have the following code:

navigator.getUserMedia = navigator.getUserMedia ||navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
var audioContext;
var myAudioBuffer;
var microphone;
var speakers;
if (navigator.getUserMedia) {
    navigator.getUserMedia(
        {audio: true}, 
        function(stream) {
            audioContext = new AudioContext();
            //STEP 1 - we create the buffer
            speakers = audioContext.destination;
            // BUG FIX: size the buffer to one processing quantum (4096 frames)
            // at the context's real sample rate; the old 22050@44100 buffer was
            // started once below, while still silent, and never re-triggered.
            myAudioBuffer = audioContext.createBuffer(1, 4096, audioContext.sampleRate);

            //STEP 2- we create microphone and processor
            microphone = audioContext.createMediaStreamSource(stream);
            var processor = (microphone.context.createScriptProcessor || 
                microphone.context.createJavaScriptNode).call(microphone.context,4096, 1, 1);
            processor.onaudioprocess = function(audioProcessingEvent) {
                var inputBuffer = audioProcessingEvent.inputBuffer;
                var inputData = inputBuffer.getChannelData(0); // we have only one channel
                myAudioBuffer.getChannelData(0).set(inputData);
                // BUG FIX: re-trigger playback of the freshly filled buffer on
                // every quantum; an AudioBufferSourceNode is one-shot, so a new
                // node is created each time.
                var bufferNode = audioContext.createBufferSource();
                bufferNode.buffer = myAudioBuffer;
                bufferNode.connect(speakers);
                bufferNode.start();
            }

            microphone.connect(processor);
            // BUG FIX: onaudioprocess fires only when the processor's output is
            // pulled by the destination; route it through a muted gain so we
            // hear only the buffered copy, not the direct pass-through.
            var mute = audioContext.createGain();
            mute.gain.value = 0;
            processor.connect(mute);
            mute.connect(speakers);

        },
        function() {
            console.log("Error 003.")
        });
}

However, this code doesn't work. No errors, only silence. Where is my mistake?

Upvotes: 6

Views: 3749

Answers (2)

mash
mash

Reputation: 2526

EDIT

Since the OP definitely wants to use a buffer, I wrote some more code which you can try out on JSFiddle. The tricky part was definitely that you somehow have to pass the input from the microphone through to some "destination" to get it to process.

navigator.getUserMedia = navigator.getUserMedia ||
    navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

// TODO: Figure out what else we need and give the user feedback if he doesn't
// support microphone input.
if (navigator.getUserMedia) {
  captureMicrophone();
}

// First Step - Capture microphone and process the input
function captureMicrophone() {
  // process input from microphone
  const processAudio = ev =>
      processBuffer(ev.inputBuffer.getChannelData(CHANNEL));

  // setup media stream from microphone
  const microphoneStream = stream => {
    const microphone = audioContext.createMediaStreamSource(stream);
    microphone.connect(processor);
    // #1 If we don't pass through to speakers 'audioprocess' won't be triggerd
    processor.connect(mute);
  };
  // TODO: Handle error properly (see todo above - but probably more specific)
  const userMediaError = err => console.error(err);

  // Second step - Process buffer and output to speakers
  const processBuffer = buffer => {
    audioBuffer.getChannelData(CHANNEL).set(buffer);
    // We could move this out but that would affect audio quality
    const source = audioContext.createBufferSource();
    source.buffer = audioBuffer;
    source.connect(speakers);
    source.start();
  }

  const audioContext = new AudioContext();
  const speakers = audioContext.destination;
  // We currently only operate on this channel we might need to add a couple
  // lines of code if this fact changes
  const CHANNEL = 0;
  const CHANNELS = 1;
  const BUFFER_SIZE = 4096;
  const audioBuffer = audioContext.createBuffer(CHANNELS, BUFFER_SIZE, audioContext.sampleRate);

  const processor = audioContext.createScriptProcessor(BUFFER_SIZE, CHANNELS, CHANNELS);

  // #2 Not needed we could directly pass through to speakers since there's no
  // data anyway but just to be sure that we don't output anything
  const mute = audioContext.createGain();
  mute.gain.value = 0;
  mute.connect(speakers);

  processor.addEventListener('audioprocess', processAudio);
  navigator.getUserMedia({audio: true}, microphoneStream, userMediaError);
}

The code I wrote up there looks quite dirty to me. But since you have a large project you can definitely structure it much more cleanly.

I've no clue what you're trying to achieve, but I definitely also recommend having a look at Recorder.js

Previous answer

The main point you're missing is that you'll get an output buffer passed into createScriptProcessor so all the createBuffer stuff you do is unnecessary. Apart from that you're on the right track.

This would be a working solution. Try it out on JSFiddle!

navigator.getUserMedia = navigator.getUserMedia ||
    navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

if (navigator.getUserMedia) {
  captureMicrophone();
}

// Route microphone input through a ScriptProcessor and on to the speakers.
function captureMicrophone() {
  const CHANNEL = 0;
  const BUFFER_SIZE = 4096;

  const audioContext = new AudioContext();
  const speaker = audioContext.destination;
  const processor = audioContext.createScriptProcessor(BUFFER_SIZE, 1, 1);

  // Copy each input quantum into the output buffer the processor hands us.
  const processAudio = (ev) => {
    const inputData = ev.inputBuffer.getChannelData(CHANNEL);
    const outputData = ev.outputBuffer.getChannelData(CHANNEL);
    // TODO: manually do something with the audio
    outputData.set(inputData);
  };

  // Wire the graph once the stream arrives: microphone -> processor -> speaker.
  const microphoneStream = (stream) => {
    const microphone = audioContext.createMediaStreamSource(stream);
    microphone.connect(processor);
    processor.connect(speaker);
  };

  // TODO: handle error properly
  const userMediaError = (err) => console.error(err);

  processor.addEventListener('audioprocess', processAudio);
  navigator.getUserMedia({audio: true}, microphoneStream, userMediaError);
}

Upvotes: 6

cwilso
cwilso

Reputation: 13928

Are you getting silence (i.e. your onprocess is getting called, but the buffers are empty) or nothing (i.e. your onprocess is never getting called)?

If the latter, try connecting the scriptprocessor to the context.destination. Even if you don't use the output, some implementations currently need that connection to pull data through.

Upvotes: 1

Related Questions