
HTML5: AudioContext AudioBuffer

Stack Overflow user
Asked on 2016-07-09 14:00:46
Answers: 2 · Views: 3.1K · Following: 0 · Votes: 6

I need to understand how audio buffers work, and to do that I want to build the following chain: Microphone -> Auto -> Processor -> Manual -> Buffer -> Auto -> Speakers. "Auto" means automatic data transfer, and "Manual" means I move the data myself in code inside processor.onaudioprocess. So I have the following code:

Code language: javascript
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
var audioContext;
var myAudioBuffer;
var microphone;
var speakers;
if (navigator.getUserMedia) {
    navigator.getUserMedia(
        {audio: true}, 
        function(stream) {
            audioContext = new AudioContext();
            //STEP 1 - we create buffer and its node
            speakers = audioContext.destination;
            myAudioBuffer = audioContext.createBuffer(1, 22050, 44100);
            var bufferNode = audioContext.createBufferSource();
            bufferNode.buffer = myAudioBuffer;
            bufferNode.connect(speakers);
            bufferNode.start();

            //STEP 2- we create microphone and processor
            microphone = audioContext.createMediaStreamSource(stream);
            var processor = (microphone.context.createScriptProcessor || 
                microphone.context.createJavaScriptNode).call(microphone.context, 4096, 1, 1);
            processor.onaudioprocess = function(audioProcessingEvent) {
                var inputBuffer = audioProcessingEvent.inputBuffer;
                var inputData = inputBuffer.getChannelData(0); // we have only one channel
                var nowBuffering = myAudioBuffer.getChannelData(0);
                for (var sample = 0; sample < inputBuffer.length; sample++) {
                  nowBuffering[sample] = inputData[sample];
                }
            }

            microphone.connect(processor);                    

        },
        function() {
            console.log("Error 003.")
        });
}

However, this code doesn't work. There are no errors, just silence. Where is my mistake?


2 Answers

Stack Overflow user

Accepted answer

Posted on 2016-07-10 04:37:59

Are you getting silence (i.e. your onaudioprocess handler is being called, but the buffers are empty), or nothing at all (i.e. your handler is never called)?

If it is the latter, try connecting the script processor to context.destination. Even if you don't use its output, some implementations currently require that connection in order to pull data through the node.
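
For illustration only (this snippet is not part of the original answer): a minimal sketch of that workaround, reusing the processor and audioContext variables from the question and routing through a zero-gain node so nothing audible comes out.

Code language: javascript

// Sketch of the suggested workaround: give the ScriptProcessorNode a path to
// context.destination so the implementation keeps pulling data through it.
// 'processor' and 'audioContext' are the objects created in the question.
var silencer = audioContext.createGain();
silencer.gain.value = 0;                    // keep the output inaudible
processor.connect(silencer);                // the processor must feed something...
silencer.connect(audioContext.destination); // ...that ends at the destination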

Votes: 2

Stack Overflow user

Posted on 2016-07-11 22:10:43

Edit

Since the OP definitely wants to use a buffer, I wrote some more code that you can try out on JSFiddle. The trick is that you have to pass the input from the microphone through to some "destination" for it to be processed at all.

Code language: javascript
navigator.getUserMedia = navigator.getUserMedia ||
    navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

// TODO: Figure out what else we need and give the user feedback if their
// browser doesn't support microphone input.
if (navigator.getUserMedia) {
  captureMicrophone();
}

// First Step - Capture microphone and process the input
function captureMicrophone() {
  // process input from microphone
  const processAudio = ev =>
      processBuffer(ev.inputBuffer.getChannelData(CHANNEL));

  // setup media stream from microphone
  const microphoneStream = stream => {
    const microphone = audioContext.createMediaStreamSource(stream);
    microphone.connect(processor);
    // #1 If we don't pass through to the speakers, 'audioprocess' won't be triggered
    processor.connect(mute);
  };
  // TODO: Handle error properly (see todo above - but probably more specific)
  const userMediaError = err => console.error(err);

  // Second step - Process buffer and output to speakers
  const processBuffer = buffer => {
    audioBuffer.getChannelData(CHANNEL).set(buffer);
    // We could move this out but that would affect audio quality
    const source = audioContext.createBufferSource();
    source.buffer = audioBuffer;
    source.connect(speakers);
    source.start();
  }

  const audioContext = new AudioContext();
  const speakers = audioContext.destination;
  // We currently only operate on this channel; we might need to add a couple
  // of lines of code if this changes
  const CHANNEL = 0;
  const CHANNELS = 1;
  const BUFFER_SIZE = 4096;
  const audioBuffer = audioContext.createBuffer(CHANNELS, BUFFER_SIZE, audioContext.sampleRate);

  const processor = audioContext.createScriptProcessor(BUFFER_SIZE, CHANNELS, CHANNELS);

  // #2 Not needed we could directly pass through to speakers since there's no
  // data anyway but just to be sure that we don't output anything
  const mute = audioContext.createGain();
  mute.gain.value = 0;
  mute.connect(speakers);

  processor.addEventListener('audioprocess', processAudio);
  navigator.getUserMedia({audio: true}, microphoneStream, userMediaError);
}


The code I wrote there looks pretty messy to me, but since you have a larger project you can certainly structure it more cleanly.

I don't know exactly what you are trying to achieve, but I would also definitely recommend taking a look at Recorder.js.
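
For orientation only, a rough sketch of how Recorder.js is commonly wired up; the constructor and method names below (new Recorder(source), record(), stop(), exportWAV(callback)) reflect the widely used Recorder.js API and are an assumption here, so check them against the version you actually install.

Code language: javascript

// Rough sketch, assuming the common Recorder.js API; verify the method names
// against the version of Recorder.js you use.
navigator.getUserMedia({audio: true}, function(stream) {
  var audioContext = new AudioContext();
  var input = audioContext.createMediaStreamSource(stream);
  var rec = new Recorder(input);     // wrap the microphone source node
  rec.record();                      // start capturing

  setTimeout(function() {
    rec.stop();                      // stop after a few seconds
    rec.exportWAV(function(blob) {   // receive the recording as a WAV blob
      console.log('Recording ready:', URL.createObjectURL(blob));
    });
  }, 3000);
}, function(err) { console.error(err); });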

Previous answer

The main point you're missing is that an output buffer is passed to the node created by createScriptProcessor, so all of the createBuffer work you're doing is unnecessary. Apart from that, you're on the right track.

This would be a working solution. Try it out on JSFiddle!

Code language: javascript
navigator.getUserMedia = navigator.getUserMedia ||
    navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

if (navigator.getUserMedia) {
  captureMicrophone();
}
function captureMicrophone() {
  const audioContext = new AudioContext();
  const speaker = audioContext.destination;
  const processor = audioContext.createScriptProcessor(4096, 1, 1);

  const processAudio =
      ev => {
        const CHANNEL = 0;
        const inputBuffer = ev.inputBuffer;
        const outputBuffer = ev.outputBuffer;
        const inputData = inputBuffer.getChannelData(CHANNEL);
        const outputData = outputBuffer.getChannelData(CHANNEL);

        // TODO: manually do something with the audio
        for (let i = 0; i < inputBuffer.length; ++i) {
          outputData[i] = inputData[i];
        }
      };

  const microphoneStream =
      stream => {
        const microphone = audioContext.createMediaStreamSource(stream);
        microphone.connect(processor);
        processor.connect(speaker);
      };

  // TODO: handle error properly
  const userMediaError = err => console.error(err);

  processor.addEventListener('audioprocess', processAudio);
  navigator.getUserMedia({audio: true}, microphoneStream, userMediaError);
}
Votes: 5
The original page content is provided by Stack Overflow.
Original link: https://stackoverflow.com/questions/38282611
