javascript - HTML 5 : AudioContext AudioBuffer


I need to understand how audio buffers work, and to do that I want to build the following chain: Microphone -> Auto -> Processor -> Manual -> Buffer -> Auto -> Speakers. "Auto" means the data is transferred automatically, and "Manual" means I do it myself in code inside processor.onaudioprocess. So I have the following code:

navigator.getUserMedia = navigator.getUserMedia ||
    navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

var audioContext;
var myAudioBuffer;
var microphone;
var speakers;

if (navigator.getUserMedia) {
    navigator.getUserMedia(
        {audio: true},
        function(stream) {
            audioContext = new AudioContext();

            // STEP 1 - we create the buffer and its node
            speakers = audioContext.destination;
            myAudioBuffer = audioContext.createBuffer(1, 22050, 44100);
            var bufferNode = audioContext.createBufferSource();
            bufferNode.buffer = myAudioBuffer;
            bufferNode.connect(speakers);
            bufferNode.start();

            // STEP 2 - we create the microphone and the processor
            microphone = audioContext.createMediaStreamSource(stream);
            var processor = (microphone.context.createScriptProcessor ||
                microphone.context.createJavaScriptNode).call(
                    microphone.context, 4096, 1, 1);

            processor.onaudioprocess = function(audioProcessingEvent) {
                var inputBuffer = audioProcessingEvent.inputBuffer;
                var inputData = inputBuffer.getChannelData(0); // we have only one channel
                var nowBuffering = myAudioBuffer.getChannelData(0);
                for (var sample = 0; sample < inputBuffer.length; sample++) {
                    nowBuffering[sample] = inputData[sample];
                }
            };

            microphone.connect(processor);
        },
        function() {
            console.log("Error 003.");
        });
}

However, this code doesn't work. There are no errors, only silence. Where is my mistake?

Best Answer

Edit

Since the OP definitely wants to use the buffer, I wrote some code that you can try out on JSFiddle. The tricky part is definitely that you somehow have to pass the input from the microphone through to some "destination" to get it processed at all.

navigator.getUserMedia = navigator.getUserMedia ||
    navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

// TODO: Figure out what else we need and give the user feedback if the
// browser doesn't support microphone input.
if (navigator.getUserMedia) {
    captureMicrophone();
}

// First step - capture the microphone and process the input
function captureMicrophone() {
    // process input from the microphone
    const processAudio = ev =>
        processBuffer(ev.inputBuffer.getChannelData(CHANNEL));

    // set up the media stream from the microphone
    const microphoneStream = stream => {
        const microphone = audioContext.createMediaStreamSource(stream);
        microphone.connect(processor);
        // #1 If we don't pass through to the speakers, 'audioprocess' won't be triggered
        processor.connect(mute);
    };

    // TODO: Handle the error properly (see the todo above - but probably more specific)
    const userMediaError = err => console.error(err);

    // Second step - process the buffer and output to the speakers
    const processBuffer = buffer => {
        audioBuffer.getChannelData(CHANNEL).set(buffer);
        // We could move this out, but that would affect audio quality
        const source = audioContext.createBufferSource();
        source.buffer = audioBuffer;
        source.connect(speakers);
        source.start();
    };

    const audioContext = new AudioContext();
    const speakers = audioContext.destination;
    // We currently only operate on this channel; we might need to add a couple
    // of lines of code if this fact changes
    const CHANNEL = 0;
    const CHANNELS = 1;
    const BUFFER_SIZE = 4096;
    const audioBuffer = audioContext.createBuffer(CHANNELS, BUFFER_SIZE, audioContext.sampleRate);

    const processor = audioContext.createScriptProcessor(BUFFER_SIZE, CHANNELS, CHANNELS);

    // #2 Not needed - we could pass through to the speakers directly since there's no
    // data anyway - but just to be sure that we don't output anything
    const mute = audioContext.createGain();
    mute.gain.value = 0;
    mute.connect(speakers);

    processor.addEventListener('audioprocess', processAudio);
    navigator.getUserMedia({audio: true}, microphoneStream, userMediaError);
}


The code I wrote there looks pretty dirty. But since you have a larger project, you can definitely structure it a bit more cleanly, for example by pulling the graph setup out of captureMicrophone, as sketched below.
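A minimal sketch of one possible restructuring (buildGraph and wireMicrophone are illustrative names, not part of the original answer):

// Hypothetical restructuring sketch - buildGraph and wireMicrophone are
// illustrative names, not part of the original answer.
function buildGraph(audioContext, bufferSize, channels) {
    const processor = audioContext.createScriptProcessor(bufferSize, channels, channels);
    // the muted pass-through keeps 'audioprocess' firing without audible output
    const mute = audioContext.createGain();
    mute.gain.value = 0;
    processor.connect(mute);
    mute.connect(audioContext.destination);
    return processor;
}

function wireMicrophone(audioContext, processor) {
    // returns a getUserMedia success callback that connects the stream
    return stream =>
        audioContext.createMediaStreamSource(stream).connect(processor);
}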

I don't know what you're trying to achieve, but I definitely recommend having a look at Recorder.js; a rough sketch of typical usage follows.
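A hedged sketch of how Recorder.js is commonly used (based on the widely used recorder.js record/stop/exportWAV API; check the library's README for the exact setup):

// Sketch of typical Recorder.js usage; assumes recorder.js is loaded and
// that getUserMedia has been shimmed as above.
navigator.getUserMedia({audio: true}, function(stream) {
    const audioContext = new AudioContext();
    const source = audioContext.createMediaStreamSource(stream);
    const rec = new Recorder(source);      // wraps a script processor internally
    rec.record();                          // start capturing microphone input

    setTimeout(function() {
        rec.stop();                        // stop after three seconds (for the demo)
        rec.exportWAV(function(blob) {     // encode the recording as a WAV Blob
            const url = URL.createObjectURL(blob);
            console.log('Recording available at', url); // e.g. use as <audio> src
        });
    }, 3000);
}, function(err) {
    console.error(err);
});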

Previous Answer

The main point you're missing is that createScriptProcessor hands you an output buffer in the event it passes to your callback, so everything you do with createBuffer is unnecessary. Apart from that, you were on the right track.

This would be a working solution. Try it out on JSFiddle!

navigator.getUserMedia = navigator.getUserMedia ||
    navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

if (navigator.getUserMedia) {
    captureMicrophone();
}

function captureMicrophone() {
    const audioContext = new AudioContext();
    const speaker = audioContext.destination;
    const processor = audioContext.createScriptProcessor(4096, 1, 1);

    const processAudio = ev => {
        const CHANNEL = 0;
        const inputBuffer = ev.inputBuffer;
        const outputBuffer = ev.outputBuffer;
        const inputData = inputBuffer.getChannelData(CHANNEL);
        const outputData = outputBuffer.getChannelData(CHANNEL);

        // TODO: manually do something with the audio
        for (let i = 0; i < inputBuffer.length; ++i) {
            outputData[i] = inputData[i];
        }
    };

    const microphoneStream = stream => {
        const microphone = audioContext.createMediaStreamSource(stream);
        microphone.connect(processor);
        processor.connect(speaker);
    };

    // TODO: handle the error properly
    const userMediaError = err => console.error(err);

    processor.addEventListener('audioprocess', processAudio);
    navigator.getUserMedia({audio: true}, microphoneStream, userMediaError);
}

Regarding javascript - HTML 5 : AudioContext AudioBuffer, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/38282611/
