
jquery - Using the Web Audio API, is it possible to turn off the speaker volume while the input remains active?


I am trying to record vocals from my laptop's microphone using the Web Audio API.

The recording itself works fine.

But there is one problem: the audio also comes out of the speakers, so while recording I hear myself speak at the same time.

I am looking for a way to avoid this problem. Please help! Thanks.

Best Answer

The code below launches a Web Audio API session with a volume widget rendered in the browser, so to silence the speakers you simply slide the volume down while the microphone stays live. The Web Audio API is node based: instead of connecting the microphone directly to audioContext.destination, I introduce a gain_node as an intermediary:

    gain_node = audioContext.createGain();
    gain_node.connect( audioContext.destination );

    microphone_stream = audioContext.createMediaStreamSource(stream);
    microphone_stream.connect(gain_node);

    script_processor_node = audioContext.createScriptProcessor(BUFF_SIZE_RENDERER, 1, 1);
    script_processor_node.onaudioprocess = process_microphone_buffer;

    microphone_stream.connect(script_processor_node);
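
Since the goal is to silence the speakers while keeping the input hot, note that you do not even need the slider: once gain_node exists you can simply zero its gain. A minimal sketch using the gain_node from above:

    gain_node.gain.value = 0; // output speakers silent; the microphone -> script processor path is unaffected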

To give the output speakers a volume control, I register a callback on that gain_node:

    document.getElementById('volume').addEventListener('change', function() {

        var curr_volume = this.value;
        gain_node.gain.value = curr_volume;

        console.log("curr_volume ", curr_volume);
    });
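
A small optional variant, not in the original answer: if you want the gain to track the slider continuously while it is being dragged rather than only when it is released, listen for the input event instead of change:

    document.getElementById('volume').addEventListener('input', function() {
        gain_node.gain.value = this.value; // fires on every slider movement
    });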

All of the above is packaged into a single self-contained HTML file:

<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>capture microphone then show time & frequency domain output</title>

<script type="text/javascript">

var webaudio_tooling_obj = function () {

    var audioContext = new AudioContext();

    console.log("audio is starting up ...");

    var BUFF_SIZE_RENDERER = 16384;
    var SIZE_SHOW = 3; // number of array elements to show in console output

    var audioInput = null,
        microphone_stream = null,
        gain_node = null,
        script_processor_node = null,
        script_processor_analysis_node = null,
        analyser_node = null;

    // legacy getUserMedia shim for older browsers
    if (!navigator.getUserMedia)
        navigator.getUserMedia = navigator.webkitGetUserMedia ||
            navigator.mozGetUserMedia || navigator.msGetUserMedia;

    if (navigator.getUserMedia) {

        navigator.getUserMedia({audio: true},
            function (stream) {
                start_microphone(stream);
            },
            function (e) {
                alert('Error capturing audio.');
            }
        );

    } else {
        alert('getUserMedia not supported in this browser.');
    }

    // ---

    function show_some_data(given_typed_array, num_row_to_display, label) {

        var size_buffer = given_typed_array.length;
        var index = 0;

        console.log("__________ " + label);

        if (label === "time") {

            for (; index < num_row_to_display && index < size_buffer; index += 1) {

                // map unsigned byte (0..255) back to a float sample in [-1.0, 1.0)
                var curr_value_time = (given_typed_array[index] / 128) - 1.0;

                console.log(curr_value_time);
            }

        } else if (label === "frequency") {

            for (; index < num_row_to_display && index < size_buffer; index += 1) {

                console.log(given_typed_array[index]);
            }

        } else {

            throw new Error("ERROR - must pass time or frequency");
        }
    }

    function process_microphone_buffer(event) {

        var i, N, inp, microphone_output_buffer;

        // not needed for basic feature set
        // microphone_output_buffer = event.inputBuffer.getChannelData(0); // just mono - 1 channel for now
    }

    function start_microphone(stream) {

        gain_node = audioContext.createGain();
        gain_node.connect(audioContext.destination);

        microphone_stream = audioContext.createMediaStreamSource(stream);
        microphone_stream.connect(gain_node);

        script_processor_node = audioContext.createScriptProcessor(BUFF_SIZE_RENDERER, 1, 1);
        script_processor_node.onaudioprocess = process_microphone_buffer;

        microphone_stream.connect(script_processor_node);

        // --- enable volume control for output speakers

        document.getElementById('volume').addEventListener('change', function () {

            var curr_volume = this.value;
            gain_node.gain.value = curr_volume;

            console.log("curr_volume ", curr_volume);
        });

        // --- setup FFT

        script_processor_analysis_node = audioContext.createScriptProcessor(2048, 1, 1);
        script_processor_analysis_node.connect(gain_node);

        analyser_node = audioContext.createAnalyser();
        analyser_node.smoothingTimeConstant = 0;
        analyser_node.fftSize = 2048;

        microphone_stream.connect(analyser_node);

        analyser_node.connect(script_processor_analysis_node);

        var buffer_length = analyser_node.frequencyBinCount;

        var array_freq_domain = new Uint8Array(buffer_length);
        var array_time_domain = new Uint8Array(buffer_length);

        console.log("buffer_length " + buffer_length);

        script_processor_analysis_node.onaudioprocess = function () {

            // snapshot the current frequency and time domain data
            analyser_node.getByteFrequencyData(array_freq_domain);
            analyser_node.getByteTimeDomainData(array_time_domain);

            // log a few samples from each domain
            show_some_data(array_freq_domain, SIZE_SHOW, "frequency");
            show_some_data(array_time_domain, SIZE_SHOW, "time"); // store this to record to aggregate buffer/file
        };
    }

}(); // webaudio_tooling_obj = function()

</script>

</head>
<body>

<p>Volume</p>
<input id="volume" type="range" min="0" max="1" step="0.1" value="0.0"/>

</body>
</html>
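
One caveat worth noting: navigator.getUserMedia and createScriptProcessor are both deprecated in current browsers. Below is a minimal sketch (my own variant, not part of the original answer) of the same mic-captured-but-muted wiring using the promise-based navigator.mediaDevices.getUserMedia, with the analyser polled via requestAnimationFrame instead of a script processor:

    var audioContext = new (window.AudioContext || window.webkitAudioContext)();

    navigator.mediaDevices.getUserMedia({ audio: true }).then(function (stream) {

        var microphone = audioContext.createMediaStreamSource(stream);

        var gainNode = audioContext.createGain();
        gainNode.gain.value = 0; // speakers muted; input keeps flowing

        var analyser = audioContext.createAnalyser();
        analyser.fftSize = 2048;

        // microphone -> analyser -> gain (muted) -> speakers
        microphone.connect(analyser);
        analyser.connect(gainNode);
        gainNode.connect(audioContext.destination);

        var timeData = new Uint8Array(analyser.frequencyBinCount);

        function poll() {
            analyser.getByteTimeDomainData(timeData);
            console.log("first sample", (timeData[0] / 128) - 1.0); // byte -> float in [-1, 1)
            requestAnimationFrame(poll);
        }
        poll();

    }).catch(function (e) {
        alert('Error capturing audio.');
    });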

Regarding jquery - Using the Web Audio API, is it possible to turn off the speaker volume while the input remains active, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/45503318/
