
javascript - How to get microphone volume with AudioWorklet

Reposted · Author: 行者123 · Updated: 2023-12-03 06:56:56

I'm interested in continuously reading the microphone volume in JavaScript. Many existing solutions on StackOverflow (see here, here, and here) use BaseAudioContext.createScriptProcessor(), which has been deprecated since 2014.
I'd like to use future-proof code in my project, so could anyone share a modern, minimal example of how to read the microphone volume with the new AudioWorkletNode?

Best Answer

Let's go over a few points worth knowing:

  • The whole point of this change is to avoid latency: the processing runs on its own thread, the audio rendering thread (AudioWorkletGlobalScope), instead of the main thread.
  • The new design has two parts: the AudioWorkletProcessor and the AudioWorkletNode.
  • An AudioWorkletNode needs at least two things: an AudioContext object and the name of the processor as a string. The processor definition is loaded and registered by calling addModule() on the context's audioWorklet object.
  • The Worklet API, including AudioWorklet, is only available in secure contexts. localhost counts as a secure context, but this is worth keeping in mind when you deploy.
  • The AudioWorkletProcessor has to send the current value, in this case the volume, to the AudioWorkletNode, otherwise the main thread can't do anything with it.
  • The microphone is accessed through getUserMedia; the code below uses the legacy navigator.getUserMedia callback API, and a promise-based alternative is sketched right after this list.
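One caveat before the code: navigator.getUserMedia, used in activeSound() below, is itself deprecated. If you want the entry point to be as future-proof as the worklet, the promise-based navigator.mediaDevices.getUserMedia can be swapped in. Here is a minimal sketch, assuming the same onMicrophoneGranted/onMicrophoneDenied handlers defined below (activeSoundModern is just an illustrative name, not part of the original code):

    // Hypothetical promise-based replacement for activeSound()
    async function activeSoundModern() {
      try {
        // Ask the user for microphone access (audio only)
        const stream = await navigator.mediaDevices.getUserMedia({ audio: true, video: false })
        onMicrophoneGranted(stream)
      } catch (err) {
        onMicrophoneDenied(err)
      }
    }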

    /** Declare a context for AudioContext object */
    let audioContext
    // Creating a list of colors for led
    const ledColor = [
      "#064dac",
      "#064dac",
      "#064dac",
      "#06ac5b",
      "#15ac06",
      "#4bac06",
      "#80ac06",
      "#acaa06",
      "#ac8b06",
      "#ac5506",
    ]
    let isFirtsClick = true
    let listeing = false

    function onMicrophoneDenied() {
      console.log('denied')
    }

    /**
     * This method updates the leds
     * depending on the detected volume
     *
     * @param {number} vol value of the volume detected from the microphone
     */
    function leds(vol) {
      let leds = [...document.getElementsByClassName('led')]
      let range = leds.slice(0, Math.round(vol))

      for (let i = 0; i < leds.length; i++) {
        leds[i].style.boxShadow = "-2px -2px 4px 0px #a7a7a73d, 2px 2px 4px 0px #0a0a0e5e";
        leds[i].style.height = "22px"
      }

      for (let i = 0; i < range.length; i++) {
        range[i].style.boxShadow = `5px 2px 5px 0px #0a0a0e5e inset, -2px -2px 1px 0px #a7a7a73d inset, -2px -2px 30px 0px ${ledColor[i]} inset`;
        range[i].style.height = "25px"
      }
    }

    /**
     * Method used to create the communication between the
     * AudioWorkletNode, the microphone and the AudioWorkletProcessor
     *
     * @param {MediaStream} stream If the user grants access to the microphone,
     * this gives you the MediaStream object needed in this implementation
     */
    async function onMicrophoneGranted(stream) {
      // Instantiate only the first time
      // the button is pressed
      if (isFirtsClick) {
        // Initialize the AudioContext object
        audioContext = new AudioContext()

        // Adding an AudioWorkletProcessor
        // from another script with the addModule method
        await audioContext.audioWorklet.addModule('vumeter-processor.js')

        // Creating a MediaStreamSource object
        // from the MediaStream object granted by
        // the user
        let microphone = audioContext.createMediaStreamSource(stream)

        // Creating the AudioWorkletNode, passing the
        // context and the name of the processor registered
        // in vumeter-processor.js
        const node = new AudioWorkletNode(audioContext, 'vumeter')

        // Listen here for any message posted from the
        // AudioWorkletProcessor's process method; this is
        // where you learn the volume level
        node.port.onmessage = event => {
          let _volume = 0
          let _sensibility = 5 // Just to add some sensitivity to our equation
          if (event.data.volume)
            _volume = event.data.volume;
          leds((_volume * 100) / _sensibility)
        }

        // Now this is the way to connect our microphone
        // to the AudioWorkletNode and to the audioContext output
        microphone.connect(node).connect(audioContext.destination)

        isFirtsClick = false
      }

      // Just to know whether the button is on or off
      // and suspend or resume the microphone listening
      let audioButton = document.getElementsByClassName('audio-control')[0]
      if (listeing) {
        audioContext.suspend()
        audioButton.style.boxShadow = "-2px -2px 4px 0px #a7a7a73d, 2px 2px 4px 0px #0a0a0e5e"
        audioButton.style.fontSize = "25px"
      } else {
        audioContext.resume()
        audioButton.style.boxShadow = "5px 2px 5px 0px #0a0a0e5e inset, -2px -2px 1px 0px #a7a7a73d inset"
        audioButton.style.fontSize = "24px"
      }

      listeing = !listeing
    }

    function activeSound () {
      // Ask the user for permission
      // to use the microphone
      try {
        navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

        navigator.getUserMedia(
          { audio: true, video: false },
          onMicrophoneGranted,
          onMicrophoneDenied
        );
      } catch (e) {
        alert(e)
      }
    }

    document.getElementById('audio').addEventListener('click', () => {
      activeSound()
    })
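Note that the button above only suspends and resumes the AudioContext, so the browser keeps capturing the microphone (its recording indicator stays on). If you want to release the device completely, a small sketch of a hypothetical teardown helper follows; it assumes you saved the MediaStream passed to onMicrophoneGranted in a variable such as currentStream, which is not part of the original code:

    // Assumed to be assigned inside onMicrophoneGranted: currentStream = stream
    let currentStream

    // Hypothetical teardown helper
    function stopSound() {
      // Stop every microphone track so the browser releases the device
      if (currentStream) currentStream.getTracks().forEach(track => track.stop())
      // Close the AudioContext and free the audio rendering thread;
      // to start again, isFirtsClick would also need to be reset so the graph is rebuilt
      if (audioContext) audioContext.close()
      listeing = false
    }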
This next part, vumeter-processor.js, is where the microphone volume is actually computed:
    const SMOOTHING_FACTOR = 0.8;
    const MINIMUM_VALUE = 0.00001;

    // This is the way to register an AudioWorkletProcessor:
    // it's necessary to declare a name, in this case
    // the name is "vumeter"
    registerProcessor('vumeter', class extends AudioWorkletProcessor {

      _volume
      _updateIntervalInMS
      _nextUpdateFrame

      constructor () {
        super();
        this._volume = 0;
        this._updateIntervalInMS = 25;
        this._nextUpdateFrame = this._updateIntervalInMS;
        this.port.onmessage = event => {
          if (event.data.updateIntervalInMS)
            this._updateIntervalInMS = event.data.updateIntervalInMS;
        }
      }

      get intervalInFrames () {
        return this._updateIntervalInMS / 1000 * sampleRate;
      }

      process (inputs, outputs, parameters) {
        const input = inputs[0];

        // Note that the input will be down-mixed to mono; however, if no inputs are
        // connected then zero channels will be passed in.
        if (input.length > 0) {
          const samples = input[0];
          let sum = 0;
          let rms = 0;

          // Calculate the squared sum.
          for (let i = 0; i < samples.length; ++i)
            sum += samples[i] * samples[i];

          // Calculate the RMS level and update the volume.
          rms = Math.sqrt(sum / samples.length);
          this._volume = Math.max(rms, this._volume * SMOOTHING_FACTOR);

          // Update and sync the volume property with the main thread.
          this._nextUpdateFrame -= samples.length;
          if (this._nextUpdateFrame < 0) {
            this._nextUpdateFrame += this.intervalInFrames;
            this.port.postMessage({volume: this._volume});
          }
        }

        return true;
      }
    });
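The value the processor posts to the main thread is a smoothed linear RMS level (roughly 0 to 1 for full-scale audio). If you would rather show decibels, you can convert it inside node.port.onmessage on the main thread. A small sketch, where rmsToDb is a hypothetical helper and the -60 dB floor is just an arbitrary display cutoff:

    /**
     * Convert a linear RMS value (0..1) to dBFS, clamped to a display floor.
     * @param {number} rms linear RMS level reported by the vumeter processor
     * @returns {number} level in dBFS, between -60 and 0
     */
    function rmsToDb(rms) {
      const db = 20 * Math.log10(Math.max(rms, 1e-8)) // avoid log10(0)
      return Math.max(db, -60)
    }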
Finally, here is the HTML where the detected volume is displayed:
    <div class="container">
      <span>Microphone</span>
      <div class="volumen-wrapper">
        <div class="led"></div>
        <div class="led"></div>
        <div class="led"></div>
        <div class="led"></div>
        <div class="led"></div>

        <div class="led"></div>
        <div class="led"></div>
        <div class="led"></div>
        <div class="led"></div>
        <div class="led"></div>
      </div>

      <div class="control-audio-wrapper">
        <div id="audio" class="audio-control">&#127908;</div>
      </div>
    </div>
    <script type="module" src="./index.js"></script>
Here is the result (screenshot omitted).
And here is my implementation: codepen
Sources:
  • Enter to Audio worklet
  • Web audio
  • w3.org/webaudio
  • Original question on Stack Overflow: https://stackoverflow.com/questions/62702721/
