Chrome: Detecting Microphone Volume in the Browser

When building a live-streaming web application, you usually need to check that the user's devices work before going live. This article walks through how to visualize the microphone's volume level.

Background: Why AudioWorklet Exists

This feature relies on Chrome's AudioWorklet.

In the Web Audio API, audio processing runs on a dedicated thread so that it stays smooth. The old recommendation was to process audio with audioContext.createScriptProcessor, but it was designed to work asynchronously, and that design inevitably introduces latency.

So AudioWorklet was born as the replacement for createScriptProcessor.

AudioWorklet integrates the JS code you supply directly into the audio processing thread, so the audio no longer has to hop over to the main thread. That is what makes zero added latency and synchronous rendering possible.

How Audio Worklet Is Used

Using Audio Worklet involves two parts, AudioWorkletProcessor and AudioWorkletNode (a minimal sketch of how they fit together follows the list):

  • AudioWorkletProcessor holds the actual audio-processing JS code and runs inside the AudioWorkletGlobalScope.
  • AudioWorkletNode is its counterpart on the main thread and connects the processor to the other AudioNodes.
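
A minimal sketch of how the two pieces fit together (the file name my-processor.js and the registered name 'my-processor' are placeholders for illustration, not part of the example that follows):

// my-processor.js — runs in the AudioWorkletGlobalScope, off the main thread
class MyProcessor extends AudioWorkletProcessor {
  process(inputs, outputs, parameters) {
    // Inspect or transform audio here; talk to the main thread via this.port
    return true // keep the processor alive
  }
}
registerProcessor('my-processor', MyProcessor)

// main thread (inside an async function)
const context = new AudioContext()
await context.audioWorklet.addModule('my-processor.js') // load the file above
const node = new AudioWorkletNode(context, 'my-processor') // same name as in registerProcessor
node.port.onmessage = event => {
  // messages posted by the processor arrive here
}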

Writing the Code

Start with the AudioWorkletProcessor, i.e. the code that actually processes the audio. It lives in a separate JS file, here named processor.js, and will run on its own thread.

// processor.js
const SMOOTHING_FACTOR = 0.8

class VolumeMeter extends AudioWorkletProcessor {
  static get parameterDescriptors() {
    return []
  }

  constructor() {
    super()
    this.volume = 0
    this.lastUpdate = currentTime
  }

  calculateVolume(inputs) {
    const inputChannelData = inputs[0][0]

    // The input can briefly be empty, e.g. before the microphone is connected.
    if (!inputChannelData) return

    let sum = 0

    // Calculate the squared-sum.
    for (let i = 0; i < inputChannelData.length; ++i) {
      sum += inputChannelData[i] * inputChannelData[i]
    }

    // Calculate the RMS level and update the volume.
    const rms = Math.sqrt(sum / inputChannelData.length)

    this.volume = Math.max(rms, this.volume * SMOOTHING_FACTOR)

    // Post a message to the node every 200ms.
    if (currentTime - this.lastUpdate > 0.2) {
      this.port.postMessage({ eventType: "volume", volume: this.volume * 100 })
      // Store previous time
      this.lastUpdate = currentTime
    }
  }

  process(inputs, outputs, parameters) {
    this.calculateVolume(inputs)

    return true
  }
}

// Register the processor as "vumeter". Note: the name must match the one used on the main thread.
registerProcessor('vumeter', VolumeMeter)

The logic is wrapped in a class named VolumeMeter (a volume meter) that extends AudioWorkletProcessor.
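
For reference, the volume figure is the root mean square (RMS) of the samples in each block the processor receives, combined with a simple peak-hold decay: the meter follows a new peak immediately and otherwise decays by the smoothing factor. A standalone sketch of the same math (the function names here are mine, not part of the Web Audio API):

// Sketch: the same RMS and smoothing math outside the worklet (names are illustrative)
function rms(samples) {
  let sum = 0
  for (let i = 0; i < samples.length; i++) {
    sum += samples[i] * samples[i]
  }
  return Math.sqrt(sum / samples.length)
}

// With a smoothing factor of 0.8 the meter jumps to a new peak immediately,
// but when the input gets quieter it decays by at most 20% per block,
// so the bars fall smoothly instead of flickering.
let smoothedVolume = 0
function nextMeterValue(samples, smoothingFactor = 0.8) {
  smoothedVolume = Math.max(rms(samples), smoothedVolume * smoothingFactor)
  return smoothedVolume * 100 // same roughly 0–100 scale the processor posts to the page
}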

Main Thread Code

// Tell the user that this program wants to use the microphone
function activeSound () {
    try {
        navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

        navigator.getUserMedia({ audio: true, video: false }, onMicrophoneGranted, onMicrophoneDenied);
    } catch (e) {
        alert(e)
    }
}

async function onMicrophoneGranted(stream) {
    // Initialize the AudioContext object
    const audioContext = new AudioContext()

    // Create a MediaStreamSource from the MediaStream granted by the user
    let microphone = audioContext.createMediaStreamSource(stream)

    await audioContext.audioWorklet.addModule('processor.js')
    // Create an AudioWorkletNode, passing the context and the name of the
    // processor registered in processor.js
    const node = new AudioWorkletNode(audioContext, 'vumeter')

    // Listen for messages posted by the AudioWorkletProcessor from its
    // process method; this is where the volume level arrives
    node.port.onmessage = event => {
        // console.log(event.data.volume) // the volume value measured in processor.js
        handleVolumeCellColor(event.data.volume) // update the meter on the page
    }

    // Connect the microphone to the AudioWorkletNode and on to the
    // audioContext destination
    microphone.connect(node).connect(audioContext.destination)
}

function onMicrophoneDenied() {
    console.log('denied')
}
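
A side note: navigator.getUserMedia as used above is the legacy callback-based API and is deprecated; current browsers expose the promise-based navigator.mediaDevices.getUserMedia instead. A sketch of an alternative entry point using it (the name activeSoundModern is mine; it reuses onMicrophoneGranted and onMicrophoneDenied from above):

// Alternative entry point using the promise-based API (needs a secure context: https or localhost)
async function activeSoundModern() {
    try {
        const stream = await navigator.mediaDevices.getUserMedia({ audio: true, video: false })
        await onMicrophoneGranted(stream)
    } catch (e) {
        console.error(e)
        onMicrophoneDenied()
    }
}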

Handling the Page Display Logic

The code above already gives us the system microphone's volume; the next task is to display it on the page.

First, prepare the page structure and styles:

<style>
    .volume-group {
        width: 200px;
        height: 50px;
        background-color: black;
        display: flex;
        align-items: center;
        gap: 5px;
        padding: 0 10px;
    }
    .volume-cell {
        width: 10px;
        height: 30px;
        background-color: #e3e3e5;
    }
</style>

<div class="volume-group">
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
    <div class="volume-cell"></div>
</div>

The rendering logic:

/**
 * Update the volume cell colors based on the current volume
 */
function handleVolumeCellColor(volume) {
    const allVolumeCells = [...volumeCells]
    const numberOfCells = Math.round(volume)
    const cellsToColored = allVolumeCells.slice(0, numberOfCells)

    for (const cell of allVolumeCells) {
        cell.style.backgroundColor = "#e3e3e5"
    }

    for (const cell of cellsToColored) {
        cell.style.backgroundColor = "#79c545"
    }
}
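
Note that the processor posts volume on a roughly 0–100 scale while there are only 12 cells, so the meter above saturates quickly; slice() simply caps anything beyond the last cell. If you would rather spread the whole range across the cells, one possible variant (a sketch, assuming the same volumeCells NodeList) looks like this:

// Variant: map the 0–100 volume onto however many cells exist
function handleVolumeCellColorScaled(volume) {
    const cells = [...volumeCells]
    const litCount = Math.min(cells.length, Math.round((volume / 100) * cells.length))

    cells.forEach((cell, index) => {
        cell.style.backgroundColor = index < litCount ? "#79c545" : "#e3e3e5"
    })
}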

Complete Code

Below is the complete main-thread code; put it in the same directory as processor.js and run it. Serve the page from a local web server rather than opening the HTML file directly from disk, since loading the worklet module and accessing the microphone can fail from a file:// URL.

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>AudioContext</title>
    <style>
        .volume-group {
            width: 200px;
            height: 50px;
            background-color: black;
            display: flex;
            align-items: center;
            gap: 5px;
            padding: 0 10px;
        }
        .volume-cell {
            width: 10px;
            height: 30px;
            background-color: #e3e3e5;
        }
    </style>
</head>
<body>
    <div class="volume-group">
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
        <div class="volume-cell"></div>
    </div>
<script>
    function activeSound () {
        // Tell the user that this program wants to use the microphone
        try {
            navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

            navigator.getUserMedia({ audio: true, video: false }, onMicrophoneGranted, onMicrophoneDenied);
        } catch (e) {
            alert(e)
        }
    }

    const volumeCells = document.querySelectorAll(".volume-cell")

    async function onMicrophoneGranted(stream) {
        // Initialize the AudioContext object
        const audioContext = new AudioContext()

        // Create a MediaStreamSource from the MediaStream granted by the user
        let microphone = audioContext.createMediaStreamSource(stream)

        await audioContext.audioWorklet.addModule('processor.js')
        // Create an AudioWorkletNode, passing the context and the name of the
        // processor registered in processor.js
        const node = new AudioWorkletNode(audioContext, 'vumeter')

        // Listen for messages posted by the AudioWorkletProcessor from its
        // process method; this is where the volume level arrives
        node.port.onmessage = event => {
            // console.log(event.data.volume)
            handleVolumeCellColor(event.data.volume)
        }

        // Connect the microphone to the AudioWorkletNode and on to the
        // audioContext destination
        microphone.connect(node).connect(audioContext.destination)
    }

    function onMicrophoneDenied() {
        console.log('denied')
    }

    /**
     * Update the volume cell colors based on the current volume
     */
    function handleVolumeCellColor(volume) {
        const allVolumeCells = [...volumeCells]
        const numberOfCells = Math.round(volume)
        const cellsToColored = allVolumeCells.slice(0, numberOfCells)

        for (const cell of allVolumeCells) {
            cell.style.backgroundColor = "#e3e3e5"
        }

        for (const cell of cellsToColored) {
            cell.style.backgroundColor = "#79c545"
        }
    }

    activeSound()
</script>
</body>
</html>

References

Enter Audio Worklet

That's it for this article. If it was useful to you, a like would be appreciated. Thanks.
