How to process audio from the user's microphone

François Beaufort

On the web platform, the Media Capture and Streams API gives you access to the user's camera and microphone. The getUserMedia() method prompts the user to capture their camera and/or microphone as a media stream. That stream can then be processed on a separate Web Audio thread with an AudioWorklet, which provides very low latency audio processing.

The example below shows how to efficiently process audio coming from the user's microphone.

let stream;

startMicrophoneButton.addEventListener("click", async () => {
  // Prompt the user to use their microphone.
  stream = await navigator.mediaDevices.getUserMedia({
    audio: true,
  });
  const context = new AudioContext();
  const source = context.createMediaStreamSource(stream);

  // Load and execute the module script.
  await context.audioWorklet.addModule("processor.js");
  // Create an AudioWorkletNode. The name of the processor is the
  // one passed to registerProcessor() in the module script.
  const processor = new AudioWorkletNode(context, "processor");

  source.connect(processor).connect(context.destination);
  log("Your microphone audio is being used.");
});

stopMicrophoneButton.addEventListener("click", () => {
  // Stop the stream.
  stream.getTracks().forEach(track => track.stop());

  log("Your microphone audio is not used anymore.");
});
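
getUserMedia() rejects with a DOMException when the user denies the permission prompt or no microphone is available. The snippet above leaves that case unhandled; a minimal sketch of guarding the call inside the click handler, assuming failures are surfaced through the same log() helper:

try {
  stream = await navigator.mediaDevices.getUserMedia({ audio: true });
} catch (error) {
  // "NotAllowedError": the user denied permission.
  // "NotFoundError": no microphone is available.
  log(`getUserMedia() failed: ${error.name}`);
  return;
}
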
// processor.js
// This file is evaluated in the audio rendering thread
// upon context.audioWorklet.addModule() call.

class Processor extends AudioWorkletProcessor {
  process([input], [output]) {
    // The input can be empty, for example once the microphone track stops.
    if (input.length > 0) {
      // Copy the first input channel to the first output channel.
      output[0].set(input[0]);
    }
    // Return true to keep the processor alive.
    return true;
  }
}

registerProcessor("processor", Processor);
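
The Processor above only passes samples through. A real processor reads the input, writes a transformed output, and can expose automatable parameters through a static parameterDescriptors getter. Below is a sketch of a gain processor; the GainProcessor class, the "gain-processor" name, and the parameter range are illustrative assumptions, not part of the original demo:

// gain-processor.js (hypothetical module)
class GainProcessor extends AudioWorkletProcessor {
  static get parameterDescriptors() {
    return [{ name: "gain", defaultValue: 1, minValue: 0, maxValue: 2 }];
  }

  process([input], [output], parameters) {
    const gain = parameters.gain;
    for (let channel = 0; channel < input.length; channel++) {
      for (let i = 0; i < input[channel].length; i++) {
        // gain holds one value when constant, 128 when automated.
        output[channel][i] =
          input[channel][i] * (gain.length > 1 ? gain[i] : gain[0]);
      }
    }
    // Keep the processor alive.
    return true;
  }
}

registerProcessor("gain-processor", GainProcessor);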

Browser support

MediaDevices.getUserMedia()

  • Chrome 53
  • Edge 12
  • Firefox 36
  • Safari 11

Web Audio API

  • Chrome 35
  • Edge 12
  • Firefox 25
  • Safari 14.1

AudioWorklet

  • Chrome 66
  • Edge 79
  • Firefox 76
  • Safari 14.1
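
Older browsers may lack one of these APIs. A minimal feature-detection sketch follows; wiring the result to the demo's start button is an assumption about how you would use it:

// Disable the start button when either API is missing.
const isSupported =
  "mediaDevices" in navigator &&
  "getUserMedia" in navigator.mediaDevices &&
  typeof AudioWorkletNode === "function";

startMicrophoneButton.disabled = !isSupported;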

Demo

HTML

<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1" />
    <link
      rel="icon"
      href="data:image/svg+xml,<svg xmlns=%22http://www.w3.org/2000/svg%22 viewBox=%220 0 100 100%22><text y=%22.9em%22 font-size=%2290%22>🎙️</text></svg>"
    />
    <title>How to process audio from the user's microphone</title>
  </head>
  <body>
    <h1>How to process audio from the user's microphone</h1>
    <button id="startMicrophoneButton">Start using microphone</button>
    <button id="stopMicrophoneButton" disabled>Stop using microphone</button>
    <pre id="logs"></pre>
  </body>
</html>

CSS


:root {
  color-scheme: dark light;
}
html {
  box-sizing: border-box;
}
*,
*:before,
*:after {
  box-sizing: inherit;
}
body {
  margin: 1rem;
  font-family: system-ui, sans-serif;
}
button {
  display: block;
  margin-bottom: 4px;
}
pre {
  color: red;
  white-space: pre-line;
}

JS


const startMicrophoneButton = document.querySelector('#startMicrophoneButton');
const stopMicrophoneButton = document.querySelector('#stopMicrophoneButton');
const logs = document.querySelector('#logs');

// Minimal log helper for the demo's <pre id="logs"> element.
function log(text) {
  logs.textContent += `${text}\n`;
}

let stream;

startMicrophoneButton.addEventListener("click", async () => {
  // Prompt the user to use their microphone.
  stream = await navigator.mediaDevices.getUserMedia({
    audio: true,
  });
  const context = new AudioContext();
  const source = context.createMediaStreamSource(stream);

  // Load and execute the module script.
  await context.audioWorklet.addModule("processor.js");
  // Create an AudioWorkletNode. The name of the processor is the
  // one passed to registerProcessor() in the module script.
  const processor = new AudioWorkletNode(context, "processor");

  source.connect(processor).connect(context.destination);

  stopMicrophoneButton.disabled = false;
  log("Your microphone audio is being used.");
});

stopMicrophoneButton.addEventListener("click", () => {
  // Stop all tracks so the microphone is released.
  stream.getTracks().forEach(track => track.stop());

  stopMicrophoneButton.disabled = true;
  log("Your microphone audio is not used anymore.");
});