Summary:
Recently I have been working on audio/video features and using WebRTC. Once you are familiar with these APIs you can adapt them to your own needs, such as microphone detection or audio recording. The basic setup and the other helper methods are all collected in the `methods` object below.
Global variables
// AnalyserNode instance, created later in handleCanvasAudioContext
let analyser: any = null;
// The <canvas> element, assigned later
let canvas: any = null;
// The canvas 2D rendering context
let canvasCtx: any = null;
// Holds the MediaStream returned by getUserMedia
let audioSteam: any = {
  current: null
};
HTML
<div>
  {/* Waveform drawing */}
  <canvas id="devDetectionMicroCanvas" className={style.audio_canvas}></canvas>
  {/* Live playback */}
  <audio id="devDetectionMicroRef" autoPlay></audio>
</div>
Getting the audio stream
// Use the getUserMedia API to capture an audio stream.
navigator.mediaDevices.getUserMedia({
  audio: true
}).then(stream => {
  /* Store the MediaStream on the global object for later use */
  audioSteam.current = stream;
  /* Read the name of the current capture device */
  methods.handleStreamGetMicroName();
  /* Attach the stream to the <audio> element for live playback */
  methods.handleAudioPlay();
  /* Start drawing the real-time waveform */
  methods.handleCanvasAudioContext();
})
.catch(error => {
  let errorMessage = error + '';
  if (errorMessage.includes('Permission denied')) {
    errorMessage = 'Please grant microphone permission';
  } else if (errorMessage.includes('Requested device not found')) {
    errorMessage = 'Please check that a microphone is plugged in';
  }
  console.log('error', errorMessage);
});
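The summary also mentions microphone detection. A minimal sketch of listing the available microphones with navigator.mediaDevices.enumerateDevices() (the listMicrophones helper is an illustration and not part of the original code; note that device labels are usually empty until the user has granted microphone permission):

// Hypothetical helper: list the available audio input devices
async function listMicrophones() {
  const devices = await navigator.mediaDevices.enumerateDevices();
  // Keep only audio inputs; labels may be hidden before permission is granted
  const mics = devices.filter(device => device.kind === 'audioinput');
  mics.forEach(mic => {
    console.log('microphone:', mic.label || '(label hidden until permission granted)', mic.deviceId);
  });
  return mics;
}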
Other methods
const methods = {
  // Set up the canvas and AudioContext
  handleCanvasAudioContext() {
    const stream = audioSteam.current;
    // 1. Create an AudioContext and feed the audio stream into it
    const audioContext = new AudioContext();
    const source = audioContext.createMediaStreamSource(stream);
    // 2. Create an AnalyserNode and connect the source node to it
    analyser = audioContext.createAnalyser();
    source.connect(analyser);
    // 3. Grab the canvas used to visualize the real-time audio data;
    //    getByteTimeDomainData() will later provide time-domain samples to draw as a waveform
    canvas = document.getElementById("devDetectionMicroCanvas");
    canvasCtx = canvas.getContext("2d");
    // 4. Start the waveform animation loop
    methods.drawWaveform();
  },
  // Waveform animation
  drawWaveform() {
    // Read the current time-domain samples from the analyser
    const dataArray = new Uint8Array(analyser.fftSize);
    analyser.getByteTimeDomainData(dataArray);
    // Draw the waveform
    canvasCtx.clearRect(0, 0, canvas.width, canvas.height);
    canvasCtx.lineWidth = 2;
    canvasCtx.strokeStyle = "rgb(69, 255, 56)";
    canvasCtx.beginPath();
    const sliceWidth = canvas.width * 1.0 / analyser.fftSize;
    let x = 0;
    for (let i = 0; i < analyser.fftSize; i++) {
      // Samples are 0–255 with 128 as the zero line; normalize around 1.0
      const v = dataArray[i] / 128.0;
      const y = v * canvas.height / 2;
      if (i === 0) {
        canvasCtx.moveTo(x, y);
      } else {
        canvasCtx.lineTo(x, y);
      }
      x += sliceWidth;
    }
    canvasCtx.stroke();
    requestAnimationFrame(methods.drawWaveform);
  },
  // Get the name of the capture device
  handleStreamGetMicroName() {
    if (audioSteam.current) {
      const audioTrack = audioSteam.current.getAudioTracks()[0];
      console.log('Current capture device:', audioTrack.label);
    }
  },
  // Attach the stream to the <audio> element for playback
  handleAudioPlay() {
    const stream = audioSteam.current;
    const elm: HTMLAudioElement | any = document.getElementById('devDetectionMicroRef');
    if (elm) {
      elm.srcObject = stream;
    }
  },
  // Stop microphone capture
  stopAudioSteam() {
    if (audioSteam.current) {
      audioSteam.current.getTracks().forEach((track: any) => {
        track.stop();
      });
    }
  },
}
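The summary also mentions audio recording. A minimal sketch of recording the same stream with the MediaRecorder API (the recordAudio helper and the 5-second duration are illustrative assumptions, not part of the original methods object):

// Hypothetical helper: record the captured stream for a few seconds (illustration only)
function recordAudio(durationMs: number = 5000) {
  const stream = audioSteam.current;
  if (!stream) return;
  const chunks: Blob[] = [];
  const recorder = new MediaRecorder(stream);
  recorder.ondataavailable = (event) => {
    if (event.data.size > 0) chunks.push(event.data);
  };
  recorder.onstop = () => {
    // Combine the recorded chunks into a single Blob that can be played back or uploaded
    const blob = new Blob(chunks, { type: recorder.mimeType });
    console.log('recorded audio blob:', blob);
  };
  recorder.start();
  setTimeout(() => recorder.stop(), durationMs);
}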
From: https://blog.csdn.net/u014708123/article/details/136849424