Implementing Real-Time Intercom with AudioContext and WebSocket
This post implements a simple real-time intercom: voice captured on one computer is streamed over a WebSocket to another computer and played back in real time.
Relaying via WebSocket
The WebSocket server simply relays the audio stream to the other connected clients; no further processing is needed.
const WebSocket = require('ws')
const wss = new WebSocket.Server({ port: 1041 }); // the server listens on port 1041
wss.on('connection', function (ws) {
console.log('client connected');
ws.on('message', (data, isBinary) => {
// When a message arrives, forward it to all other connected clients
wss.clients.forEach(function each(client) {
if (client !== ws && client.readyState === WebSocket.OPEN) {
client.send(data, { binary: isBinary });
}
});
});
});
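To sanity-check the relay without opening the browser pages, a small Node.js test script can help. This is a hypothetical helper, not part of the original post; it assumes the ws package is installed (npm install ws) and the relay above is running locally:
const WebSocket = require('ws')
// Two clients: one sends a Float32Array frame, the other should receive it
const sender = new WebSocket('ws://127.0.0.1:1041')
const receiver = new WebSocket('ws://127.0.0.1:1041')
receiver.on('message', (data) => {
// ws delivers a Buffer here, so length is in bytes: 4096 samples * 4 = 16384
console.log('received', data.length, 'bytes')
process.exit(0)
})
sender.on('open', () => {
// give the receiver a moment to connect before broadcasting
setTimeout(() => sender.send(new Float32Array(4096)), 100)
})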
Capturing audio from the microphone and sending it
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Document</title>
</head>
<body>
<button id="start">start</button>
<button id="stop">startstop</button>
<script>
// connect to the WebSocket relay server
const ws = new WebSocket('ws://192.168.220.223:1041')
ws.onopen = () => {
console.log('socket connected')
}
ws.onerror = (e) => {
console.log('error', e);
}
ws.onclose = () => {
console.log('socket closed')
}
document.getElementById('start').onclick = function () {
// This variable holds a reference to the current MediaStream;
// it is used later to close the microphone and stop the audio transmission
let mediaStack
var audioCtx = new AudioContext();
// Create a ScriptProcessorNode (buffer size 4096, 1 input / 1 output channel) to receive the microphone audio
var scriptNode = audioCtx.createScriptProcessor(4096, 1, 1);
navigator.mediaDevices.getUserMedia({ audio: true, video: false })
.then(function (stream) {
mediaStack = stream
var source = audioCtx.createMediaStreamSource(stream)
source.connect(scriptNode);
scriptNode.connect(audioCtx.destination);
})
.catch(function (err) {
/* handle the error */
console.log('err', err)
});
// Called whenever the node has a buffer of microphone audio ready.
// While the microphone is open this fires continuously, even when nobody is speaking.
scriptNode.onaudioprocess = function (audioProcessingEvent) {
var inputBuffer = audioProcessingEvent.inputBuffer;
// Only one channel was created, so take the data from channel 0
var inputData = inputBuffer.getChannelData(0);
console.log(inputData);
// Send the data over the socket; what is actually transmitted is a Float32Array
ws.send(inputData)
}
// Stop the microphone and detach the processor
document.getElementById('stop').onclick = function () {
mediaStack.getTracks()[0].stop()
scriptNode.disconnect()
};
}
</script>
</body>
</html>
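A caveat about the sender page: ScriptProcessorNode is deprecated in the Web Audio API in favor of AudioWorklet. The page above still works in current browsers, but a rough sketch of the same capture with an AudioWorklet is shown below. This is an alternative sketch, not the original author's code; note that a worklet delivers fixed 128-frame blocks rather than 4096-sample chunks, so the receiving side would need to match:
// capture-processor: runs on the audio rendering thread
const workletCode = `
class CaptureProcessor extends AudioWorkletProcessor {
  process (inputs) {
    // inputs[0][0] is channel 0 of the first input, a Float32Array of 128 samples
    if (inputs[0] && inputs[0][0]) this.port.postMessage(inputs[0][0])
    return true // keep the processor alive
  }
}
registerProcessor('capture-processor', CaptureProcessor)
`

async function startWorkletCapture (ws) {
  const audioCtx = new AudioContext()
  // load the processor from a Blob URL so everything stays in one file
  const moduleUrl = URL.createObjectURL(new Blob([workletCode], { type: 'application/javascript' }))
  await audioCtx.audioWorklet.addModule(moduleUrl)
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true, video: false })
  const source = audioCtx.createMediaStreamSource(stream)
  const node = new AudioWorkletNode(audioCtx, 'capture-processor')
  // each message is one 128-sample Float32Array from the worklet thread
  node.port.onmessage = ({ data }) => {
    if (ws.readyState === WebSocket.OPEN) ws.send(data)
  }
  source.connect(node)
  // keep the node pulled by the graph; the processor writes no output, so this is silent
  node.connect(audioCtx.destination)
  return { audioCtx, stream }
}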
Receiving the audio stream from the socket and playing it
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Document</title>
</head>
<body>
<button onclick="play()">play</button>
<script>
function play() {
const audioCtx = new AudioContext();
// connect to the relay server
const ws = new WebSocket('ws://127.0.0.1:1041')
ws.onopen = () => {
console.log('socket opened')
}
// receive binary messages as ArrayBuffer
ws.binaryType = 'arraybuffer'
ws.onmessage = ({data}) => {
// Reinterpret the received bytes as the same Float32Array the sender produced
const buffer = new Float32Array(data)
// Create a blank AudioBuffer; 4096 matches the sender's buffer size, and 48000 must match the sender's AudioContext sample rate
const myArrayBuffer = audioCtx.createBuffer(1, 4096, 48000);
// Again only one channel was created, so channel 0 is the one to fill
const nowBuffering = myArrayBuffer.getChannelData(0);
// Copy the received samples into the AudioBuffer
for (let i = 0; i < 4096; i++) {
nowBuffering[i] = buffer[i];
}
// Play the chunk with an AudioBufferSourceNode
const source = audioCtx.createBufferSource();
source.buffer = myArrayBuffer
source.connect(audioCtx.destination);
source.start();
}
ws.onerror = (e) => {
console.log('error', e);
}
ws.onclose = () => {
console.log('socket closed');
}
}
</script>
</body>
</html>
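One more note on playback: starting each AudioBufferSourceNode the moment its chunk arrives can produce audible clicks or gaps as network timing jitters. A minimal scheduling sketch (assuming chunks arrive in order and the sender really runs at 48000 Hz) queues each chunk at a running timestamp instead:
// keep a running timestamp so consecutive chunks play back to back
let nextStartTime = 0
function playChunk (audioCtx, floatData) {
  const audioBuffer = audioCtx.createBuffer(1, floatData.length, 48000)
  audioBuffer.getChannelData(0).set(floatData)
  const source = audioCtx.createBufferSource()
  source.buffer = audioBuffer
  source.connect(audioCtx.destination)
  // start at the later of "now" and the end of the previous chunk
  nextStartTime = Math.max(nextStartTime, audioCtx.currentTime)
  source.start(nextStartTime)
  nextStartTime += audioBuffer.duration
}
With this helper, the ws.onmessage handler would simply call playChunk(audioCtx, new Float32Array(data)) instead of building and starting the source inline.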
Source: https://www.cnblogs.com/Bin-x/p/16316184.html