Introduction
The previous two articles covered the basics of the Web Audio API; now let's use those APIs to build a simple live stream:
- Web audio stream forwarding: AudioNode
- Web audio stream forwarding: audio sources
How it works
- Live video: capture the video frame by frame, convert each frame to base64 and forward it; the receiver sets the base64 string as the src of an img element and keeps replacing it to produce motion
- Live audio: capture the audio as chunks of raw binary sample data, forward the binary data, and play the raw PCM back on the receiving end (a sketch of the wire format follows below)
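Both streams share a single WebSocket, so the receiver needs a way to tell them apart. A minimal sketch of the idea (float32Samples is an illustrative name): text messages carry the data-URL video frames, binary messages carry the PCM chunks.
//video frame: a data-URL string, sent as a WebSocket text message
ws.send(canvas.toDataURL('image/jpeg'));
//audio chunk: raw Float32 PCM bytes, sent as a WebSocket binary message
ws.send(float32Samples.buffer);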
Capturing and pushing the stream
- Accessing the camera and microphone requires HTTPS
- navigator.getUserMedia is deprecated; use navigator.mediaDevices.getUserMedia instead, with a compatibility shim for older browsers (a sketch follows below)
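The shim below follows the compatibility pattern documented on MDN: it wraps the old prefixed, callback-based getUserMedia in a Promise so the capture code can always call navigator.mediaDevices.getUserMedia.
//polyfill navigator.mediaDevices.getUserMedia for older browsers
if (navigator.mediaDevices === undefined) {
    navigator.mediaDevices = {};
}
if (navigator.mediaDevices.getUserMedia === undefined) {
    navigator.mediaDevices.getUserMedia = function(constraints) {
        //fall back to the old prefixed, callback-based API
        let getUserMedia = navigator.getUserMedia ||
            navigator.webkitGetUserMedia ||
            navigator.mozGetUserMedia;
        if (!getUserMedia) {
            return Promise.reject(new Error('getUserMedia is not implemented in this browser'));
        }
        //wrap the callback API in a Promise
        return new Promise(function(resolve, reject) {
            getUserMedia.call(navigator, constraints, resolve, reject);
        });
    };
}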
//Get the audio and video stream
mediaDevices = navigator.mediaDevices.getUserMedia({audio: true, video: { width: 320, height: 240 }});
mediaDevices.then(stream => {
    //Play the video stream through the video element
    video.srcObject = stream;
    video.play();
    //Feed the audio stream into an AudioNode graph so the samples can be captured
    let source = audioCtx.createMediaStreamSource(stream),
        recorder = audioCtx.createScriptProcessor(2048, 1, 1);
    source.connect(recorder);
    recorder.connect(audioCtx.destination);
    recorder.onaudioprocess = function(ev){
        //Capture the mono channel data (a Float32Array of 2048 samples)
        let inputBuffer = ev.inputBuffer.getChannelData(0);
        //Send the current canvas frame as a base64 data URL
        ws.send(canvas.toDataURL(`image/jpeg`));
        //Send the raw PCM audio data
        ws.send(inputBuffer.buffer);
    };
});
video.onplay = function(){
    //Draw the current video frame onto the canvas every 30ms
    interval = setInterval(function(){
        ctx.drawImage(video, 0, 0);
    },30);
};
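A caveat on the capture node: createScriptProcessor is deprecated in the Web Audio spec in favor of AudioWorklet. Below is a minimal sketch of the same capture step as a worklet, assuming a separate file named recorder-processor.js (the file and node names are illustrative, and source/ws come from the surrounding code); note that worklets deliver fixed 128-sample blocks rather than 2048.
//recorder-processor.js (runs on the audio rendering thread)
class RecorderProcessor extends AudioWorkletProcessor {
    process(inputs, outputs) {
        if (inputs[0].length > 0) {
            //copy channel 0 of the first input and post it to the main thread
            this.port.postMessage(inputs[0][0].slice(0));
        }
        return true; //keep the processor alive
    }
}
registerProcessor('recorder-processor', RecorderProcessor);

//main thread: load the module, insert the node, forward each block
audioCtx.audioWorklet.addModule('recorder-processor.js').then(() => {
    let workletNode = new AudioWorkletNode(audioCtx, 'recorder-processor');
    source.connect(workletNode);
    workletNode.port.onmessage = ev => ws.send(ev.data.buffer);
});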
Receiving the stream
Buffer the received data before starting playback, so the stream absorbs network jitter and plays back smoothly.
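The cost of this buffer is easy to estimate: each binary message carries one ScriptProcessor block of 2048 samples, which at a typical 44.1kHz context sample rate is 2048 / 44100 ≈ 46ms of audio, so waiting for three chunks before starting playback adds roughly 140ms of startup latency.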
let ws = new WebSocket("wss://192.168.3.102"),
    imgChuncks = [],
    audioChuncks = [],
    img = null;
//How binary messages are delivered; the default is Blob
ws.binaryType = `arraybuffer`;
ws.onmessage = function(evt) {
    if(evt.data.byteLength === undefined) {
        //a text message: a base64 image frame
        imgChuncks.push(evt.data);
    }else{
        //a binary message: raw PCM audio data
        audioChuncks.push(new Float32Array(evt.data));
    }
    //start playback once more than two audio chunks are buffered
    if(!img && audioChuncks.length > 2){
        myplay();
    }
};
Playing the stream
//Create the audio/video playback function
function myplay(){
    //Create an img element to display the incoming frames
    img = new Image();
    document.body.appendChild(img);
    //Build the playback graph: a ScriptProcessor whose output buffer is filled by hand
    let source = audioCtx.createBufferSource(),
        recorder = audioCtx.createScriptProcessor(2048, 1, 1);
    source.connect(recorder);
    recorder.connect(audioCtx.destination);
    recorder.onaudioprocess = function(ev){
        //Swap the img src to animate the video
        img.src = imgChuncks.shift();
        //Write the buffered PCM data into the output, or silence if the buffer runs dry
        ev.outputBuffer.copyToChannel(audioChuncks.shift() || new Float32Array(2048), 0, 0);
    };
}
Notes
- This is only a demo program; nothing has been optimized
- When testing, listen through headphones, or put the speaker and the microphone in different rooms: there is no echo cancellation or clipping protection, so an open speaker will feed straight back into the microphone (see the sketch after this list)
- Generate your own HTTPS certificate for testing
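For the echo problem, browsers can apply acoustic echo cancellation at capture time through the audio track constraints; a minimal sketch of the adjusted getUserMedia call:
navigator.mediaDevices.getUserMedia({
    //ask the browser to run acoustic echo cancellation on the captured track
    audio: { echoCancellation: true },
    video: { width: 320, height: 240 }
});
For the certificate, a self-signed key/cert pair matching the file names used in servers.js below can be generated with OpenSSL, e.g. openssl req -x509 -newkey rsa:2048 -nodes -days 365 -keyout key.pem -out key-cert.pem.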
Complete code
index.html
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0, minimum-scale=1.0, maximum-scale=1.0, user-scalable=no">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title></title>
<link rel="stylesheet" href="">
<style type="text/css" media="screen">
video, canvas {
background-color: #e9e9e9;
margin:0 auto;
display: block;
}
body {
text-align: center;
}
video {
display: none;
}
</style>
</head>
<body>
<canvas width="320px" height="240px">
</canvas>
<video src="" width="320px" height="240px" controls muted></video>
<button type="button" class="start">Start</button>
</body>
<script type="text/javascript">
let ws = new WebSocket("wss://192.168.3.102"),
    imgChuncks = [],
    audioChuncks = [],
    img = null;
//How binary messages are delivered; the default is Blob
ws.binaryType = `arraybuffer`;
ws.onmessage = function(evt) {
    if(evt.data.byteLength === undefined) {
        //a text message: a base64 image frame
        imgChuncks.push(evt.data);
    }else{
        //a binary message: raw PCM audio data
        audioChuncks.push(new Float32Array(evt.data));
    }
    //start playback once more than two audio chunks are buffered
    if(!img && audioChuncks.length > 2){
        myplay();
    }
};
//Create the audio/video playback function
function myplay(){
    //Create an img element to display the incoming frames
    img = new Image();
    document.body.appendChild(img);
    //Build the playback graph: a ScriptProcessor whose output buffer is filled by hand
    let source = audioCtx.createBufferSource(),
        recorder = audioCtx.createScriptProcessor(2048, 1, 1);
    source.connect(recorder);
    recorder.connect(audioCtx.destination);
    recorder.onaudioprocess = function(ev){
        //Swap the img src to animate the video
        img.src = imgChuncks.shift();
        //Write the buffered PCM data into the output, or silence if the buffer runs dry
        ev.outputBuffer.copyToChannel(audioChuncks.shift() || new Float32Array(2048), 0, 0);
    };
}
let video = document.querySelector(`video`),
    start = document.querySelector(`.start`),
    canvas = document.querySelector(`canvas`),
    ctx = canvas.getContext(`2d`),
    audioCtx = new (window.AudioContext || window.webkitAudioContext)(),
    interval = null,
    mediaDevices = null;
//Click Start to begin capturing
start.onclick = function(){
    //Get the audio and video stream
    mediaDevices = navigator.mediaDevices.getUserMedia({audio: true, video: { width: 320, height: 240 }});
    mediaDevices.then(stream => {
        //Play the video stream through the video element
        video.srcObject = stream;
        video.play();
        //Feed the audio stream into an AudioNode graph so the samples can be captured
        let source = audioCtx.createMediaStreamSource(stream),
            recorder = audioCtx.createScriptProcessor(2048, 1, 1);
        source.connect(recorder);
        recorder.connect(audioCtx.destination);
        recorder.onaudioprocess = function(ev){
            //Capture the mono channel data (a Float32Array of 2048 samples)
            let inputBuffer = ev.inputBuffer.getChannelData(0);
            //Send the current canvas frame as a base64 data URL
            ws.send(canvas.toDataURL(`image/jpeg`));
            //Send the raw PCM audio data
            ws.send(inputBuffer.buffer);
        };
    });
};
video.onplay = function(){
    //Draw the current video frame onto the canvas every 30ms
    interval = setInterval(function(){
        ctx.drawImage(video, 0, 0);
    },30);
};
</script>
</html>
servers.js
let https = require(`https`),
    fs = require(`fs`),
    WebSocket = require(`ws`),
    options = {
        key: fs.readFileSync(`./key.pem`),
        cert: fs.readFileSync(`./key-cert.pem`)
    },
    server = https.createServer(options, function(req, res){
        //serve the demo page for any request
        fs.readFile(`./index.html`, function(err, data){
            res.writeHead(200, {"Content-Type": "text/html"});
            res.end(data);
        });
    }).listen(443, function(){
        console.log(`server started`);
    });
const wss = new WebSocket.Server({server});
wss.on(`connection`, (ws) => {
    ws.on(`message`, function(data) {
        //relay every message to every other connected client
        wss.clients.forEach(function each(client) {
            if (client.readyState === WebSocket.OPEN && client !== ws) {
                client.send(data);
            }
        });
    });
});
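To try it out (assuming Node.js with the ws package installed, e.g. via npm install ws): place the two PEM files next to servers.js, run node servers.js, then open https://192.168.3.102 on two devices and press Start on the capturing one. Because the certificate is self-signed, the browser must be told to trust it before the wss:// connection will open.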