This is based mainly on two blog posts:
https://blog.csdn.net/zhang45362613/article/details/112538607 provides the capture approach: record audio in the page and upload the data to the server.
https://blog.csdn.net/qq422243639/article/details/79238983 provides the auto-play approach: play the data as soon as it arrives over the WebSocket. That post actually covers capture as well, but its code contains a lot of outdated statements, so it was not used here.
Frontend code:
The page pulls in a number of JS files; all of them can be taken directly from this project on GitHub: https://github.com/xiangyuecn/Recorder
<!DOCTYPE HTML>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=0">
<title>Audio Test</title>
<script src="js/recorder-core.js"></script>
<script src="js/mp3.js"></script>
<script src="js/mp3-engine.js"></script>
<script src="js/wav.js"></script>
<script src="js/frequency.histogram.view.js"></script>
<script src="js/lib.fft.js"></script>
</head>
<body>
<div class="main">
<div class="mainBox">
<div class="pd btns">
<div>
<button onclick="recOpen()" style="margin-right:10px">打开录音,请求权限</button>
<button onclick="recClose()" style="margin-right:0">关闭录音,释放资源</button>
</div>
<button onclick="recStart()">录制</button>
<button onclick="recStop()" style="margin-right:80px">停止</button>
</div>
<div class="pd recpower">
<div style="height:40px;width:300px;background:#999;position:relative;">
<div class="recpowerx" style="height:40px;background:#0B1;position:absolute;"></div>
<div class="recpowert" style="padding-left:50px; line-height:40px; position: relative;"></div>
</div>
</div>
<div class="pd waveBox">
<div style="border:1px solid #ccc;display:inline-block"><div style="height:100px;width:300px;" class="recwave"></div></div>
</div>
</div>
</div>
<script>
let websocketUrl = "ws://"+window.location.host+"/voiceChat/voice";
var testSampleRate=16000; // recording sample rate
var testBitRate=16; // mp3 bitrate (kbps)
var SendInterval=300; // flush buffered chunks to the server at most every 300ms
var RealTimeSendTryReset=function(){
realTimeSendTryTime=0;
};
var realTimeSendTryTime=0;
var realTimeSendTryNumber;
var transferUploadNumberMax;
var realTimeSendTryBytesChunks = [];
var realTimeSendTryClearPrevBufferIdx;
var realTimeSendTryWavTestSampleRate;
var blob=null,meta=null;
var chunkData =null;
var RealTimeSendTry=function(chunkBytes,isClose){
if(chunkBytes){
realTimeSendTryBytesChunks.push(chunkBytes);
}
let t1=Date.now();
if(!isClose && t1-realTimeSendTryTime<SendInterval){
return;
}
realTimeSendTryTime=t1;
var number=++realTimeSendTryNumber;
var len=0;
for(var i=0;i<realTimeSendTryBytesChunks.length;i++){
len+=realTimeSendTryBytesChunks[i].length;
}
chunkData= new Uint8Array(len);
for(var i=0,idx=0;i<realTimeSendTryBytesChunks.length;i++){
var chunk=realTimeSendTryBytesChunks[i];
chunkData.set(chunk,idx);
idx+=chunk.length;
}
meta={};
if(chunkData.length>0){
blob=new Blob([chunkData],{type:"audio/mp3"});
meta=Recorder.mp3ReadMeta([chunkData.buffer],chunkData.length)||{};
}
TransferUpload(number
,blob
,meta.duration||0
,{set:{
type:"mp3"
,sampleRate:meta.sampleRate
,bitRate:meta.bitRate
}}
,isClose
);
realTimeSendTryBytesChunks = [];
chunkData = null;
meta = null;
blob = null;
};
var RealTimeOnProcessClear=function(buffers,powerLevel,bufferDuration,bufferSampleRate,newBufferIdx,asyncEnd){
if(realTimeSendTryTime==0){
realTimeSendTryTime=Date.now();
realTimeSendTryNumber=0;
transferUploadNumberMax=0;
realTimeSendTryBytesChunks=[];
realTimeSendTryClearPrevBufferIdx=0;
realTimeSendTryWavTestSampleRate=0;
}
for(var i=realTimeSendTryClearPrevBufferIdx;i<newBufferIdx;i++){
buffers[i]=null;
}
buffers=null;
realTimeSendTryClearPrevBufferIdx=newBufferIdx;
};
var TransferUpload=function(number,blobOrNull,duration,blobRec,isClose){
transferUploadNumberMax=Math.max(transferUploadNumberMax,number);
if(blobOrNull && ws && ws.readyState === 1){
ws.send(blobOrNull);
}
if(isClose){
ws.close();
console.info("No."+(number<100?("000"+number).substr(-3):number)+": transmission stopped");
}
};
var rec;
var wave=null;
function recStart(){
if(rec){
rec.close();
}
rec=Recorder({
type:"mp3"
,sampleRate:testSampleRate
,bitRate:testBitRate
,onProcess:function(buffers,powerLevel,bufferDuration,bufferSampleRate,newBufferIdx,asyncEnd){
if(wave) wave.input(buffers[buffers.length-1],powerLevel,bufferSampleRate);
RealTimeOnProcessClear(buffers,powerLevel,bufferDuration,bufferSampleRate,newBufferIdx,asyncEnd);
}
,takeoffEncodeChunk:function(chunkBytes){
RealTimeSendTry(chunkBytes,false);
}
});
var t=setTimeout(function(){
console.info("无法录音:权限请求被忽略(超时假装手动点击了确认对话框)",1);
},8000);
rec.open(function(){
clearTimeout(t);
wave=Recorder.FrequencyHistogramView({elem:".recwave"});
RealTimeSendTryReset();
useWebSocket(); // recording is started in ws.onopen once the socket is ready
},function(msg,isUserNotAllow){
clearTimeout(t);
console.info((isUserNotAllow?"UserNotAllow,":"")+"Cannot record: "+msg, 1);
});
};
function recStop(){
rec.close();
RealTimeSendTry(null,true);
};
var ws = null;
function useWebSocket() {
ws = new WebSocket(websocketUrl);
ws.binaryType = 'arraybuffer';
ws.onopen = function () {
console.log('WebSocket connected');
if (ws.readyState == 1) {
rec.start(); // start recording once the socket can accept data
}
};
var audioContext = new (window.AudioContext || window.webkitAudioContext)();
ws.onmessage = function (msg) {
if (typeof msg.data === 'string') { // text frames (e.g. the server greeting) are just logged
console.log(msg.data);
return;
}
audioContext.decodeAudioData(msg.data, function(buffer) {
var sourceNode = audioContext.createBufferSource();
sourceNode.buffer = buffer;
sourceNode.connect(audioContext.destination);
sourceNode.start(0); // play the received chunk immediately
}, function(e) {
console.log("failed to decode the received audio: " + e);
});
};
ws.onerror = function (err) {
console.info('WebSocket error', err);
};
ws.onclose = function (e) {
console.info('WebSocket closed');
};
}
</script>
<script>
if(/mobile/i.test(navigator.userAgent)){
var elem=document.createElement("script");
elem.setAttribute("type","text/javascript");
elem.setAttribute("src","https://cdn.bootcss.com/eruda/1.5.4/eruda.min.js");
document.body.appendChild(elem);
elem.onload=function(){
eruda.init();
};
};
</script>
<style>
body{
word-wrap: break-word;
background:#f5f5f5 center top no-repeat;
background-size: auto 680px;
}
pre{
white-space:pre-wrap;
}
a{
text-decoration: none;
color:#06c;
}
a:hover{
color:#f00;
}
.main{
max-width:700px;
margin:0 auto;
padding-bottom:80px
}
.mainBox{
margin-top:12px;
padding: 12px;
border-radius: 6px;
background: #fff;
--border: 1px solid #f60;
box-shadow: 2px 2px 3px #aaa;
}
.btns button{
display: inline-block;
cursor: pointer;
border: none;
border-radius: 3px;
background: #f60;
color:#fff;
padding: 0 15px;
margin:3px 20px 3px 0;
line-height: 36px;
height: 36px;
overflow: hidden;
vertical-align: middle;
}
.btns button:active{
background: #f00;
}
.pd{
padding:0 0 6px 0;
}
.lb{
display:inline-block;
vertical-align: middle;
background:#00940e;
color:#fff;
font-size:14px;
padding:2px 8px;
border-radius: 99px;
}
</style>
</body>
</html>
Backend code:
package com.cakecn.controller.socket;
import org.springframework.web.bind.annotation.RestController;
import javax.websocket.*;
import javax.websocket.server.PathParam;
import javax.websocket.server.ServerEndpoint;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.Hashtable;
import java.util.Map;
@RestController
@ServerEndpoint("/voiceChat/{key}")
public class VoiceChatSocket {
// one session per client, keyed by the {key} path parameter
private static final Map<String, Session> connections = new Hashtable<>();
@OnOpen
public void onOpen(@PathParam("key") String id, Session session) {
try {
System.out.println(id + " connected");
connections.put(id, session);
connections.get(id).getBasicRemote().sendText("connected");
} catch (IOException e) {
e.printStackTrace();
}
}
@OnMessage
public void onMessage(@PathParam("key") String id, InputStream inputStream) {
try {
// read the whole binary frame; available() is not guaranteed to return its full size
ByteArrayOutputStream out = new ByteArrayOutputStream();
byte[] buff = new byte[4096];
int len;
while ((len = inputStream.read(buff)) != -1) {
out.write(buff, 0, len);
}
// echo the mp3 chunk back to the sender; the page decodes and plays it on arrival
connections.get(id).getBasicRemote().sendBinary(ByteBuffer.wrap(out.toByteArray()));
} catch (Exception e) {
e.printStackTrace();
}
}
@OnError
public void onError(Throwable throwable) {
throwable.printStackTrace();
}
@OnClose
public void onClose(@PathParam("key") String id) {
System.out.println(id + " disconnected");
connections.remove(id);
}
}
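As written, onMessage just echoes the audio back to the sender, which is enough to test capture and playback on a single page. To relay voice to the other participants instead, the same connections map can be used to forward each chunk to every open session except the sender's. A minimal sketch of such a helper (the name broadcast and its use inside onMessage are not part of the original code):

// Hypothetical helper for VoiceChatSocket: relay an audio chunk to every
// connected session except the one that sent it.
private void broadcast(String senderId, byte[] chunk) throws IOException {
    for (Map.Entry<String, Session> entry : connections.entrySet()) {
        if (!entry.getKey().equals(senderId) && entry.getValue().isOpen()) {
            // a fresh ByteBuffer per peer, so sends do not share a read position
            entry.getValue().getBasicRemote().sendBinary(ByteBuffer.wrap(chunk));
        }
    }
}

Inside onMessage this would take the place of the sendBinary echo, e.g. broadcast(id, out.toByteArray());.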
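One thing not shown above: when this endpoint runs on Spring Boot with the embedded Tomcat, classes annotated with @ServerEndpoint are only registered if a ServerEndpointExporter bean is declared (a WAR deployed to a standalone servlet container does not need it). A minimal sketch of that configuration, assuming a Spring Boot setup; the package and class names here are arbitrary:

package com.cakecn.config;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.socket.server.standard.ServerEndpointExporter;

@Configuration
public class WebSocketConfig {
    // Scans for @ServerEndpoint-annotated beans and registers them with the embedded container.
    @Bean
    public ServerEndpointExporter serverEndpointExporter() {
        return new ServerEndpointExporter();
    }
}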