Implementing MP3 recording in JS, streamed in real time over WebSocket, with a simple waveform display
Waveform:
A few words first: I never expected my first blog post to be about the front end. As a back-end junior I still have plenty of gaps on the front-end side, so if anything in this article is wrong, corrections are very welcome. Thanks, everyone. Enough rambling, on to the requirement.
Requirement: the company wants web-based recording that is uploaded in real time over WebSocket to a Java backend and can be played back live through VLC. Put simply, I talk into a web page on one end and a loudspeaker plays my voice live on the other end. The audio has to be in MP3 format. And of course, to see that you are actually talking, you need a waveform display. This post covers the first half; the VLC playback half doesn't fit here, but if you'd like to know how it's done, leave a comment and I can point you in the right direction.
Frontend implementation:
Include the recording script:
<script type="text/javascript" src="/js/recorder/recordmp3.js"></script>
This script differs slightly from the original author's version: I added a few things to it, and it in turn loads two other scripts, lame.min.js and worker-realtime.js, both of which are in the original author's code. (A rough sketch of what that worker does is given after the recordmp3.js listing below.)
Page:
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
    <meta http-equiv="Content-Type" content="text/html; charset=gb2312"/>
    <title>测试</title>
</head>
<body>
<button id="intercomBegin">开始对讲</button>
<button id="intercomEnd">关闭对讲</button>
<canvas id="casvased" style="width: 400px;height: 100px"></canvas>
</body>
<script type="text/javascript" src="/js/jquery-3.3.1.js"></script>
<script type="text/javascript" src="/js/recorder/recordmp3.js"></script>
<script type="text/javascript">
    var begin = document.getElementById('intercomBegin');
    var end = document.getElementById('intercomEnd');
    var canvas = document.getElementById("casvased");
    var canvasCtx = canvas.getContext("2d");
    var ws = null; // WebSocket instance
    var recorder;

    /*
     * WebSocket
     */
    function useWebSocket() {
        ws = new WebSocket("ws://127.0.0.1:8089/send/voice");
        ws.binaryType = 'arraybuffer'; // the payload is ArrayBuffer data
        ws.onopen = function () {
            console.log('握手成功');
            if (ws.readyState == 1) { // the connection is open, start recording
                recorder.start();
            }
        };
        ws.onmessage = function (msg) {
            console.info(msg);
        };
        ws.onerror = function (err) {
            console.info(err);
        };
    }

    /*
     * Start the intercom
     */
    begin.onclick = function () {
        recorder = new MP3Recorder({
            debug: true,
            funOk: function () {
                console.log('点击录制,开始录音!');
            },
            funCancel: function (msg) {
                console.log(msg);
                recorder = null;
            }
        });
    };

    /*
     * Stop the intercom
     */
    end.onclick = function () {
        if (ws) {
            ws.close();
            recorder.stop();
            console.log('关闭对讲以及websocket');
        }
    };

    // split the captured MP3 data into 1024-byte packets and send them over the WebSocket
    var sendData = function () {
        var reader = new FileReader();
        reader.onload = e => {
            var outBuffer = e.target.result;
            var arr = new Int8Array(outBuffer);
            if (arr.length > 0) {
                var tmpArr = new Int8Array(1024);
                var j = 0;
                for (var i = 0; i < arr.byteLength; i++) {
                    tmpArr[j++] = arr[i];
                    if (((i + 1) % 1024) == 0) {
                        ws.send(tmpArr);
                        if (arr.byteLength - i - 1 >= 1024) {
                            tmpArr = new Int8Array(1024);
                        } else {
                            tmpArr = new Int8Array(arr.byteLength - i - 1);
                        }
                        j = 0;
                    }
                    if ((i + 1 == arr.byteLength) && ((i + 1) % 1024) != 0) {
                        ws.send(tmpArr);
                    }
                }
            }
        };
        recorder.getMp3Blob(function (blob) {
            reader.readAsArrayBuffer(blob); // read the MP3-encoded blob into the reader
        });
    };
</script>
</html>

recordmp3.js:

(function (exports) {
    var MP3Recorder = function (config) {
        var recorder = this;
        config = config || {};
        config.sampleRate = config.sampleRate || 44100;
        config.bitRate = config.bitRate || 128;
        navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;
        if (navigator.getUserMedia) {
            navigator.getUserMedia({audio: true},
                function (stream) {
                    var context = new AudioContext(),
                        microphone = context.createMediaStreamSource(stream),
                        processor = context.createScriptProcessor(16384, 1, 1), // buffer size, input channels, output channels
                        mp3ReceiveSuccess, currentErrorCallback;

                    var height = 100;
                    var width = 400;
                    const analyser = context.createAnalyser();
                    analyser.fftSize = 1024;
                    // connect the analyser to the audio source
                    microphone.connect(analyser);
                    analyser.connect(context.destination);
                    const bufferLength = analyser.frequencyBinCount; // half of the analyser's fftSize
                    const dataArray = new Uint8Array(bufferLength);

                    function draw() {
                        canvasCtx.clearRect(0, 0, width, height); // clear the canvas
                        analyser.getByteFrequencyData(dataArray); // copy the current frequency data into the Uint8Array
                        const requestAnimFrame = window.requestAnimationFrame(draw) || window.webkitRequestAnimationFrame(draw);
                        canvasCtx.fillStyle = '#000130';
                        canvasCtx.fillRect(0, 0, width, height);
                        let barWidth = (width / bufferLength) * 2;
                        let barHeight;
                        let x = 0;
                        let c = 2;
                        for (let i = 0; i < bufferLength; i++) {
                            barHeight = c + (dataArray[i] / 400) * height;
                            canvasCtx.fillStyle = 'rgb(0, 255, 30)';
                            canvasCtx.fillRect(x, height / 2 - barHeight / 2, barWidth, barHeight);
                            x += barWidth + 1;
                        }
                    }

                    draw();
                    useWebSocket();
                    config.sampleRate = context.sampleRate;
                    processor.onaudioprocess = function (event) {
                        // encode while recording
                        var array = event.inputBuffer.getChannelData(0);
                        realTimeWorker.postMessage({cmd: 'encode', buf: array});
                        sendData();
                    };

                    var realTimeWorker = new Worker('/js/recorder/worker-realtime.js');
                    realTimeWorker.onmessage = function (e) {
                        switch (e.data.cmd) {
                            case 'init':
                                log('初始化成功');
                                if (config.funOk) {
                                    config.funOk();
                                }
                                break;
                            case 'end':
                                log('mp3大小:' + e.data.buf.length);
                                if (mp3ReceiveSuccess) {
                                    mp3ReceiveSuccess(new Blob(e.data.buf, {type: 'audio/mp3'}));
                                }
                                break;
                            case 'error':
                                log('错误信息:' + e.data.error);
                                if (currentErrorCallback) {
                                    currentErrorCallback(e.data.error);
                                }
                                break;
                            default:
                                log('未知信息:' + e.data);
                        }
                    };

                    recorder.getMp3Blob = function (onSuccess, onError) {
                        currentErrorCallback = onError;
                        mp3ReceiveSuccess = onSuccess;
                        realTimeWorker.postMessage({cmd: 'finish'});
                    };
                    recorder.start = function () {
                        if (processor && microphone) {
                            microphone.connect(processor);
                            processor.connect(context.destination);
                            log('开始录音');
                        }
                    };
                    recorder.stop = function () {
                        if (processor && microphone) {
                            microphone.disconnect();
                            processor.disconnect();
                            log('录音结束');
                        }
                    };

                    realTimeWorker.postMessage({
                        cmd: 'init',
                        config: {
                            sampleRate: config.sampleRate,
                            bitRate: config.bitRate
                        }
                    });
                },
                function (error) {
                    var msg;
                    switch (error.code || error.name) {
                        case 'PERMISSION_DENIED':
                        case 'PermissionDeniedError':
                            msg = '用户拒绝访问麦克风';
                            break;
                        case 'NOT_SUPPORTED_ERROR':
                        case 'NotSupportedError':
                            msg = '浏览器不支持麦克风';
                            break;
                        case 'MANDATORY_UNSATISFIED_ERROR':
                        case 'MandatoryUnsatisfiedError':
                            msg = '找不到麦克风设备';
                            break;
                        default:
                            msg = '无法打开麦克风,异常信息:' + (error.code || error.name);
                            break;
                    }
                    if (config.funCancel) {
                        config.funCancel(msg);
                    }
                });
        } else {
            if (config.funCancel) {
                config.funCancel('当前浏览器不支持录音功能');
            }
        }

        function log(str) {
            if (config.debug) {
                console.log(str);
            }
        }
    };
    exports.MP3Recorder = MP3Recorder;
})(window);
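The worker file itself (worker-realtime.js) is not reproduced here; it comes from the original author's code. To make the message flow above easier to follow, here is a minimal sketch of a worker that answers the 'init' / 'encode' / 'finish' messages recordmp3.js sends. It assumes lame.min.js is a lamejs-style build exposing lamejs.Mp3Encoder and sits next to the worker; treat it as an illustration, not the actual file.

// worker-realtime.js — illustrative sketch only, not the original file
importScripts('lame.min.js'); // assumed path; assumed to expose lamejs.Mp3Encoder

var encoder = null;
var mp3Chunks = []; // MP3 frames encoded since the last 'finish'

onmessage = function (e) {
    switch (e.data.cmd) {
        case 'init':
            // mono encoder at the sample rate / bit rate posted by recordmp3.js
            encoder = new lamejs.Mp3Encoder(1, e.data.config.sampleRate, e.data.config.bitRate);
            postMessage({cmd: 'init'});
            break;
        case 'encode':
            // convert the Float32 samples from the ScriptProcessor to 16-bit PCM, then encode
            var samples = e.data.buf;
            var pcm = new Int16Array(samples.length);
            for (var i = 0; i < samples.length; i++) {
                var s = Math.max(-1, Math.min(1, samples[i]));
                pcm[i] = s < 0 ? s * 0x8000 : s * 0x7FFF;
            }
            var mp3buf = encoder.encodeBuffer(pcm);
            if (mp3buf.length > 0) {
                mp3Chunks.push(new Int8Array(mp3buf));
            }
            break;
        case 'finish':
            // simplification: flush and hand back everything encoded since the last 'finish';
            // recordmp3.js wraps the chunks in a Blob and sendData() pushes them over the WebSocket
            var last = encoder.flush();
            if (last.length > 0) {
                mp3Chunks.push(new Int8Array(last));
            }
            postMessage({cmd: 'end', buf: mp3Chunks});
            mp3Chunks = [];
            break;
    }
};

Note that recordmp3.js posts 'finish' from every onaudioprocess callback (via sendData), so whatever the real worker does must tolerate repeated 'finish' messages; the sketch above simply returns the frames accumulated since the previous one.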
Backend WebSocket:
What the backend does here is simply buffer the incoming bytes and save them as an MP3 file. The endpoint path is /send/{key}; the page above connects to ws://127.0.0.1:8089/send/voice, so key resolves to "voice".
package com.jetosend.common.socket;

import com.jetosend.common.utils.Utils;
import org.springframework.stereotype.Component;

import javax.websocket.*;
import javax.websocket.server.PathParam;
import javax.websocket.server.ServerEndpoint;
import java.io.*;
import java.nio.ByteBuffer;
import java.util.Hashtable;
import java.util.Map;

@ServerEndpoint("/send/{key}")
@Component
public class ServerSocket {

    private static final Map<String, Session> connections = new Hashtable<>();

    ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();

    /***
     * @description: open the connection
     * @param: [id - resource code of the remote platform, session]
     * @return: void
     * @author: liting
     * @date: 2019-10-10 09:22
     */
    @OnOpen
    public void onOpen(@PathParam("key") String id, Session session) {
        System.out.println(id + "连上了");
        connections.put(id, session);
    }

    /**
     * Receive a message: append the incoming bytes to the in-memory buffer
     */
    @OnMessage
    public void onMessage(@PathParam("key") String id, InputStream inputStream) {
        System.out.println("来自" + id);
        try {
            int rc = 0;
            byte[] buff = new byte[100];
            while ((rc = inputStream.read(buff, 0, 100)) > 0) {
                byteArrayOutputStream.write(buff, 0, rc);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Error handling
     */
    @OnError
    public void onError(Throwable throwable) {
        throwable.printStackTrace();
        // TODO log the exception
    }

    /**
     * Close the connection and write the buffered bytes out as an MP3 file
     */
    @OnClose
    public void onClose(@PathParam("key") String id) {
        System.out.println(id + "断开");
        BufferedOutputStream bos = null;
        FileOutputStream fos = null;
        File file = null;
        try {
            file = new File("D:\\testtest.mp3");
            // file output stream
            fos = new FileOutputStream(file);
            // buffered stream
            bos = new BufferedOutputStream(fos);
            // write out the byte array
            bos.write(byteArrayOutputStream.toByteArray());
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (bos != null) {
                try {
                    bos.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
            if (fos != null) {
                try {
                    fos.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
        connections.remove(id);
    }
}
Result:
Summary
That's all for this article on implementing MP3 recording in JS with real-time transmission over WebSocket and a simple waveform display. For more on recording MP3 in JS, search earlier articles or keep reading the related articles below. I hope you'll keep supporting this site!