使用websocket实现录音实例

参考文档:认识HTML5的WebSocket

chrome 支持语音聊天 下面介绍一个chrome 的录音实例:


1

2

3

4

5

6

7

8

9

10

11

12

13

14

15

16

17

18

19

20

21

22

23

24

25

26

27

28

29

30

31

32

33

34

35

36

37

38

39

40

41

42

43

44

45

46

47

48

49

50

51

52

53

54

55

56

57

58

59

60

61

62

63

64

65

66

67

68

69

70

71

72

73

74

75

76

77

78

79

80

81

82

83

84

85

86

87

88

89

90

<!DOCTYPE HTML>
<html lang="en">
    <head>
        <meta charset="utf-8"/>
        <title>Chat by Web Sockets</title>
        <script type="text/javascript" src="js/recorder.js"></script>
        <script type="text/javascript" src="js/jquery-1.10.1.min.js"></script>
        <style type='text/css'>
        </style>
    </head>
    <body>
        <!-- id="audio" added: the script below looks this element up via querySelector('#audio') -->
        <audio id="audio" controls autoplay></audio>

        <input type="button" id="record" value="Record">
        <input type="button" id="export" value="Export">
        <div id="message"></div>
    </body>

    <script type='text/javascript'>
        // Called when getUserMedia is rejected or fails.
        var onFail = function(e) {
            console.log('Rejected!', e);
        };

        // Called with the captured MediaStream: wire it into a Web Audio
        // graph and hand the source node to the Recorder helper.
        var onSuccess = function(s) {
            // Prefer the standard AudioContext; fall back to the
            // webkit-prefixed constructor for older Chrome builds.
            var AudioCtx = window.AudioContext || window.webkitAudioContext;
            var context = new AudioCtx();
            var mediaStreamSource = context.createMediaStreamSource(s);
            rec = new Recorder(mediaStreamSource);
            //rec.record();

            // audio loopback
            // mediaStreamSource.connect(context.destination);
        }

        //window.URL = URL || window.URL || window.webkitURL;
        navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;

        var rec;          // Recorder instance, set once the stream is available
        var intervalKey;  // timer id for the periodic WAV export (was an implicit global)
        var audio = document.querySelector('#audio');

        // Ask the browser for microphone access.
        function startRecording() {
            if (navigator.getUserMedia) {
                navigator.getUserMedia({audio: true}, onSuccess, onFail);
            } else {
                console.log('navigator.getUserMedia not present');
            }
        }
        startRecording();

        //--------------------
        $('#record').click(function() {
            rec.record();
            ws.send("start");
            $("#message").text("Click export to stop recording");

            // export a wav every 3 seconds, so we can send it using websockets
            intervalKey = setInterval(function() {
                rec.exportWAV(function(blob) {
                    rec.clear();
                    ws.send(blob);
                    //audio.src = URL.createObjectURL(blob);
                });
            }, 3000);
        });

        $('#export').click(function() {
            // first send the stop command
            rec.stop();
            ws.send("stop");
            clearInterval(intervalKey);

            ws.send("analyze");
            $("#message").text("");
        });

        // NOTE(review): the Java example server below listens on port 8080,
        // not 8088 — confirm the host/port/path against the deployed server.
        var ws = new WebSocket("ws://127.0.0.1:8088/websocket/servlet/record");
        ws.onopen = function () {
            console.log("Openened connection to websocket");
        };
        ws.onclose = function () {
            console.log("Close connection to websocket");
        };
        ws.onmessage = function(e) {
            audio.src = URL.createObjectURL(e.data);
        };
    </script>
</html>

recorder.js内容:


1

2

3

4

5

6

7

8

9

10

11

12

13

14

15

16

17

18

19

20

21

22

23

24

25

26

27

28

29

30

31

32

33

34

35

36

37

38

39

40

41

42

43

44

45

46

47

48

49

50

51

52

53

54

55

56

57

58

59

60

61

62

63

64

65

66

67

68

69

70

71

72

73

74

75

76

77

78

79

80

81

82

83

84

85

86

87

(function(window){

  var WORKER_PATH = 'js/recorderWorker.js';

  /**
   * Records audio from a Web Audio source node by capturing raw stereo PCM
   * in a script-processor callback and shipping it to a background worker.
   *
   * @param {AudioNode} source - node to record (e.g. a MediaStreamSource).
   * @param {Object} [cfg] - optional settings: bufferLen, workerPath,
   *     callback, type.
   */
  var Recorder = function(source, cfg){
    var config = cfg || {};
    var bufferLen = config.bufferLen || 4096;
    this.context = source.context;
    // createJavaScriptNode is the legacy pre-spec name; prefer the standard
    // createScriptProcessor when the browser provides it.
    this.node = (this.context.createScriptProcessor ||
                 this.context.createJavaScriptNode).call(this.context, bufferLen, 2, 2);
    var worker = new Worker(config.workerPath || WORKER_PATH);
    worker.postMessage({
      command: 'init',
      config: {
        sampleRate: this.context.sampleRate
      }
    });
    var recording = false,
      currCallback;

    // Forward each audio quantum (both channels) to the worker while recording.
    this.node.onaudioprocess = function(e){
      if (!recording) return;
      worker.postMessage({
        command: 'record',
        buffer: [
          e.inputBuffer.getChannelData(0),
          e.inputBuffer.getChannelData(1)
        ]
      });
    }

    // Merge new settings into the current config.
    this.configure = function(cfg){
      for (var prop in cfg){
        if (cfg.hasOwnProperty(prop)){
          config[prop] = cfg[prop];
        }
      }
    }

    this.record = function(){
      recording = true;
    }

    this.stop = function(){
      recording = false;
    }

    // Drop everything buffered so far in the worker.
    this.clear = function(){
      worker.postMessage({ command: 'clear' });
    }

    // Ask the worker for the raw (unencoded) per-channel buffers.
    this.getBuffer = function(cb) {
      currCallback = cb || config.callback;
      worker.postMessage({ command: 'getBuffer' })
    }

    // Ask the worker to encode the buffered audio as WAV; the resulting
    // Blob is delivered to the callback via worker.onmessage below.
    this.exportWAV = function(cb, type){
      currCallback = cb || config.callback;
      type = type || config.type || 'audio/wav';
      if (!currCallback) throw new Error('Callback not set');
      worker.postMessage({
        command: 'exportWAV',
        type: type
      });
    }

    worker.onmessage = function(e){
      var blob = e.data;
      currCallback(blob);
    }

    source.connect(this.node);
    this.node.connect(this.context.destination);    //this should not be necessary

  };

  /**
   * Trigger a browser download of the given blob.
   * @param {Blob} blob - data to save.
   * @param {string} [filename] - suggested name, defaults to 'output.wav'.
   */
  Recorder.forceDownload = function(blob, filename){
    var url = (window.URL || window.webkitURL).createObjectURL(blob);
    var link = window.document.createElement('a');
    link.href = url;
    link.download = filename || 'output.wav';
    var click = document.createEvent("Event");
    click.initEvent("click", true, true);
    link.dispatchEvent(click);
  }

  window.Recorder = Recorder;

})(window);

 

recorderWorker.js的内容:


1

2

3

4

5

6

7

8

9

10

11

12

13

14

15

16

17

18

19

20

21

22

23

24

25

26

27

28

29

30

31

32

33

34

35

36

37

38

39

40

41

42

43

44

45

46

47

48

49

50

51

52

53

54

55

56

57

58

59

60

61

62

63

64

65

66

67

68

69

70

71

72

73

74

75

76

77

78

79

80

81

82

83

84

85

86

87

88

89

90

91

92

93

94

95

96

97

98

99

100

101

102

103

104

105

106

107

108

109

110

111

112

113

114

115

116

117

118

119

120

121

122

123

124

125

126

127

128

129

130

131

// Worker-global recording state: per-channel chunk lists, total sample
// count, and the capture sample rate set by 'init'.
var recLength = 0,
  recBuffersL = [],
  recBuffersR = [],
  sampleRate;

// Dispatch commands posted from the main-thread Recorder wrapper.
this.onmessage = function(e){
  switch(e.data.command){
    case 'init':
      init(e.data.config);
      break;
    case 'record':
      record(e.data.buffer);
      break;
    case 'exportWAV':
      exportWAV(e.data.type);
      break;
    case 'getBuffer':
      getBuffer();
      break;
    case 'clear':
      clear();
      break;
  }
};

 

// Remember the capture sample rate so encodeWAV can write a correct header.
function init(config){
  sampleRate = config.sampleRate;
}

 

// Append one quantum of per-channel samples and track the total length.
// inputBuffer is [leftChannelFloat32Array, rightChannelFloat32Array].
function record(inputBuffer){
  recBuffersL.push(inputBuffer[0]);
  recBuffersR.push(inputBuffer[1]);
  recLength += inputBuffer[0].length;
}

 

// Flatten both channels, interleave them, encode as a WAV container and
// post the resulting Blob back to the main thread.
function exportWAV(type){
  var bufferL = mergeBuffers(recBuffersL, recLength);
  var bufferR = mergeBuffers(recBuffersR, recLength);
  var interleaved = interleave(bufferL, bufferR);
  var dataview = encodeWAV(interleaved);
  var audioBlob = new Blob([dataview], { type: type });

  this.postMessage(audioBlob);
}

 

// Post the raw (unencoded) merged channel buffers back to the main thread
// as [left, right].
function getBuffer() {
  var buffers = [];
  buffers.push( mergeBuffers(recBuffersL, recLength) );
  buffers.push( mergeBuffers(recBuffersR, recLength) );
  this.postMessage(buffers);
}

 

// Reset all recording state, discarding any captured audio.
function clear(){
  recLength = 0;
  recBuffersL = [];
  recBuffersR = [];
}

 

// Concatenate an array of Float32Array chunks into a single Float32Array
// of exactly recLength samples.
function mergeBuffers(recBuffers, recLength){
  var result = new Float32Array(recLength);
  var offset = 0;
  for (var i = 0; i < recBuffers.length; i++){
    result.set(recBuffers[i], offset);
    offset += recBuffers[i].length;
  }
  return result;
}

 

// Interleave the two channel buffers into L,R,L,R... order.
// NOTE(review): assumes inputL and inputR have the same length; a shorter
// right channel would produce NaN samples — confirm with the caller.
function interleave(inputL, inputR){
  var length = inputL.length + inputR.length;
  var result = new Float32Array(length);

  var index = 0,
    inputIndex = 0;

  while (index < length){
    result[index++] = inputL[inputIndex];
    result[index++] = inputR[inputIndex];
    inputIndex++;
  }
  return result;
}

 

// Convert float samples (clamped to [-1, 1]) to little-endian signed
// 16-bit PCM, writing into the DataView `output` starting at byte `offset`.
function floatTo16BitPCM(output, offset, input){
  for (var i = 0; i < input.length; i++, offset += 2){
    var s = Math.max(-1, Math.min(1, input[i]));
    // Negative range is one step wider than positive in two's complement.
    output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
  }
}

 

// Write an ASCII string byte-by-byte into the DataView at `offset`.
function writeString(view, offset, string){
  for (var i = 0; i < string.length; i++){
    view.setUint8(offset + i, string.charCodeAt(i));
  }
}

 

// Build a 44-byte-header WAV (RIFF) container around the interleaved
// 16-bit stereo samples and return the backing DataView.
function encodeWAV(samples){
  var buffer = new ArrayBuffer(44 + samples.length * 2);
  var view = new DataView(buffer);

  /* RIFF identifier */
  writeString(view, 0, 'RIFF');
  /* file length: total size minus the 8-byte RIFF header, i.e. 36 + data.
     (The original wrote 32 +, which understates the chunk size by 4.) */
  view.setUint32(4, 36 + samples.length * 2, true);
  /* RIFF type */
  writeString(view, 8, 'WAVE');
  /* format chunk identifier */
  writeString(view, 12, 'fmt ');
  /* format chunk length */
  view.setUint32(16, 16, true);
  /* sample format (raw) */
  view.setUint16(20, 1, true);
  /* channel count */
  view.setUint16(22, 2, true);
  /* sample rate */
  view.setUint32(24, sampleRate, true);
  /* byte rate (sample rate * block align) */
  view.setUint32(28, sampleRate * 4, true);
  /* block align (channel count * bytes per sample) */
  view.setUint16(32, 4, true);
  /* bits per sample */
  view.setUint16(34, 16, true);
  /* data chunk identifier */
  writeString(view, 36, 'data');
  /* data chunk length */
  view.setUint32(40, samples.length * 2, true);

  floatTo16BitPCM(view, 44, samples);

  return view;
}

 

后台websocket代码:


1

2

3

4

5

6

7

8

9

10

11

12

13

14

15

16

17

18

19

20

21

22

23

24

25

26

27

28

29

30

31

32

33

34

35

36

37

38

39

40

41

42

43

44

45

46

47

48

49

50

51

52

53

54

55

56

57

58

59

60

61

62

63

64

65

66

67

68

69

70

71

72

73

74

75

76

77

78

79

80

81

82

83

84

85

86

87

88

89

90

91

92

93

94

95

96

97

98

99

100

101

102

103

104

105

106

107

108

109

110

111

112

113

114

115

116

117

118

119

120

121

122

123

124

125

126

127

128

129

130

131

132

133

134

135

136

137

138

139

140

141

142

143

144

145

146

147

package com.test;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.SequenceInputStream;

import javax.servlet.http.HttpServletRequest;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.eclipse.jetty.io.Connection;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.nio.SelectChannelConnector;
import org.eclipse.jetty.websocket.WebSocket;
import org.eclipse.jetty.websocket.WebSocketHandler;

/**
 * Embedded Jetty server that accepts websocket connections and appends
 * each received binary audio block to a growing out.wav file on disk.
 */
public class TestRecordServlet extends Server {

    private final static Log LOG = LogFactory.getLog(TestRecordServlet.class);

    public TestRecordServlet(int port) {
        SelectChannelConnector connector = new SelectChannelConnector();
        connector.setPort(port);
        addConnector(connector);

        WebSocketHandler wsHandler = new WebSocketHandler() {
            public WebSocket doWebSocketConnect(HttpServletRequest request, String protocol) {
                return new FaceDetectWebSocket();
            }
        };
        setHandler(wsHandler);
    }

    /**
     * Simple inner class that is used to handle websocket connections.
     *
     * @author jos
     */
    private static class FaceDetectWebSocket implements WebSocket,
            WebSocket.OnBinaryMessage, WebSocket.OnTextMessage {

        private String currentCommand = "";

        private Connection connection;
        //private FaceDetection faceDetection = new FaceDetection();

        public FaceDetectWebSocket() {
            super();
        }

        /**
         * On open we set the connection locally, and enable
         * binary support
         */
        public void onOpen(Connection connection) {
            this.connection = connection;
            this.connection.setMaxBinaryMessageSize(1024 * 512);
        }

        /**
         * Cleanup if needed. Not used for this example
         */
        public void onClose(int code, String message) {}

        /**
         * When we receive a binary message we treat it as a WAV audio block.
         * While a recording is in progress ("start" was received) each block
         * is appended to the capture file on disk.
         */
        public void onMessage(byte[] data, int offset, int length) {

            if (currentCommand.equals("start")) {
                try {
                    // The temporary file that contains our captured audio stream
                    File f = new File("out.wav");

                    // if the file already exists we append it.
                    if (f.exists()) {
                        LOG.info("Adding received block to existing file.");

                        // two clips are used to concat the data
                        AudioInputStream clip1 = AudioSystem.getAudioInputStream(f);
                        AudioInputStream clip2 = AudioSystem.getAudioInputStream(
                                new ByteArrayInputStream(data));

                        // use a sequenceinput to cat them together
                        AudioInputStream appendedFiles =
                                new AudioInputStream(
                                        new SequenceInputStream(clip1, clip2),
                                        clip1.getFormat(),
                                        clip1.getFrameLength() + clip2.getFrameLength());

                        // write out the output to a temporary file
                        AudioSystem.write(appendedFiles,
                                AudioFileFormat.Type.WAVE,
                                new File("out2.wav"));

                        // rename the files and delete the old one
                        File f1 = new File("out.wav");
                        File f2 = new File("out2.wav");
                        f1.delete();
                        f2.renameTo(new File("out.wav"));
                    } else {
                        LOG.info("Starting new recording.");
                        FileOutputStream fOut = new FileOutputStream("out.wav", true);
                        fOut.write(data);
                        fOut.close();
                    }
                } catch (Exception e) {
                    LOG.error("sss:" + e);
                }
            }
        }

        public void onMessage(String data) {
            if (data.startsWith("start")) {
                // before we start we cleanup anything left over
                //cleanup();
                currentCommand = "start";
            } else if (data.startsWith("stop")) {
                currentCommand = "stop";
            } else if (data.startsWith("clear")) {
                // just remove the current recording
                //cleanup();
            } else if (data.startsWith("analyze")) {

            }
        }
    }

    /**
     * Start the server on port 8080.
     * NOTE(review): the browser page connects to port 8088 — confirm which
     * port is intended and make the two sides agree.
     */
    public static void main(String[] args) throws Exception {
        TestRecordServlet server = new TestRecordServlet(8080);
        server.start();
        server.join();
    }
}

 

 

时间: 2024-11-05 17:28:29

使用websocket实现录音实例的相关文章

node+websocket 实时聊天实例教程

 最近,负责的游戏需要一个聊天功能,由于是实时的聊天,便想到了现在挺火的websocket,折腾了一天一夜,总算有点眉目了,现在总结如下: websocket 是html5一个通信协议,可以实时通信.本例的聊天是用的socket的框架socket.io实现的,socket.io 集成了websocket和xhr-polling(长轮询)等多种通信方式 1.搭建node环境 从node官网,本人选择的window的msi 一键安装.安装完node 之后还需配置系统环境变量 PATH 属性里添加上你

使用swoole扩展php websocket示例_php实例

复制代码 代码如下: <?phpdefine('DEBUG', 'on');define("WEBPATH", str_replace("\\","/", __DIR__));require __DIR__ . '/../libs/lib_config.php'; class WebSocket extends Swoole\Network\Protocol\WebSocket{    /**     * 下线时,通知所有人     */ 

《音乐达人秀:Adobe Audition CC实战222例》——第1篇 网络音乐的发展与录音编辑实例 第1章 电脑录音和网络音乐的发展 1.1 从容易发霉的磁带到能永久珍藏的数字音乐

第1篇 网络音乐的发展与录音编辑实例 第1章 电脑录音和网络音乐的发展 第2章 音乐录音实例 第3章 歌曲录制实例 第1章 电脑录音和网络音乐的发展 在讲解Audition CC软件的222个操作实例之前,要先来了解一下录音的发展史和网络音乐的发展趋势,因为毕竟这些实例最终还是要用在录音上.在第1章,读者能了解到数字音乐.音乐软件.网络音乐等的发展,掌握这些就能对录音以及制作的基本思想有一个很好的认识,今后使用软件也不会盲目,而是善于跟着软件的发展来不断掌握新技能. 核心内容提要: 从容易发霉的

《音乐达人秀:Adobe Audition实战200例》——实例16 歌没录完,第二天继续录音

实例16 歌没录完,第二天继续录音 在前面的实例中讲解了歌声.乐器的录制.不过,有的音乐比较长,如长段钢琴演奏.音乐话剧等,如果第一天由于时间的安排原因没有录完,在第二天就要接着录,那么该如何进行呢?下面来看详细的步骤. 步骤01:第一天的录音需要保存下来.如果是在多轨视图下,就要单击[文件]|[会话另存为]命令,保存为SES会话文件. 小提醒: 在多轨视图下保存为SES会话文件后,如果下次还要继续录音,就暂不必进行混缩.当然,如果要换电脑或重装系统,就得混缩. 步骤02:到了第二天,打开Aud

《音乐达人秀:Adobe Audition实战200例》——第1篇 网络音乐的发展与录音编辑实例 第1章 电脑录音和网络音乐的发展 1.1 从容易发霉的磁带到能永久珍藏的数字音乐

第1篇 网络音乐的发展与录音编辑实例 第1章 电脑录音和网络音乐的发展 第2章 音乐录音实例 第3章 单轨编辑实例 第4章 多轨编辑实例 第1章 电脑录音和网络音乐的发展 在讲解Audition软件实例200例之前,要先来了解一下录音的发展史和网络音乐的发展趋势,因为毕竟这些实例最终还是要用在录音上.在第1章,读者能了解到数字音乐.音乐软件.网络音乐等的发展,掌握这些就能对录音以及制作的基本思想有一个很好的认识,今后使用软件也不会盲目,善于跟着软件的发展来不断掌握新技能. 核心内容提要: 从容易

《音乐达人秀:Adobe Audition CC实战222例》——实例1 录制QQ聊天之一——对方播放的网络音乐

实例1 录制QQ聊天之一--对方播放的网络音乐 电脑中的声音,从技术上说有很多种来源.在屏幕最下一行右下角的任务栏托盘中,用鼠标右键单击音量图标,然后在弹出的快捷菜单中选择[打开音量合成器]命令,就可以看到依次有扬声器和系统声音等音量控制. 我们再打开一个音乐播放软件,如百度音乐软件,然后播放一首歌曲.此时再单击到"音量合成器-扬声器"窗口,就会发现,在音量控制中,多了音乐播放软件的音量控制,如图2-1所示. 我们继续在刚才的右键菜单中选择[播放设备]命令,在"声音"

使用 HTML5 WebSocket 构建实时 Web 应用

在 IBM Bluemix 云平台上开发并部署您的下一个应用. 开始您的试用 作为下一代的 Web 标准,HTML5 拥有许多引人注目的新特性,如 Canvas.本地存储.多媒体编程接口.WebSocket 等等.这其中有"Web 的 TCP "之称的 WebSocket 格外吸引开发人员的注意.WebSocket 的出现使得浏览器提供对 Socket 的支持成为可能,从而在浏览器和服务器之间提供了一个基于 TCP 连接的双向通道.Web 开发人员可以非常方便地使用 WebSocket

WebSocket实战之——JavaScript例子

源码地址:https://github.com/Tinywan/PHP_Experience   一.详细代码案例          详细解读一个简单html5 WebSocket的Js实例教程,附带完整的javascript websocket实例源码,以及实例代码效果演示页面,并对本实例的核心代码进行了深入解读.从WebSocket通讯三个阶段(打开握手.数据传递.关闭握手)进行了探讨,各阶段中浏览器和服务器做了些什么事情也有所涉及. //检查浏览器是否支持WebSocket if(wind

《音乐达人秀:Adobe Audition实战200例》——导读

前 言 作者健逗,原名杨帆,湖南株洲人,IT自由撰稿人,2007年毕业于中南大学计算机科学与技术专业,曾在苏州科技局生产力促进中心(亚太经合组织技术转移中心)当过网管.除掌握电脑.网站.音视频技术外,还有比较丰富的教学经验,并具有音乐创作方面的天赋.曾在<电脑报>.<电脑迷>杂志.天极网.新浪.网易等权威媒体发表IT文章50多篇.个人网站:http://jiandou.com/it. 本书以"音乐录音"为核心,以"实例"的方式来讲解Adobe