WechatGroup
This commit is contained in:
@@ -0,0 +1,143 @@
|
||||
//package com.ruoyi.cms.handler;
|
||||
//import java.io.File;
|
||||
//import java.io.FileOutputStream;
|
||||
//import java.io.IOException;
|
||||
//import java.nio.ByteBuffer;
|
||||
//
|
||||
//import com.alibaba.nls.client.AccessToken;
|
||||
//import com.alibaba.nls.client.protocol.NlsClient;
|
||||
//import com.alibaba.nls.client.protocol.OutputFormatEnum;
|
||||
//import com.alibaba.nls.client.protocol.SampleRateEnum;
|
||||
//import com.alibaba.nls.client.protocol.tts.SpeechSynthesizer;
|
||||
//import com.alibaba.nls.client.protocol.tts.SpeechSynthesizerListener;
|
||||
//import com.alibaba.nls.client.protocol.tts.SpeechSynthesizerResponse;
|
||||
//import org.slf4j.Logger;
|
||||
//import org.slf4j.LoggerFactory;
|
||||
///**
|
||||
// * 此示例演示了:
|
||||
// * 长文本语音合成API调用(setLongText)。
|
||||
// * 流式合成TTS。
|
||||
// * 首包延迟计算。
|
||||
// *
|
||||
// * 说明:该示例和nls-example-tts下的SpeechSynthesizerLongTextDemo不完全相同,长文本语音合成是单独的产品功能,是将一长串文本直接发送给服务端去合成,
|
||||
// * 而SpeechSynthesizerLongTextDemo演示的是将一长串文本在调用方处切割然后分段调用语音合成接口。
|
||||
// */
|
||||
//public class SpeechLongSynthesizerDemo {
|
||||
// private static final Logger logger = LoggerFactory.getLogger(SpeechLongSynthesizerDemo.class);
|
||||
// private static long startTime;
|
||||
// private String appKey;
|
||||
// NlsClient client;
|
||||
// public SpeechLongSynthesizerDemo(String appKey, String token, String url) {
|
||||
// this.appKey = appKey;
|
||||
// //创建NlsClient实例应用全局创建一个即可。生命周期可和整个应用保持一致,默认服务地址为阿里云线上服务地址。
|
||||
// if(url.isEmpty()) {
|
||||
// client = new NlsClient(token);
|
||||
// } else {
|
||||
// client = new NlsClient(url, token);
|
||||
// }
|
||||
// }
|
||||
// private static SpeechSynthesizerListener getSynthesizerListener() {
|
||||
// SpeechSynthesizerListener listener = null;
|
||||
// try {
|
||||
// listener = new SpeechSynthesizerListener() {
|
||||
// File f=new File("ttsForLongText.wav");
|
||||
// FileOutputStream fout = new FileOutputStream(f);
|
||||
// private boolean firstRecvBinary = true;
|
||||
// //语音合成结束
|
||||
// @Override
|
||||
// public void onComplete(SpeechSynthesizerResponse response) {
|
||||
// // 调用onComplete时,表示所有TTS数据已经接收完成,因此为整个合成数据的延迟。该延迟可能较大,不一定满足实时场景。
|
||||
// System.out.println("name: " + response.getName() + ", status: " + response.getStatus()+", output file :"+f.getAbsolutePath());
|
||||
// }
|
||||
// //语音合成的语音二进制数据
|
||||
// @Override
|
||||
// public void onMessage(ByteBuffer message) {
|
||||
// try {
|
||||
// if(firstRecvBinary) {
|
||||
// // 此处计算首包语音流的延迟,收到第一包语音流时,即可以进行语音播放,以提升响应速度(特别是实时交互场景下)。
|
||||
// firstRecvBinary = false;
|
||||
// long now = System.currentTimeMillis();
|
||||
// logger.info("tts first latency : " + (now - SpeechLongSynthesizerDemo.startTime) + " ms");
|
||||
// }
|
||||
// byte[] bytesArray = new byte[message.remaining()];
|
||||
// message.get(bytesArray, 0, bytesArray.length);
|
||||
// //System.out.println("write array:" + bytesArray.length);
|
||||
// fout.write(bytesArray);
|
||||
// } catch (IOException e) {
|
||||
// e.printStackTrace();
|
||||
// }
|
||||
// }
|
||||
// @Override
|
||||
// public void onFail(SpeechSynthesizerResponse response){
|
||||
// // task_id是调用方和服务端通信的唯一标识,当遇到问题时,需要提供此task_id以便排查。
|
||||
// System.out.println(
|
||||
// "task_id: " + response.getTaskId() +
|
||||
// //状态码
|
||||
// ", status: " + response.getStatus() +
|
||||
// //错误信息
|
||||
// ", status_text: " + response.getStatusText());
|
||||
// }
|
||||
// };
|
||||
// } catch (Exception e) {
|
||||
// e.printStackTrace();
|
||||
// }
|
||||
// return listener;
|
||||
// }
|
||||
// public void process(String text) {
|
||||
// SpeechSynthesizer synthesizer = null;
|
||||
// try {
|
||||
// //创建实例,建立连接。
|
||||
// synthesizer = new SpeechSynthesizer(client, getSynthesizerListener());
|
||||
// synthesizer.setAppKey(appKey);
|
||||
// //设置返回音频的编码格式。
|
||||
// synthesizer.setFormat(OutputFormatEnum.WAV);
|
||||
// //设置返回音频的采样率。
|
||||
// synthesizer.setSampleRate(SampleRateEnum.SAMPLE_RATE_16K);
|
||||
// //发音人。注意Java SDK不支持调用超高清场景对应的发音人(例如"zhiqi"),如需调用请使用restfulAPI方式。
|
||||
// synthesizer.setVoice("siyue");
|
||||
// //语调,范围是-500~500,可选,默认是0。
|
||||
// synthesizer.setPitchRate(0);
|
||||
// //语速,范围是-500~500,默认是0。
|
||||
// synthesizer.setSpeechRate(0);
|
||||
// //设置用于语音合成的文本
|
||||
// // 此处调用的是setLongText接口(原语音合成接口是setText)。
|
||||
// synthesizer.setLongText(text);
|
||||
// //此方法将以上参数设置序列化为JSON发送给服务端,并等待服务端确认。
|
||||
// long start = System.currentTimeMillis();
|
||||
// synthesizer.start();
|
||||
// logger.info("tts start latency " + (System.currentTimeMillis() - start) + " ms");
|
||||
// SpeechLongSynthesizerDemo.startTime = System.currentTimeMillis();
|
||||
// //等待语音合成结束
|
||||
// synthesizer.waitForComplete();
|
||||
// logger.info("tts stop latency " + (System.currentTimeMillis() - start) + " ms");
|
||||
// } catch (Exception e) {
|
||||
// e.printStackTrace();
|
||||
// } finally {
|
||||
// //关闭连接
|
||||
// if (null != synthesizer) {
|
||||
// synthesizer.close();
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// public void shutdown() {
|
||||
// client.shutdown();
|
||||
// }
|
||||
// public static void main(String[] args) throws Exception {
|
||||
// SECURITY(review): live AccessKey credentials are committed in this commented-out demo and remain in VCS history — rotate these keys and remove the dead code.
// AccessToken accessToken = new AccessToken("LTAI5tRBahK93vPNF1JDVEPA", "x95OWb4cV6ccQVtbEJ2Gxm2Uwl2thJ");
|
||||
// accessToken.apply();
|
||||
// String token = accessToken.getToken();
|
||||
// String appKey = "mtA2pwmvCeefHT3Y";
|
||||
// // url取默认值
|
||||
// String url = "wss://nls-gateway-cn-shanghai.aliyuncs.com/ws/v1";
|
||||
//
|
||||
// String ttsTextLong = "百草堂与三味书屋 鲁迅 \n" +
|
||||
// "我家的后面有一个很大的园,相传叫作百草园。现在是早已并屋子一起卖给朱文公的子孙了,连那最末次的相见也已经隔了七八年,其中似乎确凿只有一些野草;但那时却是我的乐园。\n" +
|
||||
// "不必说碧绿的菜畦,光滑的石井栏,高大的皂荚树,紫红的桑葚;也不必说鸣蝉在树叶里长吟,肥胖的黄蜂伏在菜花上,轻捷的叫天子(云雀)忽然从草间直窜向云霄里去了。\n" +
|
||||
// "单是周围的短短的泥墙根一带,就有无限趣味。油蛉在这里低唱,蟋蟀们在这里弹琴。翻开断砖来,有时会遇见蜈蚣;还有斑蝥,倘若用手指按住它的脊梁,便会啪的一声,\n" +
|
||||
// "从后窍喷出一阵烟雾。何首乌藤和木莲藤缠络着,木莲有莲房一般的果实,何首乌有臃肿的根。有人说,何首乌根是有像人形的,吃了便可以成仙,我于是常常拔它起来,牵连不断地拔起来,\n" +
|
||||
// "也曾因此弄坏了泥墙,却从来没有见过有一块根像人样! 如果不怕刺,还可以摘到覆盆子,像小珊瑚珠攒成的小球,又酸又甜,色味都比桑葚要好得远......";
|
||||
// SpeechLongSynthesizerDemo demo = new SpeechLongSynthesizerDemo(appKey, token, url);
|
||||
// demo.process(ttsTextLong);
|
||||
// demo.shutdown();
|
||||
// }
|
||||
//}
|
||||
@@ -0,0 +1,63 @@
|
||||
package com.ruoyi.cms.handler;
|
||||
|
||||
import com.alibaba.nls.client.protocol.asr.SpeechRecognizer;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import javax.websocket.*;
|
||||
import javax.websocket.server.ServerEndpoint;
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
|
||||
@Component
|
||||
@ServerEndpoint("/speech-recognition")
|
||||
public class SpeechRecognitionWebSocketHandler {
|
||||
|
||||
private SpeechRecognizerAI recognizerDemo;
|
||||
|
||||
public SpeechRecognitionWebSocketHandler() {
|
||||
// 初始化语音识别器
|
||||
String appKey = "LuvNcrddU3PH8Tau";
|
||||
String id = "LTAI5tRBahK93vPNF1JDVEPA";
|
||||
String secret = "x95OWb4cV6ccQVtbEJ2Gxm2Uwl2thJ";
|
||||
String url = System.getenv().getOrDefault("NLS_GATEWAY_URL", "wss://nls-gateway-cn-shanghai.aliyuncs.com/ws/v1");
|
||||
recognizerDemo = new SpeechRecognizerAI(appKey, id, secret, url);
|
||||
}
|
||||
|
||||
/**
|
||||
* 连接建立成功调用的方法
|
||||
*/
|
||||
@OnOpen
|
||||
public void onOpen(Session session) {
|
||||
System.out.println("WebSocket 连接建立成功,sessionId = " + session.getId());
|
||||
}
|
||||
|
||||
/**
|
||||
* 收到客户端消息后调用的方法
|
||||
*/
|
||||
@OnMessage(maxMessageSize=5242880)
|
||||
public void onMessage(ByteBuffer message, Session session) throws IOException {
|
||||
byte[] audioData = new byte[message.remaining()];
|
||||
message.get(audioData);
|
||||
|
||||
// 处理音频数据
|
||||
recognizerDemo.processStream(session, new ByteArrayInputStream(audioData), 16000);
|
||||
}
|
||||
|
||||
/**
|
||||
* 连接关闭调用的方法
|
||||
*/
|
||||
@OnClose
|
||||
public void onClose(Session session) {
|
||||
System.out.println("WebSocket 连接关闭,sessionId = " + session.getId());
|
||||
}
|
||||
|
||||
/**
|
||||
* 发生错误时调用的方法
|
||||
*/
|
||||
@OnError
|
||||
public void onError(Session session, Throwable error) {
|
||||
System.err.println("WebSocket 发生错误:" + error.getMessage());
|
||||
error.printStackTrace();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,117 @@
|
||||
package com.ruoyi.cms.handler;
|
||||
|
||||
import com.alibaba.nls.client.AccessToken;
|
||||
import com.alibaba.nls.client.protocol.InputFormatEnum;
|
||||
import com.alibaba.nls.client.protocol.NlsClient;
|
||||
import com.alibaba.nls.client.protocol.SampleRateEnum;
|
||||
import com.alibaba.nls.client.protocol.asr.SpeechRecognizer;
|
||||
import com.alibaba.nls.client.protocol.asr.SpeechRecognizerListener;
|
||||
import com.alibaba.nls.client.protocol.asr.SpeechRecognizerResponse;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import javax.websocket.Session;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
|
||||
public class SpeechRecognizerAI {
|
||||
private static final Logger logger = LoggerFactory.getLogger(SpeechRecognizerAI.class);
|
||||
private String appKey;
|
||||
private NlsClient client;
|
||||
|
||||
public SpeechRecognizerAI(String appKey, String id, String secret, String url) {
|
||||
this.appKey = appKey;
|
||||
|
||||
// 获取 AccessToken
|
||||
AccessToken accessToken = new AccessToken(id, secret);
|
||||
try {
|
||||
accessToken.apply(); // 申请 Token
|
||||
logger.info("Token: {}, Expire Time: {}", accessToken.getToken(), accessToken.getExpireTime());
|
||||
|
||||
// 初始化 NlsClient
|
||||
if (url.isEmpty()) {
|
||||
this.client = new NlsClient(accessToken.getToken()); // 使用默认服务地址
|
||||
} else {
|
||||
this.client = new NlsClient(url, accessToken.getToken()); // 使用自定义服务地址
|
||||
}
|
||||
} catch (IOException e) {
|
||||
logger.error("Failed to initialize NlsClient: {}", e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
public void processStream(Session session, InputStream inputStream, int sampleRate) {
|
||||
SpeechRecognizer recognizer = null;
|
||||
try {
|
||||
// 创建 SpeechRecognizer 实例
|
||||
recognizer = new SpeechRecognizer(client, new SpeechRecognizerListener() {
|
||||
@Override
|
||||
public void onRecognitionResultChanged(SpeechRecognizerResponse response) {
|
||||
// 打印中间识别结果
|
||||
String text = response.getRecognizedText();
|
||||
logger.info("中间识别结果: {}", text);
|
||||
|
||||
sendResult(session, text,false);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onRecognitionCompleted(SpeechRecognizerResponse response) {
|
||||
// 打印最终识别结果
|
||||
String text = response.getRecognizedText();
|
||||
logger.info("最终识别结果: {}", text);
|
||||
sendResult(session, text,true);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onStarted(SpeechRecognizerResponse response) {
|
||||
logger.info("识别开始, TaskId: {}", response.getTaskId());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFail(SpeechRecognizerResponse response) {
|
||||
logger.error("识别失败: {}", response.getStatusText());
|
||||
}
|
||||
});
|
||||
|
||||
// 设置语音识别参数
|
||||
recognizer.setAppKey(appKey);
|
||||
recognizer.setFormat(InputFormatEnum.PCM);
|
||||
recognizer.setSampleRate(sampleRate == 16000 ?
|
||||
SampleRateEnum.SAMPLE_RATE_16K : SampleRateEnum.SAMPLE_RATE_8K);
|
||||
recognizer.setEnableIntermediateResult(true);
|
||||
recognizer.addCustomedParam("enable_voice_detection", true);
|
||||
|
||||
// 启动识别
|
||||
recognizer.start();
|
||||
|
||||
// 读取音频流并发送
|
||||
byte[] buffer = new byte[3200];
|
||||
int len;
|
||||
while ((len = inputStream.read(buffer)) > 0) {
|
||||
recognizer.send(buffer, len);
|
||||
}
|
||||
|
||||
// 停止识别
|
||||
recognizer.stop();
|
||||
} catch (Exception e) {
|
||||
logger.error("处理音频流时出错: {}", e.getMessage());
|
||||
} finally {
|
||||
if (recognizer != null) {
|
||||
recognizer.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void sendResult(Session session, String text,Boolean asrEnd) {
|
||||
try {
|
||||
session.getBasicRemote().sendText("{\"text\": \"" + text + "\",\"asrEnd\":\"" + asrEnd + "\"}");
|
||||
} catch (IOException e) {
|
||||
logger.error("发送识别结果失败: {}", e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
public void shutdown() {
|
||||
if (client != null) {
|
||||
client.shutdown();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,145 @@
|
||||
package com.ruoyi.cms.handler;
|
||||
|
||||
import com.alibaba.nls.client.AccessToken;
|
||||
import com.alibaba.nls.client.protocol.NlsClient;
|
||||
import com.alibaba.nls.client.protocol.OutputFormatEnum;
|
||||
import com.alibaba.nls.client.protocol.SampleRateEnum;
|
||||
import com.alibaba.nls.client.protocol.tts.SpeechSynthesizer;
|
||||
import com.alibaba.nls.client.protocol.tts.SpeechSynthesizerListener;
|
||||
import com.alibaba.nls.client.protocol.tts.SpeechSynthesizerResponse;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import javax.websocket.*;
|
||||
import javax.websocket.server.ServerEndpoint;
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
|
||||
@Component
|
||||
@ServerEndpoint("/speech-synthesis")
|
||||
public class SpeechSynthesisWebSocketHandler {
|
||||
private static final Logger logger = LoggerFactory.getLogger(SpeechSynthesisWebSocketHandler.class);
|
||||
|
||||
private NlsClient client;
|
||||
private String appKey = "mtA2pwmvCeefHT3Y";
|
||||
private String accessKeyId = "LTAI5tRBahK93vPNF1JDVEPA";
|
||||
private String accessKeySecret = "x95OWb4cV6ccQVtbEJ2Gxm2Uwl2thJ";
|
||||
private String url = "wss://nls-gateway-cn-shanghai.aliyuncs.com/ws/v1";
|
||||
|
||||
public SpeechSynthesisWebSocketHandler() {
|
||||
// Initialize NLS client with token
|
||||
AccessToken accessToken = new AccessToken(accessKeyId, accessKeySecret);
|
||||
try {
|
||||
accessToken.apply();
|
||||
String token = accessToken.getToken();
|
||||
if(url.isEmpty()) {
|
||||
this.client = new NlsClient(token);
|
||||
} else {
|
||||
this.client = new NlsClient(url, token);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("Failed to initialize NLS client", e);
|
||||
}
|
||||
}
|
||||
|
||||
@OnOpen
|
||||
public void onOpen(Session session) {
|
||||
logger.info("WebSocket connected for speech synthesis, sessionId: {}", session.getId());
|
||||
}
|
||||
|
||||
@OnMessage(maxMessageSize=5242880)
|
||||
public void onMessage(String text, Session session) {
|
||||
logger.info("Received text for synthesis: {}", text);
|
||||
|
||||
SpeechSynthesizer synthesizer = null;
|
||||
try {
|
||||
// Create synthesizer with a session-specific listener
|
||||
synthesizer = new SpeechSynthesizer(client, createSynthesizerListener(session));
|
||||
|
||||
// Configure synthesizer
|
||||
synthesizer.setAppKey(appKey);
|
||||
synthesizer.setFormat(OutputFormatEnum.WAV);
|
||||
synthesizer.setSampleRate(SampleRateEnum.SAMPLE_RATE_16K);
|
||||
synthesizer.setVoice("aiqi");
|
||||
synthesizer.setPitchRate(0);
|
||||
synthesizer.setSpeechRate(0);
|
||||
|
||||
// Use long text synthesis
|
||||
synthesizer.setLongText(text);
|
||||
|
||||
// Start synthesis
|
||||
synthesizer.start();
|
||||
|
||||
} catch (Exception e) {
|
||||
logger.error("Error during speech synthesis", e);
|
||||
try {
|
||||
session.close(new CloseReason(CloseReason.CloseCodes.UNEXPECTED_CONDITION, "Synthesis error"));
|
||||
} catch (IOException ioException) {
|
||||
logger.error("Error closing session", ioException);
|
||||
}
|
||||
} finally {
|
||||
// Note: We can't close the synthesizer here because synthesis is async
|
||||
// It should be closed in the listener's onComplete/onFail methods
|
||||
}
|
||||
}
|
||||
|
||||
@OnClose
|
||||
public void onClose(Session session) {
|
||||
logger.info("WebSocket closed for speech synthesis, sessionId: {}", session.getId());
|
||||
}
|
||||
|
||||
@OnError
|
||||
public void onError(Session session, Throwable error) {
|
||||
logger.error("WebSocket error for session {}: {}", session.getId(), error.getMessage(), error);
|
||||
}
|
||||
|
||||
private SpeechSynthesizerListener createSynthesizerListener(Session session) {
|
||||
return new SpeechSynthesizerListener() {
|
||||
private boolean firstRecvBinary = true;
|
||||
private long startTime;
|
||||
|
||||
@Override
|
||||
public void onComplete(SpeechSynthesizerResponse response) {
|
||||
logger.info("Synthesis completed for session {}, status: {}", session.getId(), response.getStatus());
|
||||
try {
|
||||
// Send a close message or marker to indicate completion
|
||||
session.getBasicRemote().sendText("{\"status\":\"complete\"}");
|
||||
} catch (IOException e) {
|
||||
logger.error("Error sending completion message", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onMessage(ByteBuffer message) {
|
||||
try {
|
||||
if (firstRecvBinary) {
|
||||
firstRecvBinary = false;
|
||||
startTime = System.currentTimeMillis();
|
||||
logger.info("First audio packet received for session {}", session.getId());
|
||||
}
|
||||
|
||||
// Send audio data to client
|
||||
byte[] bytesArray = new byte[message.remaining()];
|
||||
message.get(bytesArray, 0, bytesArray.length);
|
||||
session.getBasicRemote().sendBinary(ByteBuffer.wrap(bytesArray));
|
||||
|
||||
} catch (IOException e) {
|
||||
logger.error("Error sending audio data to client", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFail(SpeechSynthesizerResponse response) {
|
||||
logger.error("Synthesis failed for session {}: task_id: {}, status: {}, status_text: {}",
|
||||
session.getId(), response.getTaskId(), response.getStatus(), response.getStatusText());
|
||||
try {
|
||||
session.close(new CloseReason(CloseReason.CloseCodes.UNEXPECTED_CONDITION,
|
||||
"Synthesis failed: " + response.getStatusText()));
|
||||
} catch (IOException e) {
|
||||
logger.error("Error closing failed session", e);
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,83 @@
|
||||
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Real-time Speech Recognition</title>
</head>
<body>
<button id="start">Start Recording</button>
<button id="stop" disabled>Stop Recording</button>
<div id="output"></div>

<script>
    const startButton = document.getElementById('start');
    const stopButton = document.getElementById('stop');
    const outputDiv = document.getElementById('output');
    let mediaRecorder;
    let mediaStream; // kept so the microphone can be released on stop
    let socket;

    // 初始化 WebSocket
    function initWebSocket() {
        socket = new WebSocket('ws://127.0.0.1:8080/speech-recognition'); // 确保端口正确

        socket.onopen = () => {
            console.log('WebSocket connection established');
            startButton.disabled = false;
        };

        socket.onmessage = (event) => {
            // Guard the parse so one malformed frame does not kill the handler,
            // and insert via textContent so server text cannot inject HTML.
            try {
                const result = JSON.parse(event.data);
                const p = document.createElement('p');
                p.textContent = result.text;
                outputDiv.appendChild(p);
            } catch (e) {
                console.error('Unexpected message from server:', event.data);
            }
        };

        socket.onclose = () => {
            console.log('WebSocket connection closed');
            // 尝试重连
            setTimeout(() => {
                console.log('Reconnecting WebSocket...');
                initWebSocket();
            }, 3000); // 3 秒后重连
        };

        socket.onerror = (error) => {
            console.error('WebSocket error:', error);
        };
    }

    // 开始录音
    startButton.addEventListener('click', async () => {
        mediaStream = await navigator.mediaDevices.getUserMedia({ audio: true });
        // NOTE(review): Opus-in-WebM is captured here, but the server treats the
        // bytes as 16 kHz PCM — confirm a transcoding step exists, or capture PCM.
        mediaRecorder = new MediaRecorder(mediaStream, { mimeType: 'audio/webm; codecs=opus' }); // 使用 Opus 编码

        mediaRecorder.ondataavailable = (event) => {
            if (event.data.size > 0) {
                const reader = new FileReader();
                reader.onload = () => {
                    if (socket.readyState === WebSocket.OPEN) { // 检查 WebSocket 状态
                        const audioData = reader.result;
                        socket.send(audioData); // 发送音频数据
                    } else {
                        console.error('WebSocket is not open. Current state:', socket.readyState);
                    }
                };
                reader.readAsArrayBuffer(event.data);
            }
        };

        mediaRecorder.start(1000); // 每 1 秒发送一次数据
        startButton.disabled = true;
        stopButton.disabled = false;
    });

    // 停止录音
    stopButton.addEventListener('click', () => {
        mediaRecorder.stop();
        // Release the microphone; previously the capture (and the browser's
        // recording indicator) stayed live after stopping.
        if (mediaStream) {
            mediaStream.getTracks().forEach((track) => track.stop());
            mediaStream = undefined;
        }
        startButton.disabled = false;
        stopButton.disabled = true;
    });

    // 初始化 WebSocket
    initWebSocket();
</script>
</body>
</html>
|
||||
Reference in New Issue
Block a user