diff --git a/hook/useRealtimeRecorder.js b/hook/useRealtimeRecorder.js
index 43bbc15..b0833ad 100644
--- a/hook/useRealtimeRecorder.js
+++ b/hook/useRealtimeRecorder.js
@@ -20,6 +20,7 @@ export function useAudioRecorder() {
const recognizedText = ref('')
const lastFinalText = ref('')
+ const isRecognizing = ref(false) // 识别状态,暴露给外部
let audioStream = null
let audioContext = null
@@ -132,11 +133,15 @@ export function useAudioRecorder() {
}
case 'TranscriptionCompleted': {
lastFinalText.value = ''
+ isRecognizing.value = false // 识别完成,重置状态
// console.log('识别全部完成')
+ cleanup()
break
}
case 'TaskFailed': {
console.error('识别失败:', msg?.header?.status_text)
+ isRecognizing.value = false // 识别失败,重置状态
+ cleanup()
break
}
default:
@@ -151,7 +156,104 @@ export function useAudioRecorder() {
if (isRecording.value) return
// #ifdef MP-WEIXIN
- $api.msg('小程序暂不支持语音识别功能');
+ try {
+ recognizedText.value = ''
+ lastFinalText.value = ''
+ // Deliberately do NOT set isRecognizing here — it only turns on after recording stops (in onStop below).
+
+ const recorderManager = uni.getRecorderManager() // NOTE(review): global singleton — confirm repeated on* registration replaces (not stacks) handlers across recordings
+
+ // Recording-complete handler: upload the finished clip for server-side speech recognition.
+ recorderManager.onStop(async (res) => {
+ console.log('小程序录音完成:', res)
+ try {
+ // Recording has stopped; turn on isRecognizing so the UI shows a loading state.
+ isRecognizing.value = true
+
+ // Debug dump of the upload request config.
+ console.log('准备上传语音识别请求配置:', {
+ url: config.vioceBaseURl, // NOTE(review): 'vioceBaseURl' looks misspelled — do not "fix" here unless the key in config is renamed too
+ name: 'file',
+ method: 'POST',
+ fileType: 'audio',
+ filePath: res.tempFilePath
+ })
+
+ // Upload the recorded file for recognition.
+ const uploadResult = await uni.uploadFile({ // NOTE(review): uni.uploadFile only returns a Promise when no success/fail callbacks are passed — confirm for the uni-app version in use
+ url: config.vioceBaseURl,
+ filePath: res.tempFilePath,
+ name: 'file',
+ fileType: 'audio',
+ method: 'POST' // explicit POST
+ })
+
+ console.log('语音识别上传结果:', uploadResult)
+
+ if (uploadResult.statusCode === 200) {
+ try {
+ const result = JSON.parse(uploadResult.data)
+ console.log('语音识别结果:', result)
+ if (result.code === 200 && result.data) {
+ recognizedText.value = result.data
+ console.log('语音识别成功,识别结果:', recognizedText.value)
+ // Assigning recognizedText is the hand-off signal: the consuming page
+ // watches this ref and auto-sends the message, so no event emit
+ // is needed from inside this hook.
+ isRecognizing.value = false // success: clear the loading state
+ } else {
+ console.error('语音识别返回错误:', result.message || '未知错误')
+ $api.msg('语音识别失败,请重试')
+ isRecognizing.value = false // server returned a recognition error
+ }
+ } catch (parseErr) {
+ console.error('语音识别结果解析失败:', parseErr)
+ $api.msg('语音识别失败,请重试')
+ isRecognizing.value = false // response body was not valid JSON
+ }
+ } else {
+ console.error('语音识别请求失败,状态码:', uploadResult.statusCode)
+ $api.msg('语音识别失败,请重试')
+ isRecognizing.value = false // non-200 HTTP status
+ }
+ } catch (err) {
+ console.error('语音识别上传失败:', err)
+ $api.msg('语音识别失败,请重试')
+ isRecognizing.value = false // upload itself failed/rejected
+ }
+ })
+
+ // Recording error handler: surface the failure and release resources.
+ recorderManager.onError((err) => {
+ console.error('小程序录音错误:', err)
+ $api.msg('录音失败,请重试');
+ cleanup()
+ })
+
+ // Start recording via the WeChat mini-program API.
+ await recorderManager.start({ // NOTE(review): start() does not return a Promise — this await is a no-op; failures arrive via onError instead
+ duration: 60000, // cap recording at 60s
+ sampleRate: 16000,
+ numberOfChannels: 1,
+ encodeBitRate: 96000,
+ format: 'mp3'
+ })
+
+ isRecording.value = true
+ recordingDuration.value = 0
+ durationTimer = setInterval(() => recordingDuration.value++, 1000)
+
+ // NOTE(review): per WeChat docs onFrameRecorded only fires when frameSize is passed to start(), and res exposes frameBuffer rather than volume — confirm this callback ever runs
+ recorderManager.onFrameRecorded((res) => {
+ // Mirror the (possibly undefined) volume into the 16-bar wave display.
+ volumeLevel.value = res.volume || 0
+ audioDataForDisplay.value = Array(16).fill(volumeLevel.value)
+ })
+ } catch (err) {
+ console.error('小程序录音启动失败:', err)
+ $api.msg('录音启动失败,请重试');
+ cleanup()
+ }
return;
// #endif
@@ -164,6 +266,7 @@ export function useAudioRecorder() {
recognizedText.value = ''
lastFinalText.value = ''
+ // 开始录音时不设置isRecognizing为true,只有在停止录音后才保持isRecognizing为true
await connectWebSocket()
audioStream = await navigator.mediaDevices.getUserMedia({
@@ -201,6 +304,7 @@ export function useAudioRecorder() {
durationTimer = setInterval(() => recordingDuration.value++, 1000)
} catch (err) {
console.error('启动失败:', err)
+ isRecognizing.value = false // 启动失败,重置状态
cleanup()
}
// #endif
@@ -210,6 +314,12 @@ export function useAudioRecorder() {
if (!isRecording.value || isStopping.value) return
isStopping.value = true
+ // #ifdef MP-WEIXIN
+ uni.getRecorderManager().stop()
+ // 小程序中录音停止后会触发onStop事件,在onStop事件中处理识别结果和状态重置
+ // #endif
+
+ // #ifdef H5
if (websocket?.readyState === WebSocket.OPEN) {
websocket.send(JSON.stringify({
header: {
@@ -218,18 +328,39 @@ export function useAudioRecorder() {
message_id: generateUUID()
}
}))
- websocket.close()
+ // H5中不立即调用cleanup,等待识别完成
}
+ // #endif
- cleanup()
+ // 只清理录音相关资源,不重置识别状态
+ clearInterval(durationTimer)
+ audioStream?.getTracks().forEach(track => track.stop())
+ audioContext?.close()
+ audioStream = null
+ audioContext = null
+ audioInput = null
+ scriptProcessor = null
+
+ isRecording.value = false
+ isSocketConnected.value = false
isStopping.value = false
}
const cancelRecording = () => {
if (!isRecording.value || isStopping.value) return
isStopping.value = true
+
+ // #ifdef MP-WEIXIN
+ uni.getRecorderManager().stop()
+ // #endif
+
+ // #ifdef H5
websocket?.close()
+ // #endif
+
+ // 取消录音时重置所有状态
cleanup()
+ isRecognizing.value = false
isStopping.value = false
}
@@ -249,16 +380,22 @@ export function useAudioRecorder() {
isRecording.value = false
isSocketConnected.value = false
+ isRecognizing.value = false // 停止录音,重置识别状态
}
onUnmounted(() => {
if (isRecording.value) stopRecording()
})
+ const reset = () => { // exposed so pages can force-reset recorder/recognition state
+ cleanup() // cleanup resets recording flags and isRecognizing
+ }
+
return {
isRecording,
isStopping,
isSocketConnected,
+ isRecognizing,
recordingDuration,
audioDataForDisplay,
volumeLevel,
@@ -266,6 +403,7 @@ export function useAudioRecorder() {
lastFinalText,
startRecording,
stopRecording,
- cancelRecording
+ cancelRecording,
+ reset
}
}
\ No newline at end of file
diff --git a/pages/chat/components/WaveDisplay.vue b/pages/chat/components/WaveDisplay.vue
index b0d190c..30a1dd5 100644
--- a/pages/chat/components/WaveDisplay.vue
+++ b/pages/chat/components/WaveDisplay.vue
@@ -65,6 +65,26 @@ const centerIndex = ref(0);
// 动画帧ID
let animationId = null;
+// requestAnimationFrame shim — WeChat mini-programs lack rAF, so approximate with setTimeout.
+const requestAnimationFramePolyfill = (callback) => {
+ // #ifdef MP-WEIXIN
+ return setTimeout(callback, 16); // ~16ms ≈ 60fps
+ // #endif
+ // #ifdef H5
+ return requestAnimationFrame(callback); // NOTE(review): platforms other than MP-WEIXIN/H5 (e.g. App) fall through and return undefined — animation silently no-ops; confirm intended
+ // #endif
+};
+
+// cancelAnimationFrame shim: must stay paired with the scheduler shim so ids cancel correctly.
+const cancelAnimationFramePolyfill = (id) => {
+ // #ifdef MP-WEIXIN
+ clearTimeout(id); // ids on MP come from setTimeout
+ // #endif
+ // #ifdef H5
+ cancelAnimationFrame(id); // NOTE(review): no branch for other platforms — mirrors the request shim's gap
+ // #endif
+};
+
// 格式化显示时间
const formattedTime = computed(() => {
const mins = Math.floor(props.recordingTime / 60)
@@ -125,7 +145,7 @@ const updateWaveform = () => {
}
}
- animationId = requestAnimationFrame(updateWaveform);
+ animationId = requestAnimationFramePolyfill(updateWaveform);
};
// 更新单个波形条
@@ -157,14 +177,14 @@ const updateWaveBar = (index, value) => {
// 开始动画
const startAnimation = () => {
if (!animationId) {
- animationId = requestAnimationFrame(updateWaveform);
+ animationId = requestAnimationFramePolyfill(updateWaveform);
}
};
// 停止动画
const stopAnimation = () => {
if (animationId) {
- cancelAnimationFrame(animationId);
+ cancelAnimationFramePolyfill(animationId);
animationId = null;
}
};
diff --git a/pages/chat/components/ai-paging.vue b/pages/chat/components/ai-paging.vue
index 48cfa47..3206870 100644
--- a/pages/chat/components/ai-paging.vue
+++ b/pages/chat/components/ai-paging.vue
@@ -133,6 +133,20 @@
{{ recognizedText }} {{ lastFinalText }}
+
+
+
+
+
+
+
+
+
+
+ 正在识别语音...
+
+
+
@@ -175,9 +189,6 @@
@touchmove="handleTouchMove"
@touchend="handleTouchEnd"
@touchcancel="handleTouchCancel"
- :catchtouchstart="true"
- :catchtouchmove="true"
- :catchtouchend="true"
v-show="isVoice"
type="default"
>
@@ -294,11 +305,11 @@ import FileIcon from './fileIcon.vue';
import FileText from './fileText.vue';
import { useAudioRecorder } from '@/hook/useRealtimeRecorder.js';
import { useTTSPlayer } from '@/hook/useTTSPlayer.js';
+import successIcon from '@/static/icon/success.png';
// 全局
const { $api, navTo, throttle, config } = inject('globalFunction');
const emit = defineEmits(['onConfirm']);
const { messages, isTyping, textInput, chatSessionID } = storeToRefs(useChatGroupDBStore());
-import successIcon from '@/static/icon/success.png';
// hook
const {
isRecording,
@@ -309,8 +320,32 @@ const {
volumeLevel,
recognizedText,
lastFinalText,
+ recordingDuration,
+ isRecognizing,
+ reset
} = useAudioRecorder();
+// Auto-send whenever a recognition result lands in recognizedText. NOTE(review): if the H5 WebSocket path updates recognizedText incrementally with partial results, this will send partial messages — verify against the H5 handler.
+watch(
+ () => recognizedText.value,
+ (newVal) => {
+ if (newVal && newVal.trim()) {
+ console.log('监听到语音识别结果变化,自动发送消息:', newVal);
+ sendMessage(newVal);
+ }
+ }
+);
+
+// Toast while recognition is in flight. NOTE(review): the template also renders a "正在识别语音..." overlay, so this toast may be redundant UI.
+watch(
+ () => isRecognizing.value,
+ (newVal) => {
+ if (newVal) {
+ $api.msg('正在识别语音...');
+ }
+ }
+);
+
const { speak, pause, resume, isSpeaking, isPaused, cancelAudio } = useTTSPlayer(config.speechSynthesis);
// 获取组件实例(用于小程序 SelectorQuery)
@@ -362,6 +397,7 @@ onMounted(async () => {
changeQueries();
scrollToBottom();
isAudioPermission.value = await requestMicPermission();
+ reset(); // 重置语音识别状态
});
const requestMicPermission = async () => {
@@ -684,17 +720,22 @@ const handleTouchEnd = () => {
if (status.value === 'cancel') {
console.log('取消发送');
cancelRecording();
+ status.value = 'idle';
} else {
stopRecording();
if (isAudioPermission.value) {
- if (recognizedText.value) {
- sendMessage(recognizedText.value);
- } else {
+ // Gate on recording duration instead of relying on a recognition result being ready.
+ // NOTE(review): stopRecording() above already set isRecording to false, so the 0.5 fallback below is unreachable — any recording under one full timer tick is rejected as too short; confirm intent.
+ const actualDuration = recordingDuration.value > 0 ? recordingDuration.value : (isRecording.value ? 0.5 : 0)
+ if (actualDuration < 1) {
+ $api.msg('说话时长太短');
+ status.value = 'idle';
+ } else {
+ // Recognition + auto-send are handled inside useAudioRecorder / the recognizedText watcher.
+ status.value = 'idle';
+ }
}
}
- status.value = 'idle';
};
const handleTouchCancel = () => {
@@ -1118,6 +1159,11 @@ image-margin-top = 40rpx
-moz-user-select:none;
-ms-user-select:none;
touch-action: none; /* 禁用默认滚动 */
+ position: fixed;
+ left: 0;
+ right: 0;
+ bottom: 160rpx; /* 为底部导航栏留出空间 */
+ z-index: 9999; /* 确保高于其他元素 */
.record-tip
font-weight: 400;
color: #909090;