Fix voice recognition

This commit is contained in:
francis_fh
2026-01-23 22:01:38 +08:00
parent ebb6bc6e33
commit c4c6cea579
3 changed files with 219 additions and 15 deletions


@@ -20,6 +20,7 @@ export function useAudioRecorder() {
const recognizedText = ref('')
const lastFinalText = ref('')
const isRecognizing = ref(false) // recognition state, exposed to consumers
let audioStream = null
let audioContext = null
@@ -132,11 +133,15 @@ export function useAudioRecorder() {
}
case 'TranscriptionCompleted': {
lastFinalText.value = ''
isRecognizing.value = false // recognition completed, reset state
// console.log('Recognition fully completed')
cleanup()
break
}
case 'TaskFailed': {
console.error('Recognition failed:', msg?.header?.status_text)
isRecognizing.value = false // recognition failed, reset state
cleanup()
break
}
default:
@@ -151,7 +156,104 @@ export function useAudioRecorder() {
if (isRecording.value) return
// #ifdef MP-WEIXIN
$api.msg('Voice recognition is not yet supported in the mini program');
try {
recognizedText.value = ''
lastFinalText.value = ''
// Do not set isRecognizing to true when recording starts; it is only set to true after recording stops
const recorderManager = uni.getRecorderManager()
// Listen for the recording-finished event
recorderManager.onStop(async (res) => {
console.log('Mini program recording finished:', res)
try {
// After recording stops, set isRecognizing to true to show the loading state
isRecognizing.value = true
// Log the request config for debugging
console.log('Preparing speech recognition upload, request config:', {
url: config.vioceBaseURl,
name: 'file',
method: 'POST',
fileType: 'audio',
filePath: res.tempFilePath
})
// Upload the recording to the server for speech recognition
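// Assumed response shape, inferred from the parsing below rather than a documented contract:
// { code: 200, data: '<recognized text>', message: '<error detail on failure>' }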
const uploadResult = await uni.uploadFile({
url: config.vioceBaseURl,
filePath: res.tempFilePath,
name: 'file',
fileType: 'audio',
method: 'POST' // explicitly send as a POST request
})
console.log('Speech recognition upload result:', uploadResult)
if (uploadResult.statusCode === 200) {
try {
const result = JSON.parse(uploadResult.data)
console.log('Speech recognition result:', result)
if (result.code === 200 && result.data) {
recognizedText.value = result.data
console.log('Speech recognition succeeded, result:', recognizedText.value)
// After recognition succeeds, the message should be sent automatically;
// an event needs to be emitted here so the parent component knows recognition succeeded,
// or the send-message method could be called directly
isRecognizing.value = false // recognition succeeded, reset state
} else {
console.error('Speech recognition returned an error:', result.message || 'Unknown error')
$api.msg('Speech recognition failed, please try again')
isRecognizing.value = false // recognition failed, reset state
}
} catch (parseErr) {
console.error('Failed to parse speech recognition result:', parseErr)
$api.msg('Speech recognition failed, please try again')
isRecognizing.value = false // parsing failed, reset state
}
} else {
console.error('Speech recognition request failed, status code:', uploadResult.statusCode)
$api.msg('Speech recognition failed, please try again')
isRecognizing.value = false // request failed, reset state
}
} catch (err) {
console.error('Speech recognition upload failed:', err)
$api.msg('Speech recognition failed, please try again')
isRecognizing.value = false // upload failed, reset state
}
})
// Listen for recording errors
recorderManager.onError((err) => {
console.error('Mini program recording error:', err)
$api.msg('Recording failed, please try again');
cleanup()
})
// WeChat mini program recording API
await recorderManager.start({
duration: 60000, // maximum recording length: 60 seconds
sampleRate: 16000,
numberOfChannels: 1,
encodeBitRate: 96000,
format: 'mp3'
})
isRecording.value = true
recordingDuration.value = 0
durationTimer = setInterval(() => recordingDuration.value++, 1000)
// Listen for recorded audio frames (optional)
recorderManager.onFrameRecorded((res) => {
// Update the volume display (res.volume may be undefined here, in which case it falls back to 0)
volumeLevel.value = res.volume || 0
audioDataForDisplay.value = Array(16).fill(volumeLevel.value)
})
} catch (err) {
console.error('Failed to start mini program recording:', err)
$api.msg('Failed to start recording, please try again');
cleanup()
}
return;
// #endif
@@ -164,6 +266,7 @@ export function useAudioRecorder() {
recognizedText.value = ''
lastFinalText.value = ''
// Do not set isRecognizing to true when recording starts; it is only set to true after recording stops
await connectWebSocket()
audioStream = await navigator.mediaDevices.getUserMedia({
@@ -201,6 +304,7 @@ export function useAudioRecorder() {
durationTimer = setInterval(() => recordingDuration.value++, 1000)
} catch (err) {
console.error('Failed to start recording:', err)
isRecognizing.value = false // start failed, reset state
cleanup()
}
// #endif
@@ -210,6 +314,12 @@ export function useAudioRecorder() {
if (!isRecording.value || isStopping.value) return
isStopping.value = true
// #ifdef MP-WEIXIN
uni.getRecorderManager().stop()
// In the mini program, stopping triggers onStop; the recognition result and state reset are handled there
// #endif
// #ifdef H5
if (websocket?.readyState === WebSocket.OPEN) {
websocket.send(JSON.stringify({
header: {
@@ -218,18 +328,39 @@ export function useAudioRecorder() {
message_id: generateUUID()
}
}))
websocket.close()
// In H5, do not call cleanup() immediately; wait for recognition to complete
}
// #endif
cleanup()
// Only clean up recording-related resources; do not reset the recognition state
clearInterval(durationTimer)
audioStream?.getTracks().forEach(track => track.stop())
audioContext?.close()
audioStream = null
audioContext = null
audioInput = null
scriptProcessor = null
isRecording.value = false
isSocketConnected.value = false
isStopping.value = false
}
const cancelRecording = () => {
if (!isRecording.value || isStopping.value) return
isStopping.value = true
// #ifdef MP-WEIXIN
uni.getRecorderManager().stop()
// #endif
// #ifdef H5
websocket?.close()
// #endif
// Cancelling the recording resets all state
cleanup()
isRecognizing.value = false
isStopping.value = false
}
@@ -249,16 +380,22 @@ export function useAudioRecorder() {
isRecording.value = false
isSocketConnected.value = false
isRecognizing.value = false // recording stopped, reset recognition state
}
onUnmounted(() => {
if (isRecording.value) stopRecording()
})
const reset = () => {
cleanup()
}
return {
isRecording,
isStopping,
isSocketConnected,
isRecognizing,
recordingDuration,
audioDataForDisplay,
volumeLevel,
@@ -266,6 +403,7 @@ export function useAudioRecorder() {
lastFinalText,
startRecording,
stopRecording,
cancelRecording
cancelRecording,
reset
}
}
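
For reference, a minimal sketch of how a component might consume the state and methods returned above. The component wiring, the sendMessage handler, and the import path are assumptions for illustration only, not part of this commit:

// Hypothetical consumer (<script setup> of a chat input component)
import { watch } from 'vue'
import { useAudioRecorder } from '@/composables/useAudioRecorder' // assumed path

const {
  isRecording,
  isRecognizing,
  recognizedText,
  startRecording,
  stopRecording,
  cancelRecording,
  reset
} = useAudioRecorder()

// When recognition finishes, recognizedText is filled and isRecognizing drops back to false
watch(recognizedText, (text) => {
  if (text) {
    sendMessage(text) // assumed handler that posts the recognized text
    reset()           // clear recorder state before the next recording
  }
})

// Template wiring (sketch):
// - show a loading indicator while isRecognizing is true
// - press and hold: @touchstart="startRecording" @touchend="stopRecording"
// - slide away to cancel: @touchcancel="cancelRecording"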