2025-03-28 15:19:42 +08:00
|
|
|
|
import {
|
|
|
|
|
|
ref,
|
|
|
|
|
|
onUnmounted
|
2025-07-22 15:20:21 +08:00
|
|
|
|
} from 'vue'
|
|
|
|
|
|
import {
|
|
|
|
|
|
$api,
|
2025-03-28 15:19:42 +08:00
|
|
|
|
|
2025-07-22 15:20:21 +08:00
|
|
|
|
} from '../common/globalFunction';
|
2025-03-28 15:19:42 +08:00
|
|
|
|
|
2025-07-22 15:20:21 +08:00
|
|
|
|
import config from '@/config'
|
2025-03-28 15:19:42 +08:00
|
|
|
|
|
2025-07-22 15:20:21 +08:00
|
|
|
|
/**
 * Composable providing voice recording + speech recognition.
 *
 * Two platform paths selected by uni-app conditional compilation:
 *  - MP-WEIXIN: records a complete audio file via uni.getRecorderManager()
 *    and uploads it for recognition once the user stops.
 *  - H5: streams 16 kHz PCM frames over a WebSocket to a realtime
 *    transcription service (Aliyun-style "SpeechTranscriber" protocol).
 *
 * @returns {object} reactive state refs plus start/stop/cancel/reset controls.
 */
export function useAudioRecorder() {
  // --- Reactive state exposed to the caller ---
  const isRecording = ref(false)        // capture currently in progress
  const isStopping = ref(false)         // guards re-entrant stop/cancel calls
  const isSocketConnected = ref(false)  // H5 transcription WebSocket state
  const recordingDuration = ref(0)      // elapsed seconds, ticked by durationTimer
  const audioDataForDisplay = ref(new Array(16).fill(0)) // 16-bar waveform values for the UI
  const volumeLevel = ref(0)            // most recent volume sample (H5: RMS of a PCM frame)
  const recognizedText = ref('')        // accumulated final recognition result
  const lastFinalText = ref('')         // latest intermediate (not yet final) text
  const isRecognizing = ref(false)      // recognition in flight; exposed to callers

  // --- Non-reactive handles for the H5 audio pipeline + socket ---
  let audioStream = null                // MediaStream from getUserMedia
  let audioContext = null               // AudioContext created at 16 kHz
  let audioInput = null                 // MediaStreamSource node
  let scriptProcessor = null            // ScriptProcessorNode pushing PCM frames
  let websocket = null                  // transcription WebSocket
  let durationTimer = null              // setInterval handle driving recordingDuration
|
2025-03-28 15:19:42 +08:00
|
|
|
|
|
2025-07-22 15:20:21 +08:00
|
|
|
|
const generateUUID = () => {
|
|
|
|
|
|
return ([1e7] + -1e3 + -4e3 + -8e3 + -1e11)
|
|
|
|
|
|
.replace(/[018]/g, c =>
|
|
|
|
|
|
(c ^ crypto.getRandomValues(new Uint8Array(1))[0] & 15 >> c / 4).toString(16)
|
|
|
|
|
|
).replace(/-/g, '')
|
2025-04-07 09:10:55 +08:00
|
|
|
|
}
|
2025-03-28 15:19:42 +08:00
|
|
|
|
|
2025-07-22 15:20:21 +08:00
|
|
|
|
/**
 * Asks the backend for a signed realtime-transcription WebSocket URL.
 *
 * @returns {Promise<string>} the fully-signed ws URL (carried in `msg`).
 * @throws {Error} when the backend does not answer with code 200.
 */
const fetchWsUrl = async () => {
  const response = await $api.createRequest('/app/speech/getToken')
  if (response.code !== 200) {
    throw new Error('无法获取语音识别 wsUrl')
  }
  // The backend places the ready-to-use ws URL in the `msg` field.
  return response.msg
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Pulls the `appkey` and `token` query parameters out of a ws URL.
 *
 * @param {string} wsUrl - absolute WebSocket URL with query string.
 * @returns {{appkey: string|null, token: string|null}} missing params are null.
 */
function extractWsParams(wsUrl) {
  const { searchParams } = new URL(wsUrl)
  return {
    appkey: searchParams.get('appkey'),
    token: searchParams.get('token'),
  }
}
|
2025-03-28 15:19:42 +08:00
|
|
|
|
|
2025-04-07 09:10:55 +08:00
|
|
|
|
|
2025-07-22 15:20:21 +08:00
|
|
|
|
/**
 * Opens the realtime transcription WebSocket and installs all protocol
 * handlers. Resolves once the socket is open and StartTranscription has been
 * sent; rejects if the socket errors before that.
 */
const connectWebSocket = async () => {
  const wsUrl = await fetchWsUrl()
  const {
    appkey,
    token // NOTE(review): token travels inside the signed URL; unused here — confirm it can be dropped
  } = extractWsParams(wsUrl)
  return new Promise((resolve, reject) => {
    websocket = new WebSocket(wsUrl)
    websocket.binaryType = 'arraybuffer' // outgoing PCM frames are raw binary

    websocket.onopen = () => {
      isSocketConnected.value = true

      // Send the StartTranscription command (modeled on the service demo page).
      const startTranscriptionMessage = {
        header: {
          appkey: appkey, // harmless if empty — the wsUrl itself carries it
          namespace: 'SpeechTranscriber',
          name: 'StartTranscription',
          task_id: generateUUID(),
          message_id: generateUUID()
        },
        payload: {
          format: 'pcm',
          sample_rate: 16000, // must match the H5 AudioContext sample rate
          enable_intermediate_result: true,
          enable_punctuation_prediction: true,
          enable_inverse_text_normalization: true
        }
      }
      websocket.send(JSON.stringify(startTranscriptionMessage))
      // Resolve right after sending — audio may start flowing before the
      // server's TranscriptionStarted ack. NOTE(review): confirm the service
      // tolerates audio arriving before the ack.
      resolve()
    }

    websocket.onerror = (e) => {
      isSocketConnected.value = false
      reject(e) // no-op if the promise already resolved in onopen
    }

    websocket.onclose = () => {
      isSocketConnected.value = false
    }

    // Protocol messages are JSON envelopes; dispatch on header.name.
    websocket.onmessage = (e) => {
      const msg = JSON.parse(e.data)
      const name = msg?.header?.name
      const payload = msg?.payload

      switch (name) {
        case 'TranscriptionResultChanged': {
          // Intermediate text for the in-progress sentence
          // (optionally stash_result.unfixedText would be more precise).
          const text = payload?.unfixed_result || payload?.result || ''
          lastFinalText.value = text
          break
        }
        case 'SentenceBegin': {
          // Optional: a new sentence starts; nothing needs resetting here.
          // console.log('开始新的句子识别')
          break
        }
        case 'SentenceEnd': {
          const text = payload?.result || ''
          const confidence = payload?.confidence || 0
          // Only accumulate reasonably confident final sentences; the
          // intermediate buffer is cleared once the sentence is committed.
          if (text && confidence > 0.5) {
            recognizedText.value += text
            lastFinalText.value = ''
            // console.log('识别完成:', {
            //   text,
            //   confidence
            // })
          }
          break
        }
        case 'TranscriptionStarted': {
          // console.log('识别任务已开始')
          break
        }
        case 'TranscriptionCompleted': {
          lastFinalText.value = ''
          isRecognizing.value = false // recognition finished — clear the flag
          // console.log('识别全部完成')
          cleanup()
          break
        }
        case 'TaskFailed': {
          console.error('识别失败:', msg?.header?.status_text)
          isRecognizing.value = false // recognition failed — clear the flag
          cleanup()
          break
        }
        default:
          console.log('未知消息类型:', name, msg)
          break
      }
    }
  })
}
|
2025-03-28 15:19:42 +08:00
|
|
|
|
|
|
|
|
|
|
/**
 * Starts a recording session. Split by uni-app conditional compilation:
 * the MP-WEIXIN branch records a whole file and uploads it for recognition
 * after the user stops; the H5 branch streams live PCM to the transcription
 * WebSocket opened by connectWebSocket().
 */
const startRecording = async () => {
  if (isRecording.value) return

  // #ifdef MP-WEIXIN
  try {
    recognizedText.value = ''
    lastFinalText.value = ''
    // isRecognizing stays false while recording; it only turns true after the
    // user stops, while the uploaded audio is being recognized.

    // NOTE(review): uni.getRecorderManager() returns a shared singleton —
    // confirm that registering onStop/onError on every start does not stack
    // duplicate handlers.
    const recorderManager = uni.getRecorderManager()

    // Fires once recording stops; uploads the file for offline recognition.
    recorderManager.onStop(async (res) => {
      console.log('小程序录音完成:', res)
      try {
        // Show the "recognizing" loading state while the upload runs.
        isRecognizing.value = true

        // Log the request configuration for debugging.
        console.log('准备上传语音识别请求配置:', {
          url: config.vioceBaseURl,
          name: 'file',
          method: 'POST',
          fileType: 'audio',
          filePath: res.tempFilePath
        })

        // Upload the recorded file to the recognition endpoint.
        const uploadResult = await uni.uploadFile({
          url: config.vioceBaseURl,
          filePath: res.tempFilePath,
          name: 'file',
          fileType: 'audio',
          method: 'POST' // explicitly POST
        })

        console.log('语音识别上传结果:', uploadResult)

        if (uploadResult.statusCode === 200) {
          try {
            // uploadFile returns the response body as a string.
            const result = JSON.parse(uploadResult.data)
            console.log('语音识别结果:', result)
            if (result.code === 200 && result.data) {
              recognizedText.value = result.data
              console.log('语音识别成功,识别结果:', recognizedText.value)
              // After a successful recognition the message could be sent
              // automatically — either by emitting an event to the parent
              // component or by invoking the send method directly.
              isRecognizing.value = false // success — clear the loading state
            } else {
              console.error('语音识别返回错误:', result.message || '未知错误')
              $api.msg('语音识别失败,请重试')
              isRecognizing.value = false // backend error — clear the loading state
            }
          } catch (parseErr) {
            console.error('语音识别结果解析失败:', parseErr)
            $api.msg('语音识别失败,请重试')
            isRecognizing.value = false // parse failure — clear the loading state
          }
        } else {
          console.error('语音识别请求失败,状态码:', uploadResult.statusCode)
          $api.msg('语音识别失败,请重试')
          isRecognizing.value = false // HTTP failure — clear the loading state
        }
      } catch (err) {
        console.error('语音识别上传失败:', err)
        $api.msg('语音识别失败,请重试')
        isRecognizing.value = false // upload failure — clear the loading state
      }
    })

    // Recording error: notify the user and tear everything down.
    recorderManager.onError((err) => {
      console.error('小程序录音错误:', err)
      $api.msg('录音失败,请重试');
      cleanup()
    })

    // Start the WeChat mini-program recorder.
    await recorderManager.start({
      duration: 60000, // hard cap: 60 seconds
      sampleRate: 16000,
      numberOfChannels: 1,
      encodeBitRate: 96000,
      format: 'mp3'
    })

    isRecording.value = true
    recordingDuration.value = 0
    durationTimer = setInterval(() => recordingDuration.value++, 1000)

    // Frame callback used only to animate the volume display.
    // NOTE(review): onFrameRecorded normally requires a frameSize option in
    // start() to fire — verify this callback actually runs.
    recorderManager.onFrameRecorded((res) => {
      volumeLevel.value = res.volume || 0
      audioDataForDisplay.value = Array(16).fill(volumeLevel.value)
    })
  } catch (err) {
    console.error('小程序录音启动失败:', err)
    $api.msg('录音启动失败,请重试');
    cleanup()
  }
  return;
  // #endif

  // #ifdef H5
  try {
    // Bail out early in environments without media capture support.
    if (typeof navigator === 'undefined' || !navigator.mediaDevices) {
      $api.msg('当前环境不支持录音功能');
      return;
    }

    recognizedText.value = ''
    lastFinalText.value = ''
    // isRecognizing stays false while recording; it only turns true after
    // the user stops recording.
    await connectWebSocket()

    audioStream = await navigator.mediaDevices.getUserMedia({
      audio: true
    })
    // 16 kHz context to match the sample_rate advertised to the service.
    audioContext = new(window.AudioContext || window.webkitAudioContext)({
      sampleRate: 16000
    })
    audioInput = audioContext.createMediaStreamSource(audioStream)
    scriptProcessor = audioContext.createScriptProcessor(2048, 1, 1)

    // Per-frame: convert float samples to 16-bit PCM, compute RMS volume,
    // and push the frame to the transcription socket.
    scriptProcessor.onaudioprocess = (event) => {
      const input = event.inputBuffer.getChannelData(0)
      const pcm = new Int16Array(input.length)
      let sum = 0
      for (let i = 0; i < input.length; ++i) {
        const s = Math.max(-1, Math.min(1, input[i])) // clamp to [-1, 1]
        pcm[i] = s * 0x7FFF
        sum += s * s
      }

      volumeLevel.value = Math.sqrt(sum / input.length) // RMS of the frame
      audioDataForDisplay.value = Array(16).fill(volumeLevel.value)

      if (websocket?.readyState === WebSocket.OPEN) {
        websocket.send(pcm.buffer)
      }
    }

    audioInput.connect(scriptProcessor)
    scriptProcessor.connect(audioContext.destination)

    isRecording.value = true
    recordingDuration.value = 0
    durationTimer = setInterval(() => recordingDuration.value++, 1000)
  } catch (err) {
    console.error('启动失败:', err)
    isRecognizing.value = false // startup failed — clear recognition state
    cleanup()
  }
  // #endif
}
|
2025-03-28 15:19:42 +08:00
|
|
|
|
|
2025-07-22 15:20:21 +08:00
|
|
|
|
/**
 * Stops an active recording session.
 * MP-WEIXIN: stops the recorder; recognition continues in the onStop handler.
 * H5: sends StopTranscription and leaves the socket open until the service
 * reports completion (cleanup() then runs from the socket handlers).
 * Capture resources are released here either way, but isRecognizing is
 * deliberately left untouched so the UI can keep showing a loading state.
 */
const stopRecording = () => {
  if (!isRecording.value || isStopping.value) return
  isStopping.value = true

  // #ifdef MP-WEIXIN
  uni.getRecorderManager().stop()
  // The mini-program fires onStop next; recognition and state reset happen there.
  // #endif

  // #ifdef H5
  if (websocket?.readyState === WebSocket.OPEN) {
    websocket.send(JSON.stringify({
      header: {
        namespace: 'SpeechTranscriber',
        name: 'StopTranscription',
        message_id: generateUUID()
      }
    }))
    // H5: do not call cleanup() yet — wait for TranscriptionCompleted.
  }
  // #endif

  // Release only the capture resources; keep recognition state intact.
  clearInterval(durationTimer)
  audioStream?.getTracks().forEach(track => track.stop())
  audioContext?.close()
  audioStream = null
  audioContext = null
  audioInput = null
  scriptProcessor = null

  isRecording.value = false
  // NOTE(review): isSocketConnected is cleared here even though the H5 socket
  // may still be open awaiting completion — confirm this is intentional.
  isSocketConnected.value = false
  isStopping.value = false
}
|
2025-03-28 15:19:42 +08:00
|
|
|
|
|
2025-07-22 15:20:21 +08:00
|
|
|
|
/**
 * Aborts the current recording and discards any pending recognition.
 * Unlike stopRecording(), this fully resets all state via cleanup().
 */
const cancelRecording = () => {
  if (!isRecording.value || isStopping.value) return
  isStopping.value = true

  // #ifdef MP-WEIXIN
  uni.getRecorderManager().stop()
  // NOTE(review): onStop still fires after stop() and will upload the audio —
  // confirm whether cancel should also suppress that handler.
  // #endif

  // #ifdef H5
  websocket?.close()
  // #endif

  // Cancelling resets every flag, including the recognition state.
  cleanup()
  isRecognizing.value = false
  isStopping.value = false
}
|
2025-03-28 15:19:42 +08:00
|
|
|
|
|
|
|
|
|
|
/**
 * Tears down every audio/WebSocket resource and resets recorder state.
 * Idempotent: all handles are optional-chained, so repeated calls are safe.
 */
const cleanup = () => {
  clearInterval(durationTimer)

  // Detach the processing graph, then release the hardware stream and context.
  scriptProcessor?.disconnect()
  audioInput?.disconnect()
  if (audioStream) {
    for (const track of audioStream.getTracks()) {
      track.stop()
    }
  }
  audioContext?.close()

  // Drop every handle so a later session starts from a clean slate.
  audioStream = null
  audioContext = null
  audioInput = null
  scriptProcessor = null
  websocket = null

  // Reset the reactive flags, including the recognition state.
  isRecording.value = false
  isSocketConnected.value = false
  isRecognizing.value = false
}
|
2025-04-07 09:10:55 +08:00
|
|
|
|
|
2025-03-28 15:19:42 +08:00
|
|
|
|
// Safety net: stop any in-flight recording when the owning component unmounts.
onUnmounted(() => {
  if (isRecording.value) stopRecording()
})
|
2025-03-28 15:19:42 +08:00
|
|
|
|
|
2026-01-23 22:01:38 +08:00
|
|
|
|
// Public reset hook: discard the current session and return to idle state.
const reset = () => cleanup()
|
|
|
|
|
|
|
2025-03-28 15:19:42 +08:00
|
|
|
|
// Public API of the composable: reactive state + session controls.
return {
  isRecording,
  isStopping,
  isSocketConnected,
  isRecognizing,
  recordingDuration,
  audioDataForDisplay,
  volumeLevel,
  recognizedText,
  lastFinalText,
  startRecording,
  stopRecording,
  cancelRecording,
  reset
}
}
|