feat: add speech recognition SDK + file detection

Apcallover
2025-12-19 10:25:10 +08:00
parent 4c29882f36
commit 4befbb05cc
9 changed files with 1080 additions and 1141 deletions

hook/piper-bundle.js (new file)

@@ -0,0 +1,459 @@
/**
* PiperTTS Bundle (SDK + Worker + PCMPlayer)
* Fix: Smart End Detection that supports Pause/Resume
*/
class PCMPlayer {
constructor(options) {
this.init(options);
}
init(options) {
this.option = Object.assign({}, {
inputCodec: 'Int16',
channels: 1,
sampleRate: 16000,
flushTime: 50,
fftSize: 2048,
}, options);
this.samples = new Float32Array();
this.interval = setInterval(this.flush.bind(this), this.option.flushTime);
this.convertValue = this.getConvertValue();
this.typedArray = this.getTypedArray();
this.initAudioContext();
this.bindAudioContextEvent();
}
getConvertValue() {
const map = {
Int8: 128,
Int16: 32768,
Int32: 2147483648,
Float32: 1
};
if (!map[this.option.inputCodec]) throw new Error('Codec Error');
return map[this.option.inputCodec];
}
getTypedArray() {
const map = {
Int8: Int8Array,
Int16: Int16Array,
Int32: Int32Array,
Float32: Float32Array
};
if (!map[this.option.inputCodec]) throw new Error('Codec Error');
return map[this.option.inputCodec];
}
initAudioContext() {
this.audioCtx = new (window.AudioContext || window.webkitAudioContext)();
this.gainNode = this.audioCtx.createGain();
this.gainNode.gain.value = 1.0;
this.gainNode.connect(this.audioCtx.destination);
this.startTime = this.audioCtx.currentTime;
this.analyserNode = this.audioCtx.createAnalyser();
this.analyserNode.fftSize = this.option.fftSize;
}
static isTypedArray(data) {
return (data.byteLength && data.buffer && data.buffer.constructor == ArrayBuffer) || data.constructor == ArrayBuffer;
}
isSupported(data) {
if (!PCMPlayer.isTypedArray(data)) throw new Error('Data must be ArrayBuffer or TypedArray');
return true;
}
feed(data) {
this.isSupported(data);
data = this.getFormattedValue(data);
const tmp = new Float32Array(this.samples.length + data.length);
tmp.set(this.samples, 0);
tmp.set(data, this.samples.length);
this.samples = tmp;
}
getFormattedValue(data) {
data = data.constructor == ArrayBuffer ? new this.typedArray(data) : new this.typedArray(data.buffer);
let float32 = new Float32Array(data.length);
for (let i = 0; i < data.length; i++) {
float32[i] = data[i] / this.convertValue;
}
return float32;
}
volume(val) {
this.gainNode.gain.value = val;
}
destroy() {
if (this.interval) clearInterval(this.interval);
this.samples = null;
if (this.audioCtx) {
this.audioCtx.close();
this.audioCtx = null;
}
}
flush() {
if (!this.samples.length) return;
const bufferSource = this.audioCtx.createBufferSource();
if (typeof this.option.onended === 'function') {
bufferSource.onended = (e) => this.option.onended(this, e);
}
const length = this.samples.length / this.option.channels;
const audioBuffer = this.audioCtx.createBuffer(this.option.channels, length, this.option.sampleRate);
for (let channel = 0; channel < this.option.channels; channel++) {
const audioData = audioBuffer.getChannelData(channel);
let offset = channel;
let decrement = 50;
for (let i = 0; i < length; i++) {
audioData[i] = this.samples[offset];
if (i < 50) audioData[i] = (audioData[i] * i) / 50;
if (i >= length - 51) audioData[i] = (audioData[i] * decrement--) / 50;
offset += this.option.channels;
}
}
if (this.startTime < this.audioCtx.currentTime) {
this.startTime = this.audioCtx.currentTime;
}
bufferSource.buffer = audioBuffer;
bufferSource.connect(this.gainNode);
bufferSource.connect(this.analyserNode);
bufferSource.start(this.startTime);
this.startTime += audioBuffer.duration;
this.samples = new Float32Array();
}
async pause() {
await this.audioCtx.suspend();
}
async continue() {
await this.audioCtx.resume();
}
bindAudioContextEvent() {
if (typeof this.option.onstatechange === 'function') {
this.audioCtx.onstatechange = (e) => {
this.option.onstatechange(this, e, this.audioCtx.state);
};
}
}
}
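/*
 * A minimal usage sketch (illustrative only; assumes a source that delivers
 * 16 kHz mono Int16 PCM chunks as ArrayBuffers, e.g. a WebSocket with
 * binaryType = 'arraybuffer'):
 *
 *   const player = new PCMPlayer({ inputCodec: 'Int16', channels: 1, sampleRate: 16000 });
 *   ws.onmessage = (e) => player.feed(e.data); // queue each PCM chunk for playback
 *   player.volume(0.8);                        // gain, 0..1
 *   await player.pause();                      // suspends the AudioContext
 *   await player.continue();                   // resumes it
 *   player.destroy();                          // stops the flush timer and closes the context
 */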
// ==========================================
// Worker source
// ==========================================
const WORKER_SOURCE = `
let globalWs = null;
self.onmessage = function (e) {
const { type, data } = e.data;
switch (type) {
case 'connect': connectWebSocket(data); break;
case 'stop': closeWs(); break;
}
};
function closeWs() {
if (globalWs) {
globalWs.onerror = null;
globalWs.onclose = null;
globalWs.onmessage = null;
try { globalWs.close(1000, 'User stopped'); } catch (e) {}
globalWs = null;
}
}
function connectWebSocket(config) {
closeWs();
const { url, text, options } = config;
self.postMessage({ type: 'status', data: 'ws_connecting' });
try {
const currentWs = new WebSocket(url);
currentWs.binaryType = 'arraybuffer';
globalWs = currentWs;
currentWs.onopen = () => {
if (globalWs !== currentWs) return;
self.postMessage({ type: 'status', data: 'ws_connected' });
currentWs.send(JSON.stringify({
text: text,
speaker_id: options.speakerId || 0,
length_scale: options.lengthScale || 1.0,
noise_scale: options.noiseScale || 0.667,
}));
self.postMessage({ type: 'status', data: 'generating' });
};
currentWs.onmessage = (event) => {
if (globalWs !== currentWs) return;
if (typeof event.data === 'string' && event.data === 'END') {
const wsToClose = currentWs;
globalWs = null;
wsToClose.onmessage = null;
wsToClose.onerror = null;
wsToClose.onclose = null;
try { wsToClose.close(1000, 'Done'); } catch(e) {}
self.postMessage({ type: 'end' });
} else {
self.postMessage({ type: 'audio-data', buffer: event.data }, [event.data]);
}
};
currentWs.onclose = (e) => {
if (globalWs === currentWs) {
self.postMessage({ type: 'end' });
globalWs = null;
}
};
currentWs.onerror = () => {
if (globalWs === currentWs) {
self.postMessage({ type: 'error', data: 'WebSocket error' });
}
};
} catch (e) {
self.postMessage({ type: 'error', data: e.message });
}
}
`;
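/*
 * Message protocol implemented by the worker above:
 *   main -> worker : { type: 'connect', data: { url, text, options } } opens the socket;
 *                    { type: 'stop' } tears it down.
 *   worker -> main : { type: 'status' | 'end' | 'error', data } for lifecycle events, and
 *                    { type: 'audio-data', buffer } for each PCM chunk (sent as a transferable).
 */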
// ==========================================
// PiperTTS SDK
// ==========================================
class PiperTTS {
constructor(config = {}) {
this.baseUrl = config.baseUrl || 'http://localhost:5001';
this.onStatus = config.onStatus || console.log;
this.onStart = config.onStart || (() => {});
this.onEnd = config.onEnd || (() => {});
this.sampleRate = config.sampleRate || 16000;
this.player = null;
this.worker = null;
this.recordedChunks = [];
this.isRecording = false;
// New: timer ID for the audio-end polling check
this.endCheckInterval = null;
this._initWorker();
}
_initWorker() {
const blob = new Blob([WORKER_SOURCE], {
type: 'application/javascript'
});
this.worker = new Worker(URL.createObjectURL(blob));
this.worker.onmessage = (e) => {
const {
type,
data,
buffer
} = e.data;
switch (type) {
case 'status':
const map = {
ws_connecting: '正在连接...',
ws_connected: '已连接',
generating: '流式接收中...'
};
this.onStatus(map[data] || data, 'processing');
break;
case 'error':
if (this.recordedChunks.length > 0) {
this.onStatus('数据接收完毕', 'success');
this._triggerEndWithDelay();
} else {
this.onStatus(`错误: ${data}`, 'error');
this.stop();
}
break;
case 'audio-data':
this._handleAudio(buffer);
break;
case 'end':
this.onStatus('数据接收完毕', 'success');
this._triggerEndWithDelay();
break;
}
};
}
/**
 * [Core change] Smart polling check.
 * onEnd fires only when the AudioContext is running and all scheduled audio
 * has played out, so pause/resume does not cut playback short.
 */
_triggerEndWithDelay() {
// Clear any leftover timer first
if (this.endCheckInterval) clearInterval(this.endCheckInterval);
// Check every 200 ms
this.endCheckInterval = setInterval(() => {
// 1. If the player is gone, finish immediately
if (!this.player || !this.player.audioCtx) {
this._finishEndCheck();
return;
}
// 2. While suspended (paused), do nothing and keep waiting
if (this.player.audioCtx.state === 'suspended') {
return;
}
// 3. Compute the remaining time:
// startTime is the absolute end time of the scheduled buffers; currentTime is now
const remainingTime = this.player.startTime - this.player.audioCtx.currentTime;
// 4. Less than 0.1 s remaining (a small safety margin) means playback has finished
if (remainingTime <= 0.1) {
this._finishEndCheck();
}
}, 200);
}
_finishEndCheck() {
if (this.endCheckInterval) {
clearInterval(this.endCheckInterval);
this.endCheckInterval = null;
}
this.onEnd();
}
_initPlayer() {
if (this.player) {
this.player.destroy();
}
this.player = new PCMPlayer({
inputCodec: 'Int16',
channels: 1,
sampleRate: this.sampleRate,
flushTime: 50,
});
}
async speak(text, options = {}) {
if (!text) return;
this.stop();
this._initPlayer();
if (this.player) {
await this.player.continue();
}
this.recordedChunks = [];
this.isRecording = true;
this.onStart();
const wsUrl = this.baseUrl.replace(/^http/, 'ws') + '/ws/synthesize';
this.worker.postMessage({
type: 'connect',
data: {
url: wsUrl,
text,
options
},
});
}
stop() {
// The end-check polling must be cleared when stopping
if (this.endCheckInterval) {
clearInterval(this.endCheckInterval);
this.endCheckInterval = null;
}
this.worker.postMessage({
type: 'stop'
});
if (this.player) {
this.player.destroy();
this.player = null;
}
this.onStatus('已停止', 'default');
}
_handleAudio(arrayBuffer) {
if (this.isRecording) {
this.recordedChunks.push(arrayBuffer);
}
if (this.player) {
this.player.feed(arrayBuffer);
}
}
getAnalyserNode() {
return this.player ? this.player.analyserNode : null;
}
downloadAudio(filename = 'tts_output.wav') {
if (this.recordedChunks.length === 0) return;
let totalLen = 0;
for (let chunk of this.recordedChunks) totalLen += chunk.byteLength;
const tmp = new Uint8Array(totalLen);
let offset = 0;
for (let chunk of this.recordedChunks) {
tmp.set(new Uint8Array(chunk), offset);
offset += chunk.byteLength;
}
const wavBuffer = this._encodeWAV(new Int16Array(tmp.buffer), this.sampleRate);
const blob = new Blob([wavBuffer], {
type: 'audio/wav'
});
const url = URL.createObjectURL(blob);
const a = document.createElement('a');
a.style = 'display: none';
a.href = url;
a.download = filename;
document.body.appendChild(a);
a.click();
window.URL.revokeObjectURL(url);
}
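/**
 * Note: the header written below is the standard 44-byte RIFF/WAVE preamble for
 * mono 16-bit PCM - format tag 1 (PCM), 1 channel, byte rate = sampleRate * 2,
 * block align = 2, 16 bits per sample - followed by the little-endian samples.
 */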
_encodeWAV(samples, sampleRate) {
const buffer = new ArrayBuffer(44 + samples.length * 2);
const view = new DataView(buffer);
const writeString = (view, offset, string) => {
for (let i = 0; i < string.length; i++) view.setUint8(offset + i, string.charCodeAt(i));
};
writeString(view, 0, 'RIFF');
view.setUint32(4, 36 + samples.length * 2, true);
writeString(view, 8, 'WAVE');
writeString(view, 12, 'fmt ');
view.setUint32(16, 16, true);
view.setUint16(20, 1, true);
view.setUint16(22, 1, true);
view.setUint32(24, sampleRate, true);
view.setUint32(28, sampleRate * 2, true);
view.setUint16(32, 2, true);
view.setUint16(34, 16, true);
writeString(view, 36, 'data');
view.setUint32(40, samples.length * 2, true);
let offset = 44;
for (let i = 0; i < samples.length; i++) {
view.setInt16(offset, samples[i], true);
offset += 2;
}
return view;
}
}
export default PiperTTS;
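A minimal usage sketch for the SDK above (illustrative; assumes a Piper server reachable at http://localhost:5001, from which the SDK derives the ws://.../ws/synthesize endpoint used by the worker):

const tts = new PiperTTS({
  baseUrl: 'http://localhost:5001',
  sampleRate: 16000,
  onStatus: (msg, type) => console.log(`[${type}]`, msg),
  onStart: () => console.log('playback started'),
  onEnd: () => console.log('playback finished'),
});
await tts.speak('你好,世界', { speakerId: 0, lengthScale: 1.0, noiseScale: 0.667 });
// later: tts.stop(), or tts.downloadAudio('clip.wav') to save the streamed PCM as a WAV file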


@@ -3,255 +3,344 @@ import {
onUnmounted
} from 'vue'
import {
$api,
} from '../common/globalFunction';
$api
} from '../common/globalFunction'; // your request wrapper
import config from '@/config'
// Alibaba Cloud
// Open source
export function useAudioRecorder() {
// --- State ---
const isRecording = ref(false)
const isStopping = ref(false)
const isSocketConnected = ref(false)
const recordingDuration = ref(0)
const audioDataForDisplay = ref(new Array(16).fill(0))
const volumeLevel = ref(0)
const volumeLevel = ref(0) // 0-100
const recognizedText = ref('')
const lastFinalText = ref('')
let audioStream = null
let audioContext = null
let audioInput = null
let scriptProcessor = null
let websocket = null
// --- Internal variables ---
let socketTask = null
let durationTimer = null
const generateUUID = () => {
return ([1e7] + -1e3 + -4e3 + -8e3 + -1e11)
.replace(/[018]/g, c =>
(c ^ crypto.getRandomValues(new Uint8Array(1))[0] & 15 >> c / 4).toString(16)
).replace(/-/g, '')
// --- App / mini-program variables ---
let recorderManager = null;
// --- H5 variables ---
let audioContext = null;
let scriptProcessor = null;
let mediaStreamSource = null;
let h5Stream = null;
// --- Recorder config ---
const RECORD_CONFIG = {
duration: 600000,
sampleRate: 16000,
numberOfChannels: 1,
format: 'pcm',
frameSize: 4096
}
const fetchWsUrl = async () => {
const res = await $api.createRequest('/app/speech/getToken')
if (res.code !== 200) throw new Error('无法获取语音识别 wsUrl')
const wsUrl = res.msg
return wsUrl
/**
* Get the WebSocket URL (with token)
*/
const getWsUrl = async () => {
let wsUrl = config.vioceBaseURl
// Append the token
const token = uni.getStorageSync('token') || '';
if (token) {
const separator = wsUrl.includes('?') ? '&' : '?';
wsUrl = `${wsUrl}${separator}token=${encodeURIComponent(token)}`;
}
return wsUrl;
}
function extractWsParams(wsUrl) {
const url = new URL(wsUrl)
const appkey = url.searchParams.get('appkey')
const token = url.searchParams.get('token')
return {
appkey,
token
/**
* Start recording (entry point)
*/
const startRecording = async () => {
if (isRecording.value) return
try {
recognizedText.value = ''
volumeLevel.value = 0
// #ifdef H5
if (location.protocol !== 'https:' && location.hostname !== 'localhost') {
uni.showToast({
title: 'H5录音需要HTTPS环境',
icon: 'none'
});
return;
}
// #endif
const url = await getWsUrl()
console.log('正在连接 ASR:', url)
await connectSocket(url);
} catch (err) {
console.error('启动失败:', err);
uni.showToast({
title: '启动失败: ' + (err.message || ''),
icon: 'none'
});
cleanup();
}
}
const connectWebSocket = async () => {
const wsUrl = await fetchWsUrl()
const {
appkey,
token
} = extractWsParams(wsUrl)
/**
* Connect the WebSocket
*/
const connectSocket = (url) => {
return new Promise((resolve, reject) => {
websocket = new WebSocket(wsUrl)
websocket.binaryType = 'arraybuffer'
socketTask = uni.connectSocket({
url: url,
success: () => console.log('Socket 连接请求发送'),
fail: (err) => reject(err)
});
websocket.onopen = () => {
isSocketConnected.value = true
socketTask.onOpen((res) => {
console.log('WebSocket 已连接');
isSocketConnected.value = true;
// Send the StartTranscription message (see demo.html)
const startTranscriptionMessage = {
header: {
appkey: appkey, // does not affect usage; may be left empty or carried in the wsUrl
namespace: 'SpeechTranscriber',
name: 'StartTranscription',
task_id: generateUUID(),
message_id: generateUUID()
},
payload: {
format: 'pcm',
sample_rate: 16000,
enable_intermediate_result: true,
enable_punctuation_prediction: true,
enable_inverse_text_normalization: true
}
// #ifdef H5
startH5Recording().then(() => resolve()).catch(err => {
socketTask.close();
reject(err);
});
// #endif
// #ifndef H5
startAppRecording();
resolve();
// #endif
});
socketTask.onMessage((res) => {
// Receive text results
if (res.data) {
recognizedText.value = res.data;
}
websocket.send(JSON.stringify(startTranscriptionMessage))
resolve()
}
});
websocket.onerror = (e) => {
isSocketConnected.value = false
reject(e)
}
socketTask.onError((err) => {
console.error('Socket 错误:', err);
isSocketConnected.value = false;
stopRecording();
});
websocket.onclose = () => {
isSocketConnected.value = false
}
websocket.onmessage = (e) => {
const msg = JSON.parse(e.data)
const name = msg?.header?.name
const payload = msg?.payload
switch (name) {
case 'TranscriptionResultChanged': {
// Intermediate transcript (optional: stash_result.unfixedText is more precise)
const text = payload?.unfixed_result || payload?.result || ''
lastFinalText.value = text
break
}
case 'SentenceBegin': {
// Optional: a new sentence begins; reset state here if needed
// console.log('new sentence recognition started')
break
}
case 'SentenceEnd': {
const text = payload?.result || ''
const confidence = payload?.confidence || 0
if (text && confidence > 0.5) {
recognizedText.value += text
lastFinalText.value = ''
// console.log('recognition finished:', {
// text,
// confidence
// })
}
break
}
case 'TranscriptionStarted': {
// console.log('transcription task started')
break
}
case 'TranscriptionCompleted': {
lastFinalText.value = ''
// console.log('transcription fully completed')
break
}
case 'TaskFailed': {
console.error('识别失败:', msg?.header?.status_text)
break
}
default:
console.log('未知消息类型:', name, msg)
break
}
}
socketTask.onClose(() => {
isSocketConnected.value = false;
console.log('Socket 已关闭');
});
})
}
const startRecording = async () => {
if (isRecording.value) return
const startH5Recording = async () => {
try {
recognizedText.value = ''
lastFinalText.value = ''
await connectWebSocket()
audioStream = await navigator.mediaDevices.getUserMedia({
// 1. Get the microphone stream
const stream = await navigator.mediaDevices.getUserMedia({
audio: true
})
audioContext = new(window.AudioContext || window.webkitAudioContext)({
});
h5Stream = stream;
// 2. Create the AudioContext
const AudioContext = window.AudioContext || window.webkitAudioContext;
audioContext = new AudioContext({
sampleRate: 16000
})
audioInput = audioContext.createMediaStreamSource(audioStream)
scriptProcessor = audioContext.createScriptProcessor(2048, 1, 1)
});
mediaStreamSource = audioContext.createMediaStreamSource(stream);
scriptProcessor = audioContext.createScriptProcessor(4096, 1, 1);
scriptProcessor.onaudioprocess = (event) => {
const input = event.inputBuffer.getChannelData(0)
const pcm = new Int16Array(input.length)
let sum = 0
for (let i = 0; i < input.length; ++i) {
const s = Math.max(-1, Math.min(1, input[i]))
pcm[i] = s * 0x7FFF
sum += s * s
if (!isSocketConnected.value || !socketTask) return;
const inputData = event.inputBuffer.getChannelData(0);
calculateVolume(inputData, true);
const buffer = new ArrayBuffer(inputData.length * 2);
const view = new DataView(buffer);
for (let i = 0; i < inputData.length; i++) {
let s = Math.max(-1, Math.min(1, inputData[i]));
view.setInt16(i * 2, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
}
volumeLevel.value = Math.sqrt(sum / input.length)
audioDataForDisplay.value = Array(16).fill(volumeLevel.value)
socketTask.send({
data: buffer,
fail: (e) => console.error('发送音频失败', e)
});
};
if (websocket?.readyState === WebSocket.OPEN) {
websocket.send(pcm.buffer)
}
}
mediaStreamSource.connect(scriptProcessor);
scriptProcessor.connect(audioContext.destination);
audioInput.connect(scriptProcessor)
scriptProcessor.connect(audioContext.destination)
isRecording.value = true;
recordingDuration.value = 0;
durationTimer = setInterval(() => recordingDuration.value++, 1000);
console.log('H5 录音已启动');
isRecording.value = true
recordingDuration.value = 0
durationTimer = setInterval(() => recordingDuration.value++, 1000)
} catch (err) {
console.error('启动失败:', err)
cleanup()
console.error('H5 录音启动失败:', err);
throw err;
}
}
const stopRecording = () => {
if (!isRecording.value || isStopping.value) return
isStopping.value = true
const stopH5Resources = () => {
if (scriptProcessor) scriptProcessor.disconnect();
if (mediaStreamSource) mediaStreamSource.disconnect();
if (audioContext) audioContext.close();
if (h5Stream) h5Stream.getTracks().forEach(track => track.stop());
if (websocket?.readyState === WebSocket.OPEN) {
websocket.send(JSON.stringify({
header: {
namespace: 'SpeechTranscriber',
name: 'StopTranscription',
message_id: generateUUID()
}
}))
websocket.close()
scriptProcessor = null;
mediaStreamSource = null;
audioContext = null;
h5Stream = null;
}
const startAppRecording = () => {
recorderManager = uni.getRecorderManager();
recorderManager.onFrameRecorded((res) => {
const {
frameBuffer
} = res;
calculateVolume(frameBuffer, false);
if (isSocketConnected.value && socketTask) {
socketTask.send({
data: frameBuffer
});
}
});
recorderManager.onStart(() => {
console.log('APP 录音已开始');
isRecording.value = true;
recordingDuration.value = 0;
durationTimer = setInterval(() => recordingDuration.value++, 1000);
});
recorderManager.onError((err) => {
console.error('APP 录音报错:', err);
cleanup();
});
recorderManager.start(RECORD_CONFIG);
}
const stopHardwareResource = () => {
// Stop app / mini-program recording
if (recorderManager) {
recorderManager.stop();
}
cleanup()
isStopping.value = false
// Stop H5 recording
// #ifdef H5
if (scriptProcessor) scriptProcessor.disconnect();
if (mediaStreamSource) mediaStreamSource.disconnect();
if (audioContext) audioContext.close();
if (h5Stream) h5Stream.getTracks().forEach(track => track.stop());
scriptProcessor = null;
mediaStreamSource = null;
audioContext = null;
h5Stream = null;
// #endif
}
/**
* Stop recording (all platforms)
*/
const stopRecording = () => {
// Stop app recording
if (recorderManager) {
recorderManager.stop();
}
// Release H5 recording resources
// #ifdef H5
stopH5Resources();
// #endif
// Close the socket
if (socketTask) {
socketTask.close();
}
cleanup();
}
const cancelRecording = () => {
if (!isRecording.value || isStopping.value) return
isStopping.value = true
websocket?.close()
cleanup()
isStopping.value = false
if (!isRecording.value) return;
console.log('取消录音 - 丢弃结果');
// 1. Stop the hardware recorder
stopHardwareResource();
// 2. Force-close the socket
if (socketTask) {
socketTask.close();
}
// 3. Key step: discard the recognized text
recognizedText.value = '';
// 4. Clean up resources
cleanup();
}
/**
* Reset state
*/
const cleanup = () => {
clearInterval(durationTimer)
clearInterval(durationTimer);
isRecording.value = false;
isSocketConnected.value = false;
socketTask = null;
recorderManager = null;
volumeLevel.value = 0;
}
scriptProcessor?.disconnect()
audioInput?.disconnect()
audioStream?.getTracks().forEach(track => track.stop())
audioContext?.close()
/**
* Compute volume (handles Float32 and Int16/ArrayBuffer input)
*/
const calculateVolume = (data, isFloat32) => {
let sum = 0;
let length = 0;
audioStream = null
audioContext = null
audioInput = null
scriptProcessor = null
websocket = null
isRecording.value = false
isSocketConnected.value = false
if (isFloat32) {
length = data.length;
for (let i = 0; i < length; i += 10) {
sum += Math.abs(data[i]);
}
volumeLevel.value = Math.min(100, Math.floor((sum / (length / 10)) * 100 * 3));
} else {
const int16Data = new Int16Array(data);
length = int16Data.length;
for (let i = 0; i < length; i += 10) {
sum += Math.abs(int16Data[i]);
}
const avg = sum / (length / 10);
volumeLevel.value = Math.min(100, Math.floor((avg / 10000) * 100));
}
}
onUnmounted(() => {
if (isRecording.value) stopRecording()
if (isRecording.value) {
stopRecording();
}
})
return {
isRecording,
isStopping,
isSocketConnected,
recordingDuration,
audioDataForDisplay,
volumeLevel,
recognizedText,
lastFinalText,
startRecording,
stopRecording,
cancelRecording
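The H5 path above converts Web Audio's Float32 samples into the 16-bit little-endian PCM the ASR service expects. A standalone sketch of that same conversion (mirrors the onaudioprocess handler):

function floatTo16BitPCM(float32Samples) {
  const buffer = new ArrayBuffer(float32Samples.length * 2);
  const view = new DataView(buffer);
  for (let i = 0; i < float32Samples.length; i++) {
    // clamp to [-1, 1], then scale negatives by 0x8000 and positives by 0x7FFF
    const s = Math.max(-1, Math.min(1, float32Samples[i]));
    view.setInt16(i * 2, s < 0 ? s * 0x8000 : s * 0x7FFF, true); // true = little-endian
  }
  return buffer;
}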


@@ -1,348 +0,0 @@
import {
ref,
onUnmounted
} from 'vue'
import {
$api
} from '../common/globalFunction'; // your request wrapper
import config from '@/config'
// Open source
export function useAudioRecorder() {
// --- State ---
const isRecording = ref(false)
const isSocketConnected = ref(false)
const recordingDuration = ref(0)
const volumeLevel = ref(0) // 0-100
const recognizedText = ref('')
// --- Internal variables ---
let socketTask = null
let durationTimer = null
// --- App / mini-program variables ---
let recorderManager = null;
// --- H5 variables ---
let audioContext = null;
let scriptProcessor = null;
let mediaStreamSource = null;
let h5Stream = null;
// --- Recorder config ---
const RECORD_CONFIG = {
duration: 600000,
sampleRate: 16000,
numberOfChannels: 1,
format: 'pcm',
frameSize: 4096
}
/**
* Get the WebSocket URL (with token)
*/
const getWsUrl = async () => {
let wsUrl = config.vioceBaseURl
// Append the token
const token = uni.getStorageSync('token') || '';
if (token) {
const separator = wsUrl.includes('?') ? '&' : '?';
wsUrl = `${wsUrl}${separator}token=${encodeURIComponent(token)}`;
}
return wsUrl;
}
/**
* Start recording (entry point)
*/
const startRecording = async () => {
if (isRecording.value) return
try {
recognizedText.value = ''
volumeLevel.value = 0
// #ifdef H5
if (location.protocol !== 'https:' && location.hostname !== 'localhost') {
uni.showToast({
title: 'H5录音需要HTTPS环境',
icon: 'none'
});
return;
}
// #endif
const url = await getWsUrl()
console.log('正在连接 ASR:', url)
await connectSocket(url);
} catch (err) {
console.error('启动失败:', err);
uni.showToast({
title: '启动失败: ' + (err.message || ''),
icon: 'none'
});
cleanup();
}
}
/**
* Connect the WebSocket
*/
const connectSocket = (url) => {
return new Promise((resolve, reject) => {
socketTask = uni.connectSocket({
url: url,
success: () => console.log('Socket 连接请求发送'),
fail: (err) => reject(err)
});
socketTask.onOpen((res) => {
console.log('WebSocket 已连接');
isSocketConnected.value = true;
// #ifdef H5
startH5Recording().then(() => resolve()).catch(err => {
socketTask.close();
reject(err);
});
// #endif
// #ifndef H5
startAppRecording();
resolve();
// #endif
});
socketTask.onMessage((res) => {
// Receive text results
if (res.data) {
recognizedText.value = res.data;
}
});
socketTask.onError((err) => {
console.error('Socket 错误:', err);
isSocketConnected.value = false;
stopRecording();
});
socketTask.onClose(() => {
isSocketConnected.value = false;
console.log('Socket 已关闭');
});
})
}
const startH5Recording = async () => {
try {
// 1. Get the microphone stream
const stream = await navigator.mediaDevices.getUserMedia({
audio: true
});
h5Stream = stream;
// 2. Create the AudioContext
const AudioContext = window.AudioContext || window.webkitAudioContext;
audioContext = new AudioContext({
sampleRate: 16000
});
mediaStreamSource = audioContext.createMediaStreamSource(stream);
scriptProcessor = audioContext.createScriptProcessor(4096, 1, 1);
scriptProcessor.onaudioprocess = (event) => {
if (!isSocketConnected.value || !socketTask) return;
const inputData = event.inputBuffer.getChannelData(0);
calculateVolume(inputData, true);
const buffer = new ArrayBuffer(inputData.length * 2);
const view = new DataView(buffer);
for (let i = 0; i < inputData.length; i++) {
let s = Math.max(-1, Math.min(1, inputData[i]));
view.setInt16(i * 2, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
}
socketTask.send({
data: buffer,
fail: (e) => console.error('发送音频失败', e)
});
};
mediaStreamSource.connect(scriptProcessor);
scriptProcessor.connect(audioContext.destination);
isRecording.value = true;
recordingDuration.value = 0;
durationTimer = setInterval(() => recordingDuration.value++, 1000);
console.log('H5 录音已启动');
} catch (err) {
console.error('H5 录音启动失败:', err);
throw err;
}
}
const stopH5Resources = () => {
if (scriptProcessor) scriptProcessor.disconnect();
if (mediaStreamSource) mediaStreamSource.disconnect();
if (audioContext) audioContext.close();
if (h5Stream) h5Stream.getTracks().forEach(track => track.stop());
scriptProcessor = null;
mediaStreamSource = null;
audioContext = null;
h5Stream = null;
}
const startAppRecording = () => {
recorderManager = uni.getRecorderManager();
recorderManager.onFrameRecorded((res) => {
const {
frameBuffer
} = res;
calculateVolume(frameBuffer, false);
if (isSocketConnected.value && socketTask) {
socketTask.send({
data: frameBuffer
});
}
});
recorderManager.onStart(() => {
console.log('APP 录音已开始');
isRecording.value = true;
recordingDuration.value = 0;
durationTimer = setInterval(() => recordingDuration.value++, 1000);
});
recorderManager.onError((err) => {
console.error('APP 录音报错:', err);
cleanup();
});
recorderManager.start(RECORD_CONFIG);
}
const stopHardwareResource = () => {
// Stop app / mini-program recording
if (recorderManager) {
recorderManager.stop();
}
// Stop H5 recording
// #ifdef H5
if (scriptProcessor) scriptProcessor.disconnect();
if (mediaStreamSource) mediaStreamSource.disconnect();
if (audioContext) audioContext.close();
if (h5Stream) h5Stream.getTracks().forEach(track => track.stop());
scriptProcessor = null;
mediaStreamSource = null;
audioContext = null;
h5Stream = null;
// #endif
}
/**
* Stop recording (all platforms)
*/
const stopRecording = () => {
// Stop app recording
if (recorderManager) {
recorderManager.stop();
}
// Release H5 recording resources
// #ifdef H5
stopH5Resources();
// #endif
// Close the socket
if (socketTask) {
socketTask.close();
}
cleanup();
}
const cancelRecording = () => {
if (!isRecording.value) return;
console.log('取消录音 - 丢弃结果');
// 1. Stop the hardware recorder
stopHardwareResource();
// 2. Force-close the socket
if (socketTask) {
socketTask.close();
}
// 3. Key step: discard the recognized text
recognizedText.value = '';
// 4. Clean up resources
cleanup();
}
/**
* Reset state
*/
const cleanup = () => {
clearInterval(durationTimer);
isRecording.value = false;
isSocketConnected.value = false;
socketTask = null;
recorderManager = null;
volumeLevel.value = 0;
}
/**
* Compute volume (handles Float32 and Int16/ArrayBuffer input)
*/
const calculateVolume = (data, isFloat32) => {
let sum = 0;
let length = 0;
if (isFloat32) {
length = data.length;
for (let i = 0; i < length; i += 10) {
sum += Math.abs(data[i]);
}
volumeLevel.value = Math.min(100, Math.floor((sum / (length / 10)) * 100 * 3));
} else {
const int16Data = new Int16Array(data);
length = int16Data.length;
for (let i = 0; i < length; i += 10) {
sum += Math.abs(int16Data[i]);
}
const avg = sum / (length / 10);
volumeLevel.value = Math.min(100, Math.floor((avg / 10000) * 100));
}
}
onUnmounted(() => {
if (isRecording.value) {
stopRecording();
}
})
return {
isRecording,
isSocketConnected,
recordingDuration,
volumeLevel,
recognizedText,
startRecording,
stopRecording,
cancelRecording
}
}


@@ -1,203 +0,0 @@
import {
ref,
readonly,
onUnmounted
} from 'vue';
// Check API compatibility
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
const isApiSupported = !!SpeechRecognition && !!navigator.mediaDevices && !!window.AudioContext;
/**
* @param {object} [options]
* @param {string} [options.lang] - Language code (e.g., 'zh-CN', 'en-US')
* @returns {object}
*/
export function useAudioRecorder(options = {}) {
const lang = options.lang || 'zh-CN'; // default to Chinese
const isRecording = ref(false);
const recognizedText = ref(''); // full transcript (including interim results)
const lastFinalText = ref(''); // last finalized segment
const volumeLevel = ref(0); // volume (0-100)
const audioDataForDisplay = ref(new Uint8Array()); // waveform data
let recognition = null;
let audioContext = null;
let analyser = null;
let mediaStreamSource = null;
let mediaStream = null;
let dataArray = null; // used for volume and waveform
let animationFrameId = null;
if (!isApiSupported) {
console.warn(
'此浏览器不支持Web语音API或Web音频API。钩子无法正常工作。'
);
return {
isRecording: readonly(isRecording),
startRecording: () => console.error('Audio recording not supported.'),
stopRecording: () => {},
cancelRecording: () => {},
audioDataForDisplay: readonly(audioDataForDisplay),
volumeLevel: readonly(volumeLevel),
recognizedText: readonly(recognizedText),
lastFinalText: readonly(lastFinalText),
};
}
const setupRecognition = () => {
recognition = new SpeechRecognition();
recognition.lang = lang;
recognition.continuous = true; // continuous recognition
recognition.interimResults = true; // return interim results
recognition.onstart = () => {
isRecording.value = true;
};
recognition.onend = () => {
isRecording.value = false;
stopAudioAnalysis(); // when recognition stops, stop the audio analysis too
};
recognition.onerror = (event) => {
console.error('SpeechRecognition Error:', event.error);
isRecording.value = false;
stopAudioAnalysis();
};
recognition.onresult = (event) => {
let interim = '';
let final = '';
for (let i = 0; i < event.results.length; i++) {
const transcript = event.results[i][0].transcript;
if (event.results[i].isFinal) {
final += transcript;
lastFinalText.value = transcript; // store the last finalized segment
} else {
interim += transcript;
}
}
recognizedText.value = final + interim; // combine into the full transcript
};
};
const startAudioAnalysis = async () => {
try {
mediaStream = await navigator.mediaDevices.getUserMedia({
audio: true
});
audioContext = new AudioContext();
analyser = audioContext.createAnalyser();
mediaStreamSource = audioContext.createMediaStreamSource(mediaStream);
// Configure the analyser
analyser.fftSize = 512; // must be a power of two
const bufferLength = analyser.frequencyBinCount;
dataArray = new Uint8Array(bufferLength); // for the waveform
// Connect the nodes
mediaStreamSource.connect(analyser);
// Start the analysis loop
updateAudioData();
} catch (err) {
console.error('Failed to get media stream or setup AudioContext:', err);
if (err.name === 'NotAllowedError' || err.name === 'PermissionDeniedError') {
alert('麦克风权限被拒绝。请在浏览器设置中允许访问麦克风。');
}
}
};
const updateAudioData = () => {
if (!isRecording.value) return; // exit the loop once stopped
// Get time-domain data (waveform)
analyser.getByteTimeDomainData(dataArray);
audioDataForDisplay.value = new Uint8Array(dataArray); // copy the array to trigger reactivity
// Compute volume (RMS)
let sumSquares = 0.0;
for (const amplitude of dataArray) {
const normalized = (amplitude / 128.0) - 1.0; // normalize to -1.0..1.0
sumSquares += normalized * normalized;
}
const rms = Math.sqrt(sumSquares / dataArray.length);
volumeLevel.value = Math.min(100, Math.floor(rms * 250)); // scale RMS into the 0-100 range
animationFrameId = requestAnimationFrame(updateAudioData);
};
const stopAudioAnalysis = () => {
if (animationFrameId) {
cancelAnimationFrame(animationFrameId);
animationFrameId = null;
}
// Stop the microphone tracks
mediaStream?.getTracks().forEach((track) => track.stop());
// Close the AudioContext
audioContext?.close().catch((e) => console.error('Error closing AudioContext', e));
mediaStream = null;
audioContext = null;
analyser = null;
mediaStreamSource = null;
volumeLevel.value = 0;
audioDataForDisplay.value = new Uint8Array();
};
const startRecording = async () => {
if (isRecording.value) return;
// Reset state
recognizedText.value = '';
lastFinalText.value = '';
try {
// Audio analysis must start first, to obtain microphone permission
await startAudioAnalysis();
// If audio started successfully (mediaStream exists), start speech recognition
if (mediaStream) {
setupRecognition();
recognition.start();
}
} catch (error) {
console.error("Error starting recording:", error);
}
};
const stopRecording = () => {
if (!isRecording.value || !recognition) return;
recognition.stop(); // this triggers onend, which stops the audio analysis automatically
};
const cancelRecording = () => {
if (!recognition) return;
isRecording.value = false; // set the state immediately
recognition.abort(); // this also triggers onend
recognizedText.value = '';
lastFinalText.value = '';
};
onUnmounted(() => {
if (recognition) {
recognition.abort();
}
stopAudioAnalysis();
});
return {
isRecording: readonly(isRecording),
startRecording,
stopRecording,
cancelRecording,
audioDataForDisplay: readonly(audioDataForDisplay),
volumeLevel: readonly(volumeLevel),
recognizedText: readonly(recognizedText),
lastFinalText: readonly(lastFinalText),
isApiSupported, // expose support status
};
}
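A minimal sketch of how a component might consume this hook (illustrative; the import path is an assumption, and SpeechRecognition itself is only available in Chromium-based browsers):

<script setup>
import { useAudioRecorder } from '@/hook/useAudioRecorder'; // hypothetical path

const {
  isRecording, recognizedText, volumeLevel,
  startRecording, stopRecording,
} = useAudioRecorder({ lang: 'zh-CN' });
</script>

<template>
  <button @click="isRecording ? stopRecording() : startRecording()">
    {{ isRecording ? 'Stop' : 'Record' }} ({{ volumeLevel }})
  </button>
  <p>{{ recognizedText }}</p>
</template>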


@@ -1,217 +1,205 @@
import {
ref,
onUnmounted,
onBeforeUnmount,
onMounted
onUnmounted
} from 'vue'
import {
onHide,
onUnload
} from '@dcloudio/uni-app'
import WavDecoder from '@/lib/wav-decoder@1.3.0.js'
import config from '@/config'
import PiperTTS from './piper-bundle.js'
export function useTTSPlayer() {
const isSpeaking = ref(false)
const isPaused = ref(false)
const isComplete = ref(false)
// UI state
const isSpeaking = ref(false) // interaction in progress (playing, paused, or loading)
const isPaused = ref(false) // currently paused
const isLoading = ref(false) // loading / connecting
const audioContext = new(window.AudioContext || window.webkitAudioContext)()
let playTime = audioContext.currentTime
let sourceNodes = []
let socket = null
let sampleRate = 16000
let numChannels = 1
let isHeaderDecoded = false
let pendingText = null
// Singleton Piper instance
let piper = null
let currentPlayId = 0
let activePlayId = 0
/**
* Get or create the SDK instance
*/
const getPiperInstance = () => {
if (!piper) {
let baseUrl = config.speechSynthesis2 || ''
baseUrl = baseUrl.replace(/\/$/, '')
const speak = (text) => {
currentPlayId++
const myPlayId = currentPlayId
reset()
pendingText = text
activePlayId = myPlayId
}
const pause = () => {
if (audioContext.state === 'running') {
audioContext.suspend()
isPaused.value = true
isSpeaking.value = false
}
}
const resume = () => {
if (audioContext.state === 'suspended') {
audioContext.resume()
isPaused.value = false
isSpeaking.value = true
}
}
const cancelAudio = () => {
stop()
}
const stop = () => {
isSpeaking.value = false
isPaused.value = false
isComplete.value = false
playTime = audioContext.currentTime
sourceNodes.forEach(node => {
try {
node.stop()
node.disconnect()
} catch (e) {}
})
sourceNodes = []
if (socket) {
socket.close()
socket = null
}
isHeaderDecoded = false
pendingText = null
}
const reset = () => {
stop()
isSpeaking.value = false
isPaused.value = false
isComplete.value = false
playTime = audioContext.currentTime
initWebSocket()
}
const initWebSocket = () => {
const thisPlayId = currentPlayId
socket = new WebSocket(config.speechSynthesis)
socket.binaryType = 'arraybuffer'
socket.onopen = () => {
if (pendingText && thisPlayId === activePlayId) {
const speechText = extractSpeechText(pendingText)
console.log(speechText)
socket.send(speechText)
pendingText = null
}
}
socket.onmessage = async (e) => {
if (thisPlayId !== activePlayId) return // ignore messages from a stale playback
if (typeof e.data === 'string') {
try {
const msg = JSON.parse(e.data)
if (msg.status === 'complete') {
isComplete.value = true
setTimeout(() => {
if (thisPlayId === activePlayId) {
isSpeaking.value = false
}
}, (playTime - audioContext.currentTime) * 1000)
piper = new PiperTTS({
baseUrl: baseUrl,
sampleRate: 16000,
onStatus: (msg, type) => {
if (type === 'error') {
console.error('[TTS Error]', msg)
resetState()
}
} catch (e) {
console.log('[TTSPlayer] 文本消息:', e.data)
},
onStart: () => {
isLoading.value = false
isSpeaking.value = true
isPaused.value = false
},
onEnd: () => {
// Only an ending in the non-paused state resets all flags;
// an interruption caused by a manual pause should not count as natural playback completion
isSpeaking.value = false
isLoading.value = false
isPaused.value = false
}
} else if (e.data instanceof ArrayBuffer) {
if (!isHeaderDecoded) {
try {
const decoded = await WavDecoder.decode(e.data)
sampleRate = decoded.sampleRate
numChannels = decoded.channelData.length
decoded.channelData.forEach((channel, i) => {
const audioBuffer = audioContext.createBuffer(1, channel.length,
sampleRate)
audioBuffer.copyToChannel(channel, 0)
playBuffer(audioBuffer)
})
isHeaderDecoded = true
} catch (err) {
console.error('WAV 解码失败:', err)
}
} else {
const pcm = new Int16Array(e.data)
const audioBuffer = pcmToAudioBuffer(pcm, sampleRate, numChannels)
playBuffer(audioBuffer)
}
}
})
}
return piper
}
const pcmToAudioBuffer = (pcm, sampleRate, numChannels) => {
const length = pcm.length / numChannels
const audioBuffer = audioContext.createBuffer(numChannels, length, sampleRate)
for (let ch = 0; ch < numChannels; ch++) {
const channelData = audioBuffer.getChannelData(ch)
for (let i = 0; i < length; i++) {
const sample = pcm[i * numChannels + ch]
channelData[i] = sample / 32768
}
}
return audioBuffer
}
/**
* Core speak method
*/
const speak = async (text) => {
if (!text) return
const playBuffer = (audioBuffer) => {
if (!isSpeaking.value) {
playTime = audioContext.currentTime
}
const source = audioContext.createBufferSource()
source.buffer = audioBuffer
source.connect(audioContext.destination)
source.start(playTime)
sourceNodes.push(source)
playTime += audioBuffer.duration
const processedText = extractSpeechText(text)
if (!processedText) return
const instance = getPiperInstance()
// Reset state
isLoading.value = true
isPaused.value = false
isSpeaking.value = true
try {
// Call speak directly; the SDK handles init and stop internally
await instance.speak(processedText, {
speakerId: 0,
noiseScale: 0.667,
lengthScale: 1.0
})
} catch (e) {
console.error('TTS Speak Error:', e)
resetState()
}
}
onUnmounted(() => {
stop()
})
/**
* Pause
*/
const pause = async () => {
// 1. Only pause while playing and not already paused
if (!isSpeaking.value || isPaused.value) return
// On page refresh/close
onMounted(() => {
if (typeof window !== 'undefined') {
window.addEventListener('beforeunload', cancelAudio)
// 2. Check that the player instance exists
if (piper && piper.player) {
try {
// Suspend the audio
await piper.player.pause()
// 3. Update the UI on success
isPaused.value = true
} catch (e) {
console.error("Pause failed:", e)
// Even on a non-fatal error, the UI update could still be forced
// isPaused.value = true
}
}
})
}
onBeforeUnmount(() => {
cancelAudio()
if (typeof window !== 'undefined') {
window.removeEventListener('beforeunload', cancelAudio)
/**
* Resume (continue playback)
*/
const resume = async () => {
// 1. Only resume while paused
if (!isPaused.value) return
if (piper && piper.player) {
try {
await piper.player.continue()
// 2. Update the UI on success
isPaused.value = false
isSpeaking.value = true
} catch (e) {
console.error("Resume failed:", e)
}
}
})
}
onHide(cancelAudio)
onUnload(cancelAudio)
/**
* Toggle play/pause (handy for button binding)
*/
const togglePlay = () => {
if (isPaused.value) {
resume()
} else {
pause()
}
}
initWebSocket()
/**
* Stop (interrupt)
*/
const stop = () => {
if (piper) {
piper.stop()
}
resetState()
}
/**
* Destroy completely
*/
const destroy = () => {
if (piper) {
piper.stop()
piper = null
}
resetState()
}
const resetState = () => {
isSpeaking.value = false
isPaused.value = false
isLoading.value = false
}
// === Lifecycle management ===
onUnmounted(destroy)
if (typeof onHide === 'function') {
onHide(() => {
togglePlay()
// stop()
})
}
if (typeof onUnload === 'function') {
onUnload(destroy)
}
return {
speak,
pause,
resume,
cancelAudio,
togglePlay, // new: single-button play/pause toggle
stop,
cancelAudio: stop,
isSpeaking,
isPaused,
isComplete
isLoading
}
}
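/*
 * A minimal usage sketch for the hook above (illustrative only):
 *
 *   const { speak, togglePlay, stop, isSpeaking, isPaused, isLoading } = useTTSPlayer()
 *   await speak(markdownReply) // extracts speakable text, then streams the TTS audio
 *   togglePlay()               // pause, or resume when already paused
 *   stop()                     // interrupt playback and reset state
 */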
/**
* Text extraction helper (kept as-is)
*/
function extractSpeechText(markdown) {
if (!markdown || markdown.indexOf('job-json') === -1) {
return markdown;
}
const jobRegex = /``` job-json\s*({[\s\S]*?})\s*```/g;
const jobs = [];
let match;
let lastJobEndIndex = 0;
let firstJobStartIndex = -1;
// Extract each job's JSON payload and record its position
while ((match = jobRegex.exec(markdown)) !== null) {
const jobStr = match[1];
try {
@@ -225,27 +213,16 @@ function extractSpeechText(markdown) {
console.warn('JSON 解析失败', e);
}
}
// Extract the lead-in text (before the first job-json block)
const guideText = firstJobStartIndex > 0 ?
markdown.slice(0, firstJobStartIndex).trim() :
'';
// Extract the closing text (after the last job-json block)
markdown.slice(0, firstJobStartIndex).trim() : '';
const endingText = lastJobEndIndex < markdown.length ?
markdown.slice(lastJobEndIndex).trim() :
'';
// Format the job info as speech text
markdown.slice(lastJobEndIndex).trim() : '';
const jobTexts = jobs.map((job, index) => {
return `${index + 1} 个岗位,岗位名称是:${job.jobTitle},公司是:${job.companyName},薪资:${job.salary},地点:${job.location},学历要求:${job.education},经验要求:${job.experience}`;
});
// Assemble the final speech content
const finalTextParts = [];
if (guideText) finalTextParts.push(guideText);
finalTextParts.push(...jobTexts);
if (endingText) finalTextParts.push(endingText);
return finalTextParts.join('\n');
}
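For reference, a sketch of what extractSpeechText produces (field values invented for illustration). Given a reply that embeds job data in a ``` job-json fenced block:

为您推荐以下岗位:
``` job-json
{ "jobTitle": "前端工程师", "companyName": "XX科技", "salary": "15-20K", "location": "杭州", "education": "本科", "experience": "3年" }
```
祝您求职顺利!

it returns the lead-in text, one formatted sentence per job, and the closing text, joined by newlines:

为您推荐以下岗位:
第 1 个岗位,岗位名称是:前端工程师,公司是:XX科技,薪资:15-20K,地点:杭州,学历要求:本科,经验要求:3年
祝您求职顺利!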


@@ -1,216 +0,0 @@
import {
ref,
onUnmounted,
onMounted,
watch
} from 'vue'
import {
onHide,
onUnload
} from '@dcloudio/uni-app'
import config from '@/config'
// Make sure piper-sdk.js correctly exports the PiperTTS class
import {
PiperTTS
} from './piper-sdk.js'
export function useTTSPlayer() {
// UI state
const isSpeaking = ref(false)
const isPaused = ref(false)
const isLoading = ref(false)
// SDK instance
let piper = null
/**
* Initialize the SDK instance.
* piper is set to null after every stop, so it gets recreated here.
*/
const initPiper = () => {
if (piper) return
let baseUrl = config.speechSynthesis2 || ''
baseUrl = baseUrl.replace(/\/$/, '')
piper = new PiperTTS({
baseUrl: baseUrl,
onStatus: (msg, type) => {
if (type === 'error') {
console.error('[TTS Error]', msg)
// Reset the UI state on error
resetState()
}
},
onStart: () => {
isLoading.value = false
isSpeaking.value = true
isPaused.value = false
},
onEnd: () => {
resetState()
}
})
}
/**
* Core speak method
*/
const speak = async (text) => {
if (!text) return
const processedText = extractSpeechText(text)
if (!processedText) return
// 1. [Key change] Fully stop and destroy the old instance first.
// This disconnects the socket and closes the AudioContext, so the previous audio stops immediately
await stop()
// 2. Initialize a new instance (stop() set piper to null)
initPiper()
// 3. Mark the UI as loading
isLoading.value = true
isPaused.value = false
isSpeaking.value = true // set true up front to prevent button flicker
try {
// 4. Activate the audio engine (critical to avoid muted autoplay on mobile)
await piper.init()
// 5. Send the request
piper.speak(processedText, {
speakerId: 0,
noiseScale: 0.667,
lengthScale: 1.0
})
} catch (e) {
console.error('TTS Speak Error:', e)
resetState()
}
}
/**
* Pause
*/
const pause = async () => {
if (piper && piper.audioCtx && piper.audioCtx.state === 'running') {
await piper.audioCtx.suspend()
isPaused.value = true
}
}
/**
* Resume
*/
const resume = async () => {
if (piper && piper.audioCtx && piper.audioCtx.state === 'suspended') {
await piper.audioCtx.resume()
isPaused.value = false
isSpeaking.value = true
}
}
/**
* Stop and reset (nuclear option)
*/
const stop = async () => {
if (piper) {
// 1. Disconnect the WebSocket
piper.stop()
// 2. [Key] Close the AudioContext.
// In the Web Audio API, buffers that are already scheduled are hard to cancel individually;
// the most direct approach is to close the whole context
if (piper.audioCtx && piper.audioCtx.state !== 'closed') {
try {
await piper.audioCtx.close()
} catch (e) {
console.warn('AudioContext close failed', e)
}
}
// 3. Drop the instance reference
piper = null
}
resetState()
}
// Reset UI state
const resetState = () => {
isSpeaking.value = false
isPaused.value = false
isLoading.value = false
}
// === Lifecycle ===
onMounted(() => {
// Pre-initialization is skipped; create the instance on first use instead,
// to avoid holding AudioContext resources while idle
})
onUnmounted(() => {
stop()
})
// uni-app lifecycle
if (typeof onHide === 'function') onHide(stop)
if (typeof onUnload === 'function') onUnload(stop)
return {
speak,
pause,
resume,
stop,
cancelAudio: stop,
isSpeaking,
isPaused,
isLoading
}
}
/**
* Text extraction logic (unchanged)
*/
function extractSpeechText(markdown) {
if (!markdown || markdown.indexOf('job-json') === -1) {
return markdown;
}
const jobRegex = /``` job-json\s*({[\s\S]*?})\s*```/g;
const jobs = [];
let match;
let lastJobEndIndex = 0;
let firstJobStartIndex = -1;
while ((match = jobRegex.exec(markdown)) !== null) {
const jobStr = match[1];
try {
const job = JSON.parse(jobStr);
jobs.push(job);
if (firstJobStartIndex === -1) {
firstJobStartIndex = match.index;
}
lastJobEndIndex = jobRegex.lastIndex;
} catch (e) {
console.warn('JSON 解析失败', e);
}
}
const guideText = firstJobStartIndex > 0 ?
markdown.slice(0, firstJobStartIndex).trim() : '';
const endingText = lastJobEndIndex < markdown.length ?
markdown.slice(lastJobEndIndex).trim() : '';
const jobTexts = jobs.map((job, index) => {
return `${index + 1} 个岗位,岗位名称是:${job.jobTitle},公司是:${job.companyName},薪资:${job.salary},地点:${job.location},学历要求:${job.education},经验要求:${job.experience}`;
});
const finalTextParts = [];
if (guideText) finalTextParts.push(guideText);
finalTextParts.push(...jobTexts);
if (endingText) finalTextParts.push(endingText);
return finalTextParts.join('\n');
}