2025-04-16 14:24:06 +08:00
|
|
|
|
import {
|
|
|
|
|
ref,
|
|
|
|
|
onUnmounted,
|
|
|
|
|
onBeforeUnmount,
|
|
|
|
|
onMounted
|
|
|
|
|
} from 'vue'
|
|
|
|
|
import {
|
|
|
|
|
onHide,
|
|
|
|
|
onUnload
|
|
|
|
|
} from '@dcloudio/uni-app'
|
|
|
|
|
import WavDecoder from '@/lib/wav-decoder@1.3.0.js'
|
|
|
|
|
|
|
|
|
|
export function useTTSPlayer(wsUrl) {
|
|
|
|
|
const isSpeaking = ref(false)
|
|
|
|
|
const isPaused = ref(false)
|
|
|
|
|
const isComplete = ref(false)
|
|
|
|
|
|
2025-10-21 22:58:47 +08:00
|
|
|
|
// #ifdef H5
|
|
|
|
|
const audioContext = typeof window !== 'undefined' && (window.AudioContext || window.webkitAudioContext)
|
|
|
|
|
? new(window.AudioContext || window.webkitAudioContext)()
|
|
|
|
|
: null
|
|
|
|
|
// #endif
|
|
|
|
|
|
|
|
|
|
// #ifdef MP-WEIXIN
|
|
|
|
|
const audioContext = null // 微信小程序不支持 AudioContext
|
|
|
|
|
// #endif
|
|
|
|
|
|
|
|
|
|
let playTime = audioContext ? audioContext.currentTime : 0
|
2025-04-16 14:24:06 +08:00
|
|
|
|
let sourceNodes = []
|
|
|
|
|
let socket = null
|
|
|
|
|
let sampleRate = 16000
|
|
|
|
|
let numChannels = 1
|
|
|
|
|
let isHeaderDecoded = false
|
|
|
|
|
let pendingText = null
|
|
|
|
|
|
|
|
|
|
let currentPlayId = 0
|
|
|
|
|
let activePlayId = 0
|
|
|
|
|
|
|
|
|
|
/**
 * Start speaking the given text: tear down any previous playback,
 * stamp a new play id and queue the text for the fresh WebSocket.
 */
const speak = (text) => {
  if (!audioContext) {
    console.warn('⚠️ TTS not supported in current environment');
    return;
  }

  console.log('🎤 TTS speak function called');
  console.log('📝 Text to synthesize:', text ? text.substring(0, 100) + '...' : 'No text');
  console.log('🔗 WebSocket URL:', wsUrl);

  // A new id invalidates messages that belong to any earlier playback.
  const myPlayId = ++currentPlayId
  console.log('🆔 Play ID:', myPlayId);

  reset()
  pendingText = text
  activePlayId = myPlayId

  console.log('✅ Speak function setup complete');
}
|
|
|
|
|
|
|
|
|
|
/**
 * Pause playback by suspending the AudioContext.
 * isSpeaking is deliberately left untouched so resume() can pick up
 * exactly where the utterance stopped.
 */
const pause = () => {
  if (!audioContext) {
    console.warn('⚠️ TTS not supported in current environment');
    return;
  }

  console.log('⏸️ TTS pause called');
  console.log('🔊 AudioContext state:', audioContext.state);
  console.log('🔊 Is speaking before pause:', isSpeaking.value);
  console.log('⏸️ Is paused before pause:', isPaused.value);

  if (audioContext.state !== 'running') {
    console.log('⚠️ AudioContext is not running, cannot pause');
  } else {
    audioContext.suspend()
    isPaused.value = true
    // Do not flip isSpeaking here; pausing does not end the utterance.
    console.log('✅ Audio paused successfully');
  }

  console.log('🔊 Is speaking after pause:', isSpeaking.value);
  console.log('⏸️ Is paused after pause:', isPaused.value);
}
|
|
|
|
|
|
|
|
|
|
/**
 * Resume playback after pause() by resuming the suspended AudioContext.
 */
const resume = () => {
  if (!audioContext) {
    console.warn('⚠️ TTS not supported in current environment');
    return;
  }

  console.log('▶️ TTS resume called');
  console.log('🔊 AudioContext state:', audioContext.state);
  console.log('🔊 Is speaking before resume:', isSpeaking.value);
  console.log('⏸️ Is paused before resume:', isPaused.value);

  if (audioContext.state !== 'suspended') {
    console.log('⚠️ AudioContext is not suspended, cannot resume');
  } else {
    audioContext.resume()
    isPaused.value = false
    isSpeaking.value = true
    console.log('✅ Audio resumed successfully');
  }

  console.log('🔊 Is speaking after resume:', isSpeaking.value);
  console.log('⏸️ Is paused after resume:', isPaused.value);
}
|
|
|
|
|
|
|
|
|
|
// Public teardown hook used by page/component lifecycle callbacks.
const cancelAudio = () => stop()
|
|
|
|
|
|
|
|
|
|
/**
 * Hard-stop playback: clear all flags, silence every scheduled buffer
 * source and close the WebSocket. Safe to call repeatedly.
 */
const stop = () => {
  isSpeaking.value = false
  isPaused.value = false
  isComplete.value = false
  playTime = audioContext ? audioContext.currentTime : 0

  // Silence everything already scheduled. Nodes that never started throw
  // on stop(); that is expected and deliberately ignored.
  for (const node of sourceNodes) {
    try {
      node.stop()
      node.disconnect()
    } catch (e) {}
  }
  sourceNodes = []

  if (socket) {
    socket.close()
    socket = null
  }

  isHeaderDecoded = false
  pendingText = null
}
|
|
|
|
|
|
|
|
|
|
/**
 * Reset the player for a fresh utterance: tear everything down and open
 * a new WebSocket connection.
 *
 * stop() already resets isSpeaking/isPaused/isComplete, playTime, the
 * scheduled nodes and the socket, so the duplicate flag assignments that
 * used to follow it here were dead code and have been removed.
 */
const reset = () => {
  stop()
  initWebSocket()
}
|
|
|
|
|
|
|
|
|
|
/**
 * Open the WebSocket to the TTS server and wire up all handlers.
 *
 * Protocol (as implemented below):
 * - Text frames are JSON status messages; `{status:'complete'}` marks the
 *   end of synthesis.
 * - The first binary frame is a complete WAV file (decoded to learn the
 *   stream's sampleRate/numChannels); later binary frames are raw
 *   interleaved 16-bit PCM.
 */
const initWebSocket = () => {
  if (!audioContext) {
    console.warn('⚠️ WebSocket TTS not supported in current environment');
    return;
  }

  const thisPlayId = currentPlayId
  console.log('🔌 Initializing WebSocket connection');
  console.log('🔗 WebSocket URL:', wsUrl);
  console.log('🆔 This play ID:', thisPlayId);

  socket = new WebSocket(wsUrl)
  socket.binaryType = 'arraybuffer'

  // Heartbeat so idle connections are not timed out by the server.
  const heartbeatInterval = setInterval(() => {
    if (socket && socket.readyState === WebSocket.OPEN) {
      socket.send(JSON.stringify({ type: 'ping' }));
    }
  }, 30000); // ping every 30 seconds

  socket.onopen = () => {
    console.log('✅ WebSocket connection opened');
    if (pendingText && thisPlayId === activePlayId) {
      const seepdText = extractSpeechText(pendingText)
      console.log('📤 Sending text to TTS server:', seepdText.substring(0, 100) + '...');
      socket.send(seepdText)
      pendingText = null
    } else {
      console.log('❌ No pending text or play ID mismatch');
      console.log('📝 Pending text exists:', !!pendingText);
      console.log('🆔 Play ID match:', thisPlayId === activePlayId);
    }
  }

  socket.onerror = (error) => {
    console.error('❌ WebSocket error:', error);
  }

  socket.onclose = (event) => {
    console.log('🔌 WebSocket connection closed:', event.code, event.reason);
    clearInterval(heartbeatInterval);
  }

  socket.onmessage = async (e) => {
    if (thisPlayId !== activePlayId) return // ignore messages from stale playbacks

    if (typeof e.data === 'string') {
      try {
        const msg = JSON.parse(e.data)
        console.log('📨 TTS server message:', msg);
        if (msg.status === 'complete') {
          console.log('✅ TTS synthesis completed');
          isComplete.value = true
          // Wait until everything already scheduled has finished playing
          // before declaring the utterance over.
          const remainingTime = audioContext ? Math.max(0, (playTime - audioContext.currentTime) * 1000) : 0;
          console.log('⏱️ Remaining play time:', remainingTime + 'ms');
          setTimeout(() => {
            if (thisPlayId === activePlayId) {
              console.log('🔇 TTS playback finished, setting isSpeaking to false');
              isSpeaking.value = false
            }
          }, remainingTime + 500) // extra 500ms safety buffer
        }
      } catch (err) {
        // BUGFIX: the catch parameter used to be named `e`, shadowing the
        // message event, so `e.data` logged the parse error's (undefined)
        // data instead of the non-JSON text frame that was received.
        console.log('[TTSPlayer] 文本消息:', e.data)
      }
    } else if (e.data instanceof ArrayBuffer) {
      if (!isHeaderDecoded) {
        // First binary frame: a full WAV file carrying the stream format.
        try {
          const decoded = await WavDecoder.decode(e.data)
          sampleRate = decoded.sampleRate
          numChannels = decoded.channelData.length
          decoded.channelData.forEach((channel, i) => {
            const audioBuffer = audioContext.createBuffer(1, channel.length, sampleRate)
            audioBuffer.copyToChannel(channel, 0)
            playBuffer(audioBuffer)
          })
          isHeaderDecoded = true
        } catch (err) {
          console.error('WAV 解码失败:', err)
        }
      } else {
        // Later binary frames: raw interleaved 16-bit PCM.
        const pcm = new Int16Array(e.data)
        const audioBuffer = pcmToAudioBuffer(pcm, sampleRate, numChannels)
        playBuffer(audioBuffer)
      }
    }
  }
}
|
|
|
|
|
|
|
|
|
|
/**
 * Convert interleaved 16-bit PCM samples into an AudioBuffer.
 * Returns null when no AudioContext is available.
 */
const pcmToAudioBuffer = (pcm, sampleRate, numChannels) => {
  if (!audioContext) return null;

  const frameCount = pcm.length / numChannels
  const audioBuffer = audioContext.createBuffer(numChannels, frameCount, sampleRate)
  for (let ch = 0; ch < numChannels; ch++) {
    const channelData = audioBuffer.getChannelData(ch)
    for (let frame = 0; frame < frameCount; frame++) {
      // De-interleave and normalise from int16 to [-1, 1).
      channelData[frame] = pcm[frame * numChannels + ch] / 32768
    }
  }
  return audioBuffer
}
|
|
|
|
|
|
|
|
|
|
/**
 * Schedule an AudioBuffer for gap-free playback on the shared timeline.
 * The first buffer of an utterance starts "now"; each subsequent buffer
 * is queued right after the previous one.
 */
const playBuffer = (audioBuffer) => {
  if (!audioContext || !audioBuffer) return;

  console.log('🎵 playBuffer called, duration:', audioBuffer.duration + 's');
  if (!isSpeaking.value) {
    // First chunk of a new utterance: anchor the timeline to "now".
    playTime = audioContext.currentTime
    console.log('🎵 Starting new audio playback at time:', playTime);
  }

  const source = audioContext.createBufferSource()
  source.buffer = audioBuffer
  source.connect(audioContext.destination)
  source.start(playTime)
  sourceNodes.push(source)
  playTime += audioBuffer.duration
  isSpeaking.value = true
  console.log('🎵 Audio scheduled, new playTime:', playTime);

  // Diagnostic only: log when each chunk finishes.
  source.onended = () => {
    console.log('🎵 Audio buffer finished playing');
  }
}
|
|
|
|
|
|
|
|
|
|
onUnmounted(() => {
|
|
|
|
|
stop()
|
|
|
|
|
})
|
|
|
|
|
|
|
|
|
|
// 页面刷新/关闭时
|
|
|
|
|
onMounted(() => {
|
|
|
|
|
if (typeof window !== 'undefined') {
|
|
|
|
|
window.addEventListener('beforeunload', cancelAudio)
|
|
|
|
|
}
|
|
|
|
|
})
|
|
|
|
|
|
|
|
|
|
onBeforeUnmount(() => {
|
|
|
|
|
cancelAudio()
|
|
|
|
|
if (typeof window !== 'undefined') {
|
|
|
|
|
window.removeEventListener('beforeunload', cancelAudio)
|
|
|
|
|
}
|
|
|
|
|
})
|
|
|
|
|
|
|
|
|
|
onHide(cancelAudio)
|
|
|
|
|
onUnload(cancelAudio)
|
|
|
|
|
|
2025-10-21 22:58:47 +08:00
|
|
|
|
// 只在支持 AudioContext 的环境中初始化 WebSocket
|
|
|
|
|
if (audioContext) {
|
|
|
|
|
initWebSocket()
|
|
|
|
|
}
|
2025-04-16 14:24:06 +08:00
|
|
|
|
|
|
|
|
|
return {
|
|
|
|
|
speak,
|
|
|
|
|
pause,
|
|
|
|
|
resume,
|
|
|
|
|
cancelAudio,
|
|
|
|
|
isSpeaking,
|
|
|
|
|
isPaused,
|
|
|
|
|
isComplete
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
 * Build the text to feed the TTS engine from an assistant markdown reply.
 * Job cards are embedded as ``` job-json fenced JSON blocks; each one is
 * turned into a spoken sentence. Text before the first block becomes the
 * lead-in, text after the last block becomes the closing remark.
 *
 * @param {string} markdown - Raw markdown reply (may be empty or null).
 * @returns {string} Newline-joined speech text; '' for empty input.
 */
function extractSpeechText(markdown) {
  console.log('🔍 extractSpeechText called');
  console.log('📝 Input markdown length:', markdown ? markdown.length : 0);
  console.log('📝 Input markdown preview:', markdown ? markdown.substring(0, 200) + '...' : 'No markdown');

  // BUGFIX: null/undefined input used to crash below on markdown.length
  // (only the log statements above were guarded).
  if (!markdown) return '';

  const jobRegex = /``` job-json\s*({[\s\S]*?})\s*```/g;
  const jobs = [];
  let match;
  let lastJobEndIndex = 0;
  let firstJobStartIndex = -1;

  // Collect every job JSON block and remember where the run of blocks
  // starts and ends inside the markdown.
  while ((match = jobRegex.exec(markdown)) !== null) {
    const jobStr = match[1];
    try {
      const job = JSON.parse(jobStr);
      jobs.push(job);
      if (firstJobStartIndex === -1) {
        firstJobStartIndex = match.index;
      }
      lastJobEndIndex = jobRegex.lastIndex;
      console.log('✅ Found job:', job.jobTitle);
    } catch (e) {
      console.warn('JSON 解析失败', e);
    }
  }

  console.log('📊 Jobs found:', jobs.length);
  console.log('📍 First job start index:', firstJobStartIndex);
  console.log('📍 Last job end index:', lastJobEndIndex);

  // Lead-in: text before the first job block.
  const guideText = firstJobStartIndex > 0 ?
    markdown.slice(0, firstJobStartIndex).trim() :
    '';

  // Closing remark: text after the last job block.
  const endingText = lastJobEndIndex < markdown.length ?
    markdown.slice(lastJobEndIndex).trim() :
    '';

  console.log('📝 Guide text:', guideText);
  console.log('📝 Ending text:', endingText);

  // One spoken sentence per job card.
  const jobTexts = jobs.map((job, index) => {
    return `第 ${index + 1} 个岗位,岗位名称是:${job.jobTitle},公司是:${job.companyName},薪资:${job.salary},地点:${job.location},学历要求:${job.education},经验要求:${job.experience}。`;
  });

  // Assemble the final speech text: lead-in, jobs, closing remark.
  const finalTextParts = [];
  if (guideText) finalTextParts.push(guideText);
  finalTextParts.push(...jobTexts);
  if (endingText) finalTextParts.push(endingText);

  const finalText = finalTextParts.join('\n');
  console.log('🎤 Final TTS text length:', finalText.length);
  console.log('🎤 Final TTS text preview:', finalText.substring(0, 200) + '...');
  console.log('🎤 Final TTS text parts count:', finalTextParts.length);

  return finalText;
}
|