import { ref, onUnmounted, onBeforeUnmount, onMounted } from 'vue'
import { onHide, onUnload } from '@dcloudio/uni-app'
import WavDecoder from '@/lib/wav-decoder@1.3.0.js'

export function useTTSPlayer(httpUrl) {
  const isSpeaking = ref(false)
  const isPaused = ref(false)
  const isComplete = ref(false)

  // #ifdef H5
  const audioContext = typeof window !== 'undefined' && (window.AudioContext || window.webkitAudioContext)
    ? new (window.AudioContext || window.webkitAudioContext)()
    : null
  // #endif
  // #ifdef MP-WEIXIN
  const audioContext = null // AudioContext is not available in the WeChat mini program runtime
  // #endif

  let currentAudioBuffer = null
  let currentSource = null
  let playTimeOffset = 0

  const speak = async (text) => {
    if (!audioContext) {
      console.warn('⚠️ TTS not supported in current environment');
      return;
    }
    console.log('🎤 TTS speak function called');
    console.log('📝 Text to synthesize:', text ? text.substring(0, 100) + '...' : 'No text');
    console.log('🔗 HTTP URL:', httpUrl);

    // Stop any playback that is already in progress
    stop()

    try {
      // Extract the text to synthesize
      const speechText = extractSpeechText(text)
      console.log('📤 Sending text to TTS server via GET:', speechText.substring(0, 100) + '...');

      // Build the GET request URL
      const url = `${httpUrl}?text=${encodeURIComponent(speechText)}`
      console.log('🔗 Final GET URL:', url);

      // Send the GET request and fetch the synthesized audio
      const response = await fetch(url)
      if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`)
      }

      // Read the binary payload
      const arrayBuffer = await response.arrayBuffer()
      console.log('✅ Received audio data, size:', arrayBuffer.byteLength + ' bytes');

      // Decode the WAV data
      const decoded = await WavDecoder.decode(arrayBuffer)
      console.log('✅ Audio decoded, sampleRate:', decoded.sampleRate, 'channels:', decoded.channelData.length);

      // Play the decoded audio
      playDecodedAudio(decoded)
    } catch (error) {
      console.error('❌ TTS synthesis failed:', error);
      isSpeaking.value = false
      isComplete.value = false
    }
  }

  const playDecodedAudio = (decoded) => {
    if (!audioContext) return;

    // stop() suspends the context, so make sure it is running again before starting playback
    if (audioContext.state === 'suspended') {
      audioContext.resume()
    }

    // Use the first channel's samples
    const audioBuffer = audioContext.createBuffer(1, decoded.channelData[0].length, decoded.sampleRate)
    audioBuffer.copyToChannel(decoded.channelData[0], 0)
    currentAudioBuffer = audioBuffer

    // Create the audio source
    currentSource = audioContext.createBufferSource()
    currentSource.buffer = audioBuffer
    currentSource.connect(audioContext.destination)

    // Listen for the end of playback
    currentSource.onended = () => {
      console.log('🎵 Audio playback completed');
      isSpeaking.value = false
      isComplete.value = true
    }

    // Start playback
    currentSource.start()
    isSpeaking.value = true
    isPaused.value = false
    isComplete.value = false
    console.log('🔊 Audio playback started');
  }

  const pause = () => {
    if (!audioContext || !isSpeaking.value || isPaused.value) {
      console.warn('⚠️ Cannot pause TTS playback');
      return;
    }
    console.log('⏸️ TTS pause called');
    if (audioContext.state === 'running') {
      audioContext.suspend()
      isPaused.value = true
      // Remember the current playback position
      playTimeOffset = audioContext.currentTime
      console.log('✅ Audio paused successfully');
    }
  }

  const resume = () => {
    if (!audioContext || !isSpeaking.value || !isPaused.value) {
      console.warn('⚠️ Cannot resume TTS playback');
      return;
    }
    console.log('▶️ TTS resume called');
    if (audioContext.state === 'suspended') {
      audioContext.resume()
      isPaused.value = false
      console.log('✅ Audio resumed successfully');
    }
  }

  const cancelAudio = () => {
    stop()
  }

  const stop = () => {
    console.log('⏹️ TTS stop called');
    if (currentSource) {
      try {
        // Drop the onended handler first so it cannot overwrite the flags reset below
        currentSource.onended = null
        currentSource.stop()
        currentSource.disconnect()
      } catch (e) {
        console.error('❌ Error stopping audio source:', e);
      }
      currentSource = null
    }
    if (audioContext && audioContext.state === 'running') {
      try {
        audioContext.suspend()
      } catch (e) {
        console.error('❌ Error suspending audio context:', e);
      }
    }
    isSpeaking.value = false
    isPaused.value = false
    isComplete.value = false
    currentAudioBuffer = null
    playTimeOffset = 0
    console.log('✅ TTS playback stopped');
  }

  onUnmounted(() => {
    stop()
  })

  // Stop playback when the page is refreshed or closed
  onMounted(() => {
    if (typeof window !== 'undefined') {
      window.addEventListener('beforeunload', cancelAudio)
    }
  })

  onBeforeUnmount(() => {
    cancelAudio()
    if (typeof window !== 'undefined') {
      window.removeEventListener('beforeunload', cancelAudio)
    }
  })

  onHide(cancelAudio)
  onUnload(cancelAudio)

  return {
    speak,
    pause,
    resume,
    cancelAudio,
    isSpeaking,
    isPaused,
    isComplete
  }
}

function extractSpeechText(markdown) {
  console.log('🔍 extractSpeechText called');
  console.log('📝 Input markdown length:', markdown ? markdown.length : 0);
  console.log('📝 Input markdown preview:', markdown ? markdown.substring(0, 200) + '...' : 'No markdown');

  const jobRegex = /``` job-json\s*({[\s\S]*?})\s*```/g;
  const jobs = [];
  let match;
  let lastJobEndIndex = 0;
  let firstJobStartIndex = -1;

  // Extract each job JSON block and record where the blocks start and end
  while ((match = jobRegex.exec(markdown)) !== null) {
    const jobStr = match[1];
    try {
      const job = JSON.parse(jobStr);
      jobs.push(job);
      if (firstJobStartIndex === -1) {
        firstJobStartIndex = match.index;
      }
      lastJobEndIndex = jobRegex.lastIndex;
      console.log('✅ Found job:', job.jobTitle);
    } catch (e) {
      console.warn('Failed to parse job JSON', e);
    }
  }

  console.log('📊 Jobs found:', jobs.length);
  console.log('📍 First job start index:', firstJobStartIndex);
  console.log('📍 Last job end index:', lastJobEndIndex);

  // Guide text: everything before the first job-json block
  const guideText = firstJobStartIndex > 0 ? markdown.slice(0, firstJobStartIndex).trim() : '';
  // Ending text: everything after the last job-json block
  const endingText = lastJobEndIndex < markdown.length ? markdown.slice(lastJobEndIndex).trim() : '';

  console.log('📝 Guide text:', guideText);
  console.log('📝 Ending text:', endingText);

  // Format each job as a spoken sentence
  const jobTexts = jobs.map((job, index) => {
    return `第 ${index + 1} 个岗位,岗位名称是:${job.jobTitle},公司是:${job.companyName},薪资:${job.salary},地点:${job.location},学历要求:${job.education},经验要求:${job.experience}。`;
  });

  // Assemble the final speech text
  const finalTextParts = [];
  if (guideText) finalTextParts.push(guideText);
  finalTextParts.push(...jobTexts);
  if (endingText) finalTextParts.push(endingText);
  const finalText = finalTextParts.join('\n');

  console.log('🎤 Final TTS text length:', finalText.length);
  console.log('🎤 Final TTS text preview:', finalText.substring(0, 200) + '...');
  console.log('🎤 Final TTS text parts count:', finalTextParts.length);

  return finalText;
}
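
/*
 * Usage sketch (not part of the original module; the import path, endpoint URL,
 * and handler names below are illustrative assumptions). A page could wire the
 * composable up roughly like this inside <script setup>:
 *
 *   import { useTTSPlayer } from '@/composables/useTTSPlayer'
 *
 *   const { speak, pause, resume, cancelAudio, isSpeaking, isPaused, isComplete } =
 *     useTTSPlayer('https://tts.example.com/synthesize')
 *
 *   // Read an assistant reply aloud; pause/resume/cancelAudio go on buttons, and
 *   // isSpeaking / isPaused / isComplete drive the button state in the template.
 *   const onReadAloud = (markdownReply) => speak(markdownReply)
 *
 * extractSpeechText expects replies shaped roughly like:
 *
 *   ...guide text...
 *   ``` job-json
 *   { "jobTitle": "...", "companyName": "...", "salary": "...",
 *     "location": "...", "education": "...", "experience": "..." }
 *   ```
 *   ...ending text...
 */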