import {
  ref,
  onUnmounted,
  onBeforeUnmount,
  onMounted
} from 'vue'
import {
  onHide,
  onUnload
} from '@dcloudio/uni-app'

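// Composable for server-side TTS playback. `httpUrl` is expected to accept a GET
// request of the form `${httpUrl}?text=...` and return audio bytes (see speak()).
// The `#ifdef H5` / `#ifdef MP-WEIXIN` comments are uni-app conditional compilation
// markers: only the branch matching the build target is compiled.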
export function useTTSPlayer(httpUrl) {
  const isSpeaking = ref(false)
  const isPaused = ref(false)
  const isComplete = ref(false)

  // #ifdef H5
  const audioContext = typeof window !== 'undefined' && (window.AudioContext || window.webkitAudioContext)
    ? new (window.AudioContext || window.webkitAudioContext)()
    : null
  // #endif

  // #ifdef MP-WEIXIN
  const audioContext = null // the WeChat mini program does not support AudioContext
  let innerAudioContext = null // WeChat mini program audio context
  // #endif

  let currentAudioBuffer = null
  let currentSource = null
  let playTimeOffset = 0

  // Initialize the WeChat mini program audio context
  // #ifdef MP-WEIXIN
  const initInnerAudioContext = () => {
    if (!innerAudioContext) {
      innerAudioContext = uni.createInnerAudioContext()
      innerAudioContext.autoplay = false
      innerAudioContext.onPlay(() => {
        console.log('🎵 WeChat mini program audio playback started')
        isSpeaking.value = true
        isPaused.value = false
      })
      innerAudioContext.onPause(() => {
        console.log('⏸️ WeChat mini program audio playback paused')
        isPaused.value = true
      })
      innerAudioContext.onStop(() => {
        console.log('⏹️ WeChat mini program audio playback stopped')
        isSpeaking.value = false
        isComplete.value = true
      })
      innerAudioContext.onEnded(() => {
        console.log('🎵 WeChat mini program audio playback ended')
        isSpeaking.value = false
        isComplete.value = true
      })
      innerAudioContext.onError((res) => {
        console.error('❌ WeChat mini program audio playback error:', res.errMsg)
        isSpeaking.value = false
        isComplete.value = false
      })
      innerAudioContext.onCanplay(() => {
        console.log('🎵 WeChat mini program audio is ready to play')
        // Only start playback if it was requested and is not paused
        if (isSpeaking.value && !isPaused.value) {
          innerAudioContext.play()
        }
      })
    }
  }
  // #endif

  const speak = async (text) => {
    // Stop any current playback
    stop()

    try {
      // Extract the text to synthesize
      const speechText = extractSpeechText(text)
      console.log('📤 Sending text to TTS server via GET:', speechText.substring(0, 100) + '...');

      // Build the GET request URL
      const url = `${httpUrl}?text=${encodeURIComponent(speechText)}`
      console.log('🔗 Final GET URL:', url);

      // #ifdef MP-WEIXIN
      // WeChat mini program environment: use the WeChat audio API
      initInnerAudioContext()
      console.log('🎵 WeChat mini program: setting audio src to:', url);
      // Reset the audio state
      isSpeaking.value = true
      isPaused.value = false
      isComplete.value = false
      // Set src and wait for the onCanplay event before playing
      innerAudioContext.src = url
      // #endif

      // #ifdef H5
      // H5 environment: use AudioContext
      if (audioContext) {
        // Send a GET request to fetch the speech data
        const response = await fetch(url)
        if (!response.ok) {
          throw new Error(`HTTP error! status: ${response.status}`)
        }

        // Read the binary data
        const arrayBuffer = await response.arrayBuffer()
        console.log('✅ Received audio data, size:', arrayBuffer.byteLength + ' bytes');

        try {
          // Decode directly with audioContext.decodeAudioData, no external library required
          const decoded = await audioContext.decodeAudioData(arrayBuffer)
          console.log('✅ Audio decoded, sampleRate:', decoded.sampleRate, 'channels:', decoded.numberOfChannels);

          // Play the audio
          playDecodedAudio(decoded)
        } catch (decodeError) {
          console.error('❌ AudioContext decodeAudioData failed:', decodeError);
          // Fallback: create a simple audio buffer
          createFallbackAudio(arrayBuffer)
        }
      }
      // #endif

    } catch (error) {
      console.error('❌ TTS synthesis failed:', error);
      isSpeaking.value = false
      isComplete.value = false
    }
  }

  // #ifdef H5
  const playDecodedAudio = (decoded) => {
    if (!audioContext) return;

    // If a previous pause()/stop() left the context suspended, resume it so the
    // new source is audible
    if (audioContext.state === 'suspended') {
      audioContext.resume()
    }

    // Create the audio source
    currentSource = audioContext.createBufferSource()
    currentSource.buffer = decoded
    currentSource.connect(audioContext.destination)

    // Listen for the end of playback
    currentSource.onended = () => {
      console.log('🎵 Audio playback completed');
      isSpeaking.value = false
      isComplete.value = true
    }

    // Start playback
    currentSource.start()
    isSpeaking.value = true
    isPaused.value = false
    isComplete.value = false
    console.log('🎵 Audio playback started');
  }

  // Fallback: create a simple audio buffer
  const createFallbackAudio = (arrayBuffer) => {
    console.log('🔄 Creating fallback audio');

    // Build a short buffer containing a notification tone
    const sampleRate = 44100
    const duration = 1 // 1 second
    const frameCount = sampleRate * duration

    const audioBuffer = audioContext.createBuffer(1, frameCount, sampleRate)
    const channelData = audioBuffer.getChannelData(0)

    // Generate a simple tone (sine wave)
    for (let i = 0; i < frameCount; i++) {
      const t = i / sampleRate
      channelData[i] = Math.sin(2 * Math.PI * 440 * t) * 0.1 // 440 Hz sine wave at volume 0.1
    }

    playDecodedAudio(audioBuffer)
  }
  // #endif

  const pause = () => {
    // #ifdef MP-WEIXIN
    if (innerAudioContext && isSpeaking.value && !isPaused.value) {
      console.log('⏸️ WeChat mini program audio paused');
      innerAudioContext.pause()
      return
    }
    // #endif

    // #ifdef H5
    if (!audioContext || !isSpeaking.value || isPaused.value) {
      console.warn('⚠️ Cannot pause TTS playback');
      return;
    }

    console.log('⏸️ TTS pause called');

    if (audioContext.state === 'running') {
      audioContext.suspend()
      isPaused.value = true
      // Save the current playback position
      playTimeOffset = audioContext.currentTime
      console.log('✅ Audio paused successfully');
    }
    // #endif
  }

  const resume = () => {
    // #ifdef MP-WEIXIN
    if (innerAudioContext && isSpeaking.value && isPaused.value) {
      console.log('▶️ WeChat mini program audio resumed');
      innerAudioContext.play()
      return
    }
    // #endif

    // #ifdef H5
    if (!audioContext || !isSpeaking.value || !isPaused.value) {
      console.warn('⚠️ Cannot resume TTS playback');
      return;
    }

    console.log('▶️ TTS resume called');

    if (audioContext.state === 'suspended') {
      audioContext.resume()
      isPaused.value = false
      console.log('✅ Audio resumed successfully');
    }
    // #endif
  }

  const cancelAudio = () => {
    stop()
  }

  const stop = () => {
    console.log('⏹️ TTS stop called');

    // #ifdef MP-WEIXIN
    if (innerAudioContext) {
      try {
        innerAudioContext.stop()
        console.log('✅ WeChat mini program audio stopped');
      } catch (e) {
        console.error('❌ Error stopping WeChat mini program audio:', e);
      }
    }
    // #endif

    // #ifdef H5
    if (currentSource) {
      try {
        currentSource.stop()
        currentSource.disconnect()
      } catch (e) {
        console.error('❌ Error stopping audio source:', e);
      }
      currentSource = null
    }

    if (audioContext && audioContext.state === 'running') {
      try {
        audioContext.suspend()
      } catch (e) {
        console.error('❌ Error suspending audio context:', e);
      }
    }
    // #endif

    isSpeaking.value = false
    isPaused.value = false
    isComplete.value = false
    currentAudioBuffer = null
    playTimeOffset = 0

    console.log('✅ TTS playback stopped');
  }

  onUnmounted(() => {
    stop()
  })

  // Stop playback when the page is refreshed or closed
  onMounted(() => {
    if (typeof window !== 'undefined') {
      window.addEventListener('beforeunload', cancelAudio)
    }
  })

  onBeforeUnmount(() => {
    cancelAudio()
    if (typeof window !== 'undefined') {
      window.removeEventListener('beforeunload', cancelAudio)
    }
  })

  onHide(cancelAudio)
  onUnload(cancelAudio)

  return {
    speak,
    pause,
    resume,
    cancelAudio,
    isSpeaking,
    isPaused,
    isComplete
  }
}
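
// A minimal usage sketch (illustrative only; the endpoint URL and the component
// setup below are assumptions, not part of this module):
//
//   <script setup>
//   import { useTTSPlayer } from '@/composables/useTTSPlayer'
//
//   // Point the composable at a TTS endpoint that accepts GET ?text=...
//   const { speak, pause, resume, cancelAudio, isSpeaking, isPaused } =
//     useTTSPlayer('https://example.com/api/tts')
//
//   // Speak the assistant's markdown reply (job-json blocks are summarized)
//   const onPlayClick = (markdown) => speak(markdown)
//   </script>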
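
// Expected input shape for extractSpeechText (a sketch; the field values are
// illustrative, the field names match what the template below reads):
//
//   Intro text...
//   ``` job-json
//   { "jobTitle": "...", "companyName": "...", "salary": "...",
//     "location": "...", "education": "...", "experience": "..." }
//   ```
//   Closing text...
//
// The intro, a spoken summary of each job, and the closing text are joined with
// newlines and returned as the text to synthesize.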
function extractSpeechText(markdown) {
  console.log('🔍 extractSpeechText called');
  console.log('📝 Input markdown length:', markdown ? markdown.length : 0);
  console.log('📝 Input markdown preview:', markdown ? markdown.substring(0, 200) + '...' : 'No markdown');

  const jobRegex = /``` job-json\s*({[\s\S]*?})\s*```/g;
  const jobs = [];
  let match;
  let lastJobEndIndex = 0;
  let firstJobStartIndex = -1;

  // Extract the job JSON blocks and record where they start and end
  while ((match = jobRegex.exec(markdown)) !== null) {
    const jobStr = match[1];
    try {
      const job = JSON.parse(jobStr);
      jobs.push(job);
      if (firstJobStartIndex === -1) {
        firstJobStartIndex = match.index;
      }
      lastJobEndIndex = jobRegex.lastIndex;
      console.log('✅ Found job:', job.jobTitle);
    } catch (e) {
      console.warn('JSON parsing failed', e);
    }
  }

  console.log('📊 Jobs found:', jobs.length);
  console.log('📍 First job start index:', firstJobStartIndex);
  console.log('📍 Last job end index:', lastJobEndIndex);

  // Extract the intro text (everything before the first job-json block)
  const guideText = firstJobStartIndex > 0 ?
    markdown.slice(0, firstJobStartIndex).trim() :
    '';

  // Extract the closing text (everything after the last job-json block)
  const endingText = lastJobEndIndex < markdown.length ?
    markdown.slice(lastJobEndIndex).trim() :
    '';

  console.log('📝 Guide text:', guideText);
  console.log('📝 Ending text:', endingText);

  // Format each job as spoken (Chinese) text:
  // "Job N: the title is ..., the company is ..., salary ..., location ...,
  //  education requirement ..., experience requirement ..."
  const jobTexts = jobs.map((job, index) => {
    return `第 ${index + 1} 个岗位,岗位名称是:${job.jobTitle},公司是:${job.companyName},薪资:${job.salary},地点:${job.location},学历要求:${job.education},经验要求:${job.experience}。`;
  });

  // Join everything into the final speech text
  const finalTextParts = [];
  if (guideText) finalTextParts.push(guideText);
  finalTextParts.push(...jobTexts);
  if (endingText) finalTextParts.push(endingText);

  const finalText = finalTextParts.join('\n');
  console.log('🎤 Final TTS text length:', finalText.length);
  console.log('🎤 Final TTS text preview:', finalText.substring(0, 200) + '...');
  console.log('🎤 Final TTS text parts count:', finalTextParts.length);

  return finalText;
}