flat: stash work in progress
@@ -1,136 +0,0 @@
import {
  ref,
  onBeforeUnmount,
  onMounted
} from 'vue'
import {
  onHide,
  onUnload
} from '@dcloudio/uni-app'

export function useSpeechReader() {
  const isSpeaking = ref(false)
  const isPaused = ref(false)
  let utterance = null

  // Assumes a formatTextForSpeech helper defined elsewhere in the project;
  // it is not imported here, and cleanMarkdown is currently unused.
  const cleanMarkdown = (text) => {
    return formatTextForSpeech(text)
  }

  // Note: these parameter defaults apply only when options is omitted
  // entirely; the per-field fallbacks below (rate 1, pitch 1.1) differ.
  const speak = (text, options = {
    lang: 'zh-CN',
    rate: 0.9,
    pitch: 1.2
  }) => {
    cancelAudio() // reset any previous utterance
    // const voices = speechSynthesis.getVoices()
    // const chineseVoices = voices.filter(v => v.lang.includes('zh'))
    const speechText = extractSpeechText(text);
    utterance = new SpeechSynthesisUtterance(speechText)
    // utterance.lang = options.lang || 'zh'
    utterance.rate = options.rate || 1
    utterance.pitch = options.pitch || 1.1 // pitch (0-2; slightly high sounds softer)

    utterance.onend = () => {
      isSpeaking.value = false
      isPaused.value = false
    }

    speechSynthesis.speak(utterance)
    isSpeaking.value = true
    isPaused.value = false
  }

  const pause = () => {
    if (isSpeaking.value && !isPaused.value) {
      speechSynthesis.pause()
      isPaused.value = true
    }
  }

  const resume = () => {
    if (isSpeaking.value && isPaused.value) {
      speechSynthesis.resume()
      isPaused.value = false
    }
  }

  const cancelAudio = () => {
    speechSynthesis.cancel()
    isSpeaking.value = false
    isPaused.value = false
  }

  // Stop speech when the page is refreshed or closed
  onMounted(() => {
    if (typeof window !== 'undefined') {
      window.addEventListener('beforeunload', cancelAudio)
    }
  })

  onBeforeUnmount(() => {
    cancelAudio()
    if (typeof window !== 'undefined') {
      window.removeEventListener('beforeunload', cancelAudio)
    }
  })

  onHide(cancelAudio)
  onUnload(cancelAudio)

  return {
    speak,
    pause,
    resume,
    cancelAudio,
    isSpeaking,
    isPaused,
  }
}

function extractSpeechText(markdown) {
  const jobRegex = /``` job-json\s*({[\s\S]*?})\s*```/g;
  const jobs = [];
  let match;
  let lastJobEndIndex = 0;
  let firstJobStartIndex = -1;

  // Extract each job-json block and record where the run of blocks starts and ends
  while ((match = jobRegex.exec(markdown)) !== null) {
    const jobStr = match[1];
    try {
      const job = JSON.parse(jobStr);
      jobs.push(job);
      if (firstJobStartIndex === -1) {
        firstJobStartIndex = match.index;
      }
      lastJobEndIndex = jobRegex.lastIndex;
    } catch (e) {
      console.warn('Failed to parse job-json block:', e);
    }
  }

  // Intro text (everything before the first job-json block)
  const guideText = firstJobStartIndex > 0 ?
    markdown.slice(0, firstJobStartIndex).trim() :
    '';

  // Closing text (everything after the last job-json block)
  const endingText = lastJobEndIndex < markdown.length ?
    markdown.slice(lastJobEndIndex).trim() :
    '';

  // Format each job as a spoken sentence
  const jobTexts = jobs.map((job, index) => {
    return `第 ${index + 1} 个岗位,岗位名称是:${job.jobTitle},公司是:${job.companyName},薪资:${job.salary},地点:${job.location},学历要求:${job.education},经验要求:${job.experience}。`;
  });

  // Join everything into the final speech text
  const finalTextParts = [];
  if (guideText) finalTextParts.push(guideText);
  finalTextParts.push(...jobTexts);
  if (endingText) finalTextParts.push(endingText);

  return finalTextParts.join('\n');
}
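For reference, extractSpeechText assumes the model reply embeds each job card as a fenced block tagged job-json inside the markdown. A minimal sketch of that input shape, with illustrative sample values that are not taken from this commit:

// Illustrative input for extractSpeechText; the job fields match the
// template string above (jobTitle, companyName, salary, location,
// education, experience). The sample values are made up.
const sampleReply = [
  '为你找到以下岗位:',
  '``` job-json',
  '{"jobTitle":"前端工程师","companyName":"示例公司","salary":"20-30K","location":"上海","education":"本科","experience":"3年"}',
  '```',
  '需要继续为你介绍吗?'
].join('\n');

// extractSpeechText(sampleReply) returns the intro line, one spoken
// sentence per parsed job, and the closing line, joined by '\n'.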
157 hook/useSystemPlayer.js Normal file
@@ -0,0 +1,157 @@
import {
  ref,
  onUnmounted,
  readonly
} from 'vue';

// Identity filter (currently unused); a no-op alternative to the
// job-card extractor at the bottom of this file.
const defaultExtractSpeechText = (text) => text;

export function useTTSPlayer() {
  const synth = window.speechSynthesis;
  const isSpeaking = ref(false);
  const isPaused = ref(false);
  const utteranceRef = ref(null);

  const cleanup = () => {
    isSpeaking.value = false;
    isPaused.value = false;
    utteranceRef.value = null;
  };

  /**
   * @param {string} text - The text to be spoken.
   * @param {object} [options] - Optional settings for the speech.
   * @param {string} [options.lang] - Language (e.g., 'en-US', 'es-ES').
   * @param {number} [options.rate] - Speed (0.1 to 10, default 1).
   * @param {number} [options.pitch] - Pitch (0 to 2, default 1).
   * @param {SpeechSynthesisVoice} [options.voice] - A specific voice object.
   * @param {function(string): string} [options.extractSpeechText] - A function to filter/clean the text before speaking.
   */
  const speak = (text, options = {}) => {
    if (!synth) {
      console.error('SpeechSynthesis API is not supported in this browser.');
      return;
    }

    if (isSpeaking.value) {
      synth.cancel();
    }

    // Honor the documented options.extractSpeechText filter; fall back to
    // the module-level job-card extractor below.
    const extractFn = typeof options.extractSpeechText === 'function'
      ? options.extractSpeechText
      : extractSpeechText;
    const filteredText = extractFn(text);

    if (!filteredText || typeof filteredText !== 'string' || filteredText.trim() === '') {
      console.warn('Text to speak is empty after filtering.');
      cleanup(); // Ensure state is clean
      return;
    }

    const newUtterance = new SpeechSynthesisUtterance(filteredText); // Use filtered text
    utteranceRef.value = newUtterance;

    if (options.lang) {
      newUtterance.lang = options.lang;
    }
    newUtterance.rate = options.rate || 1;
    newUtterance.pitch = options.pitch || 1;
    if (options.voice) {
      newUtterance.voice = options.voice;
    }

    newUtterance.onstart = () => {
      isSpeaking.value = true;
      isPaused.value = false;
    };

    newUtterance.onpause = () => {
      isPaused.value = true;
    };
    newUtterance.onresume = () => {
      isPaused.value = false;
    };
    newUtterance.onend = () => {
      cleanup();
    };
    newUtterance.onerror = (event) => {
      console.error('SpeechSynthesis Error:', event.error);
      cleanup();
    };

    synth.speak(newUtterance);
  };

  const pause = () => {
    if (synth && isSpeaking.value && !isPaused.value) {
      synth.pause();
    }
  };

  const resume = () => {
    if (synth && isPaused.value) {
      synth.resume();
    }
  };

  const cancelAudio = () => {
    if (synth) {
      synth.cancel();
    }
    cleanup();
  };

  onUnmounted(() => {
    cancelAudio();
  });

  return {
    speak,
    pause,
    resume,
    cancelAudio,
    isSpeaking: readonly(isSpeaking),
    isPaused: readonly(isPaused),
  };
}

function extractSpeechText(markdown) {
  const jobRegex = /``` job-json\s*({[\s\S]*?})\s*```/g;
  const jobs = [];
  let match;
  let lastJobEndIndex = 0;
  let firstJobStartIndex = -1;

  // Extract each job-json block and record where the run of blocks starts and ends
  while ((match = jobRegex.exec(markdown)) !== null) {
    const jobStr = match[1];
    try {
      const job = JSON.parse(jobStr);
      jobs.push(job);
      if (firstJobStartIndex === -1) {
        firstJobStartIndex = match.index;
      }
      lastJobEndIndex = jobRegex.lastIndex;
    } catch (e) {
      console.warn('Failed to parse job-json block:', e);
    }
  }

  // Intro text (everything before the first job-json block)
  const guideText = firstJobStartIndex > 0 ?
    markdown.slice(0, firstJobStartIndex).trim() :
    '';

  // Closing text (everything after the last job-json block)
  const endingText = lastJobEndIndex < markdown.length ?
    markdown.slice(lastJobEndIndex).trim() :
    '';

  // Format each job as a spoken sentence
  const jobTexts = jobs.map((job, index) => {
    return `第 ${index + 1} 个岗位,岗位名称是:${job.jobTitle},公司是:${job.companyName},薪资:${job.salary},地点:${job.location},学历要求:${job.education},经验要求:${job.experience}。`;
  });

  // Join everything into the final speech text
  const finalTextParts = [];
  if (guideText) finalTextParts.push(guideText);
  finalTextParts.push(...jobTexts);
  if (endingText) finalTextParts.push(endingText);

  return finalTextParts.join('\n');
}
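A minimal usage sketch for useTTSPlayer in a hypothetical consuming component; the import alias and handler names are assumptions, not part of this commit:

// Hypothetical <script setup> consumer, for illustration only.
import { useTTSPlayer } from '@/hook/useSystemPlayer'; // '@' alias assumed

const { speak, pause, resume, cancelAudio, isSpeaking, isPaused } = useTTSPlayer();

// Read an assistant reply aloud, slightly slower than the default rate.
const readReply = (replyMarkdown) => {
  speak(replyMarkdown, { lang: 'zh-CN', rate: 0.9 });
};

// isSpeaking and isPaused are readonly refs, so a template can bind a
// play/pause toggle to them and call pause()/resume() accordingly.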
203 hook/useSystemSpeechReader.js Normal file
@@ -0,0 +1,203 @@
import {
  ref,
  readonly,
  onUnmounted
} from 'vue';

// Check API support (guarded so the module can also load where window is undefined)
const SpeechRecognition = typeof window !== 'undefined' ?
  (window.SpeechRecognition || window.webkitSpeechRecognition) :
  undefined;
const isApiSupported = !!SpeechRecognition && !!navigator.mediaDevices && !!window.AudioContext;

/**
 * @param {object} [options]
 * @param {string} [options.lang] - Language code (e.g., 'zh-CN', 'en-US')
 * @returns {object}
 */
export function useAudioRecorder(options = {}) {
  const lang = options.lang || 'zh-CN'; // default to Chinese

  const isRecording = ref(false);
  const recognizedText = ref(''); // full recognized text (including interim results)
  const lastFinalText = ref(''); // most recent finalized segment
  const volumeLevel = ref(0); // volume (0-100)
  const audioDataForDisplay = ref(new Uint8Array()); // waveform data

  let recognition = null;
  let audioContext = null;
  let analyser = null;
  let mediaStreamSource = null;
  let mediaStream = null;
  let dataArray = null; // shared buffer for volume and waveform
  let animationFrameId = null;

  if (!isApiSupported) {
    console.warn(
      'This browser does not support the Web Speech API or Web Audio API; this hook cannot work.'
    );
    return {
      isRecording: readonly(isRecording),
      startRecording: () => console.error('Audio recording not supported.'),
      stopRecording: () => {},
      cancelRecording: () => {},
      audioDataForDisplay: readonly(audioDataForDisplay),
      volumeLevel: readonly(volumeLevel),
      recognizedText: readonly(recognizedText),
      lastFinalText: readonly(lastFinalText),
      isApiSupported, // expose support status on the fallback too
    };
  }

  const setupRecognition = () => {
    recognition = new SpeechRecognition();
    recognition.lang = lang;
    recognition.continuous = true; // keep recognizing until stopped
    recognition.interimResults = true; // emit interim results

    recognition.onstart = () => {
      isRecording.value = true;
      // Start the volume/waveform loop only now: updateAudioData exits
      // immediately while isRecording is still false.
      updateAudioData();
    };

    recognition.onend = () => {
      isRecording.value = false;
      stopAudioAnalysis(); // when recognition stops, stop audio analysis too
    };

    recognition.onerror = (event) => {
      console.error('SpeechRecognition Error:', event.error);
      isRecording.value = false;
      stopAudioAnalysis();
    };

    recognition.onresult = (event) => {
      let interim = '';
      let final = '';

      for (let i = 0; i < event.results.length; i++) {
        const transcript = event.results[i][0].transcript;
        if (event.results[i].isFinal) {
          final += transcript;
          lastFinalText.value = transcript; // keep the last finalized segment
        } else {
          interim += transcript;
        }
      }
      recognizedText.value = final + interim; // combine into the full text
    };
  };

  const startAudioAnalysis = async () => {
    try {
      mediaStream = await navigator.mediaDevices.getUserMedia({
        audio: true
      });
      audioContext = new AudioContext();
      analyser = audioContext.createAnalyser();
      mediaStreamSource = audioContext.createMediaStreamSource(mediaStream);

      // Configure the analyser
      analyser.fftSize = 512; // must be a power of two
      const bufferLength = analyser.frequencyBinCount;
      dataArray = new Uint8Array(bufferLength); // waveform buffer

      // Connect the nodes
      mediaStreamSource.connect(analyser);

      // The analysis loop starts from recognition.onstart, once isRecording
      // is true; calling updateAudioData here would exit at its guard.
    } catch (err) {
      console.error('Failed to get media stream or setup AudioContext:', err);
      if (err.name === 'NotAllowedError' || err.name === 'PermissionDeniedError') {
        alert('麦克风权限被拒绝。请在浏览器设置中允许访问麦克风。');
      }
    }
  };

  const updateAudioData = () => {
    if (!isRecording.value) return; // exit the loop once recording stops

    // Time-domain data (waveform)
    analyser.getByteTimeDomainData(dataArray);
    audioDataForDisplay.value = new Uint8Array(dataArray); // copy the array to trigger reactivity

    // Volume as RMS of the normalized samples
    let sumSquares = 0.0;
    for (const amplitude of dataArray) {
      const normalized = (amplitude / 128.0) - 1.0; // map byte values to -1.0..1.0
      sumSquares += normalized * normalized;
    }
    const rms = Math.sqrt(sumSquares / dataArray.length);
    volumeLevel.value = Math.min(100, Math.floor(rms * 250)); // scale RMS into 0-100

    animationFrameId = requestAnimationFrame(updateAudioData);
  };

  const stopAudioAnalysis = () => {
    if (animationFrameId) {
      cancelAnimationFrame(animationFrameId);
      animationFrameId = null;
    }
    // Stop the microphone tracks
    mediaStream?.getTracks().forEach((track) => track.stop());
    // Close the AudioContext
    audioContext?.close().catch((e) => console.error('Error closing AudioContext', e));

    mediaStream = null;
    audioContext = null;
    analyser = null;
    mediaStreamSource = null;
    volumeLevel.value = 0;
    audioDataForDisplay.value = new Uint8Array();
  };

  const startRecording = async () => {
    if (isRecording.value) return;

    // Reset state
    recognizedText.value = '';
    lastFinalText.value = '';

    try {
      // Audio analysis must start first so the microphone permission is requested up front
      await startAudioAnalysis();

      // If audio setup succeeded (mediaStream exists), start speech recognition
      if (mediaStream) {
        setupRecognition();
        recognition.start();
      }
    } catch (error) {
      console.error("Error starting recording:", error);
    }
  };

  const stopRecording = () => {
    if (!isRecording.value || !recognition) return;
    recognition.stop(); // fires onend, which also stops the audio analysis
  };

  const cancelRecording = () => {
    if (!recognition) return;
    isRecording.value = false; // set state immediately
    recognition.abort(); // also fires onend
    recognizedText.value = '';
    lastFinalText.value = '';
  };

  onUnmounted(() => {
    if (recognition) {
      recognition.abort();
    }
    stopAudioAnalysis();
  });

  return {
    isRecording: readonly(isRecording),
    startRecording,
    stopRecording,
    cancelRecording,
    audioDataForDisplay: readonly(audioDataForDisplay),
    volumeLevel: readonly(volumeLevel),
    recognizedText: readonly(recognizedText),
    lastFinalText: readonly(lastFinalText),
    isApiSupported, // expose support status
  };
}
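Likewise, a minimal usage sketch for useAudioRecorder; the component wiring below is an illustrative assumption, not part of this commit:

// Hypothetical <script setup> consumer, for illustration only.
import { watch } from 'vue';
import { useAudioRecorder } from '@/hook/useSystemSpeechReader'; // '@' alias assumed

const {
  isRecording,
  startRecording,
  stopRecording,
  cancelRecording,
  recognizedText,
  volumeLevel,
} = useAudioRecorder({ lang: 'zh-CN' });

// Mirror the live transcript (final + interim) into the message input.
watch(recognizedText, (text) => {
  // e.g. messageInput.value = text
});

// A press-and-hold mic button maps naturally onto this API:
// touchstart -> startRecording(), touchend -> stopRecording(),
// swipe-away -> cancelRecording(); volumeLevel (0-100) can drive a meter.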