flat: WIP
hook/useSystemSpeechReader.js · 203 lines · Normal file
@@ -0,0 +1,203 @@
import {
  ref,
  readonly,
  onUnmounted
} from 'vue';

// Check API compatibility
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
const isApiSupported = !!SpeechRecognition && !!navigator.mediaDevices && !!window.AudioContext;

/**
 * @param {object} [options]
 * @param {string} [options.lang] - Language code (e.g., 'zh-CN', 'en-US')
 * @returns {object} Reactive recording state and control functions
 */
export function useAudioRecorder(options = {}) {
  const lang = options.lang || 'zh-CN'; // Default to Chinese

  const isRecording = ref(false);
  const recognizedText = ref(''); // Full recognized text (including interim results)
  const lastFinalText = ref(''); // Most recent finalized segment
  const volumeLevel = ref(0); // Volume level (0-100)
  const audioDataForDisplay = ref(new Uint8Array()); // Waveform data

  let recognition = null;
  let audioContext = null;
  let analyser = null;
  let mediaStreamSource = null;
  let mediaStream = null;
  let dataArray = null; // Used for volume and waveform calculations
  let animationFrameId = null;

  if (!isApiSupported) {
    console.warn(
      'This browser does not support the Web Speech API or the Web Audio API; the hook cannot work.'
    );
    return {
      isRecording: readonly(isRecording),
      startRecording: () => console.error('Audio recording not supported.'),
      stopRecording: () => {},
      cancelRecording: () => {},
      audioDataForDisplay: readonly(audioDataForDisplay),
      volumeLevel: readonly(volumeLevel),
      recognizedText: readonly(recognizedText),
      lastFinalText: readonly(lastFinalText),
      isApiSupported, // Expose support status, matching the supported branch
    };
  }

  const setupRecognition = () => {
    recognition = new SpeechRecognition();
    recognition.lang = lang;
    recognition.continuous = true; // Keep recognizing continuously
    recognition.interimResults = true; // Return interim results

    recognition.onstart = () => {
      isRecording.value = true;
    };

    recognition.onend = () => {
      isRecording.value = false;
      stopAudioAnalysis(); // When speech recognition stops, stop audio analysis as well
    };

    recognition.onerror = (event) => {
      console.error('SpeechRecognition Error:', event.error);
      isRecording.value = false;
      stopAudioAnalysis();
    };

    recognition.onresult = (event) => {
      let interim = '';
      let final = '';

      for (let i = 0; i < event.results.length; i++) {
        const transcript = event.results[i][0].transcript;
        if (event.results[i].isFinal) {
          final += transcript;
          lastFinalText.value = transcript; // Store the last finalized segment
        } else {
          interim += transcript;
        }
      }
      recognizedText.value = final + interim; // Combine into the full text
    };
  };

  const startAudioAnalysis = async () => {
    try {
      mediaStream = await navigator.mediaDevices.getUserMedia({
        audio: true
      });
      audioContext = new AudioContext();
      analyser = audioContext.createAnalyser();
      mediaStreamSource = audioContext.createMediaStreamSource(mediaStream);

      // Configure the analyser
      analyser.fftSize = 512; // Must be a power of 2
      const bufferLength = analyser.frequencyBinCount;
      dataArray = new Uint8Array(bufferLength); // Holds waveform samples

      // Connect the nodes
      mediaStreamSource.connect(analyser);

      // Start the analysis loop
      updateAudioData();
    } catch (err) {
      console.error('Failed to get media stream or setup AudioContext:', err);
      if (err.name === 'NotAllowedError' || err.name === 'PermissionDeniedError') {
        alert('Microphone access was denied. Please allow microphone access in your browser settings.');
      }
    }
  };

  const updateAudioData = () => {
    // Exit the loop once analysis has been torn down. (The original guard checked
    // isRecording, but that is still false when this first runs, before onstart fires,
    // which would kill the loop immediately; checking the analyser avoids that.)
    if (!analyser || !dataArray) return;

    // Get time-domain data (waveform)
    analyser.getByteTimeDomainData(dataArray);
    audioDataForDisplay.value = new Uint8Array(dataArray); // Copy the array to trigger reactivity

    // Compute volume (RMS)
    let sumSquares = 0.0;
    for (const amplitude of dataArray) {
      const normalized = (amplitude / 128.0) - 1.0; // Normalize to the -1.0..1.0 range
      sumSquares += normalized * normalized;
    }
    const rms = Math.sqrt(sumSquares / dataArray.length);
    volumeLevel.value = Math.min(100, Math.floor(rms * 250)); // Scale RMS into the 0-100 range

    animationFrameId = requestAnimationFrame(updateAudioData);
  };

  const stopAudioAnalysis = () => {
    if (animationFrameId) {
      cancelAnimationFrame(animationFrameId);
      animationFrameId = null;
    }
    // Stop the microphone tracks
    mediaStream?.getTracks().forEach((track) => track.stop());
    // Close the AudioContext
    audioContext?.close().catch((e) => console.error('Error closing AudioContext', e));

    mediaStream = null;
    audioContext = null;
    analyser = null;
    mediaStreamSource = null;
    volumeLevel.value = 0;
    audioDataForDisplay.value = new Uint8Array();
  };

  const startRecording = async () => {
    if (isRecording.value) return;

    // Reset state
    recognizedText.value = '';
    lastFinalText.value = '';

    try {
      // Start audio analysis first so the microphone permission is requested
      await startAudioAnalysis();

      // If audio started successfully (mediaStream exists), start speech recognition
      if (mediaStream) {
        setupRecognition();
        recognition.start();
      }
    } catch (error) {
      console.error('Error starting recording:', error);
    }
  };

  const stopRecording = () => {
    if (!isRecording.value || !recognition) return;
    recognition.stop(); // Triggers the onend handler, which stops audio analysis
  };

  const cancelRecording = () => {
    if (!recognition) return;
    isRecording.value = false; // Update state immediately
    recognition.abort(); // Also triggers onend
    recognizedText.value = '';
    lastFinalText.value = '';
  };

  // Clean up when the consuming component unmounts
  onUnmounted(() => {
    if (recognition) {
      recognition.abort();
    }
    stopAudioAnalysis();
  });

  return {
    isRecording: readonly(isRecording),
    startRecording,
    stopRecording,
    cancelRecording,
    audioDataForDisplay: readonly(audioDataForDisplay),
    volumeLevel: readonly(volumeLevel),
    recognizedText: readonly(recognizedText),
    lastFinalText: readonly(lastFinalText),
    isApiSupported, // Expose support status
  };
}
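
For reference, a minimal usage sketch of the composable in a Vue 3 single-file component. The import path and the template bindings are assumptions for illustration and are not part of this commit:

<script setup>
// Minimal usage sketch (assumed import path; adjust to the project's alias setup).
import { useAudioRecorder } from '@/hook/useSystemSpeechReader';

const {
  isRecording,
  startRecording,
  stopRecording,
  cancelRecording,
  recognizedText,
  volumeLevel,
  isApiSupported,
} = useAudioRecorder({ lang: 'zh-CN' });
</script>

<template>
  <div v-if="isApiSupported">
    <button v-if="!isRecording" @click="startRecording">Start</button>
    <button v-else @click="stopRecording">Stop</button>
    <button @click="cancelRecording">Cancel</button>
    <p>Volume: {{ volumeLevel }}</p>
    <p>{{ recognizedText }}</p>
  </div>
  <p v-else>Speech recognition is not supported in this browser.</p>
</template>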