// qingdao-employment-service/hook/useRealtimeRecorderOnce.js
import {
  ref,
  onUnmounted
} from 'vue'
import config from '@/config'
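/**
 * One-shot realtime recorder composable.
 * Captures 16 kHz / mono / 16-bit audio on both H5 (Web Audio) and
 * APP/mini-program (uni.getRecorderManager), packages the samples as a
 * WAV file, and submits the result to the backend ASR endpoint.
 */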
export function useRealtimeRecorderOnce() {
  // --- Reactive state ---
  const isRecording = ref(false)
  const isProcessing = ref(false)
  const recordingDuration = ref(0)
  const volumeLevel = ref(0) // 0-100
  const recognizedText = ref('')
  const audioData = ref(null)
  const audioDataForDisplay = ref([])
  // --- Internal variables ---
  let durationTimer = null
  // --- APP/mini-program variables ---
  let recorderManager = null;
  let appAudioChunks = [];
  // --- H5 variables ---
  let audioContext = null;
  let mediaRecorder = null; // unused: the H5 path records via ScriptProcessorNode, not MediaRecorder
  let h5Stream = null;
  let h5AudioChunks = [];
  let analyser = null;
  let dataArray = null;
  // --- Recorder configuration ---
  const RECORD_CONFIG = {
    duration: 600000, // max recording length in ms (10 minutes)
    sampleRate: 16000,
    numberOfChannels: 1,
    format: 'wav',
    encodeBitRate: 16000,
    frameSize: 4096
  }
  // --- WAV file-header helpers ---
  // Wraps raw Float32 samples in a canonical 44-byte RIFF/WAVE header.
  const encodeWAV = (samples, sampleRate = 16000, numChannels = 1, bitsPerSample = 16) => {
    const bytesPerSample = bitsPerSample / 8;
    const blockAlign = numChannels * bytesPerSample;
    const byteRate = sampleRate * blockAlign;
    const dataSize = samples.length * bytesPerSample;
    const buffer = new ArrayBuffer(44 + dataSize);
    const view = new DataView(buffer);
    // RIFF chunk descriptor
    writeString(view, 0, 'RIFF');
    view.setUint32(4, 36 + dataSize, true);
    writeString(view, 8, 'WAVE');
    // fmt sub-chunk
    writeString(view, 12, 'fmt ');
    view.setUint32(16, 16, true); // Subchunk1Size (16 for PCM)
    view.setUint16(20, 1, true); // AudioFormat (1 for PCM)
    view.setUint16(22, numChannels, true);
    view.setUint32(24, sampleRate, true);
    view.setUint32(28, byteRate, true);
    view.setUint16(32, blockAlign, true);
    view.setUint16(34, bitsPerSample, true);
    // data sub-chunk
    writeString(view, 36, 'data');
    view.setUint32(40, dataSize, true);
    // Write audio samples: clamp to [-1, 1] and scale to signed 16-bit
    const volume = 1;
    let offset = 44;
    for (let i = 0; i < samples.length; i++) {
      let sample = Math.max(-1, Math.min(1, samples[i]));
      sample = sample * volume;
      view.setInt16(offset, sample < 0 ? sample * 0x8000 : sample * 0x7FFF, true);
      offset += 2;
    }
    return buffer;
  }
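  // Example: one second of 16 kHz mono 16-bit audio is 16000 samples x 2 bytes
  // = 32000 data bytes, so the resulting buffer is 32044 bytes including the
  // 44-byte header.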
  const writeString = (view, offset, string) => {
    for (let i = 0; i < string.length; i++) {
      view.setUint8(offset + i, string.charCodeAt(i));
    }
  }
  // Note: currently unused; encodeWAV inlines the same Float32 -> Int16 conversion.
  const floatTo16BitPCM = (output, offset, input) => {
    for (let i = 0; i < input.length; i++, offset += 2) {
      const s = Math.max(-1, Math.min(1, input[i]));
      output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
    }
  }
  // --- Volume metering helpers ---
  const calculateVolumeFromFloat32 = (float32Array) => {
    let sum = 0;
    const length = float32Array.length;
    // Compute the RMS (root mean square)
    for (let i = 0; i < length; i++) {
      sum += float32Array[i] * float32Array[i];
    }
    const rms = Math.sqrt(sum / length);
    // Map to a 0-100 value.
    // Conversational speech typically has an RMS of 0.01-0.1; shouting can reach 0.3.
    let volume = Math.min(100, Math.floor(rms * 300));
    // Treat readings below 5 as silence to suppress the noise floor
    if (volume < 5) volume = 0;
    return volume;
  }
  const calculateVolumeFromInt16 = (int16Array) => {
    let sum = 0;
    const length = int16Array.length;
    // Compute the RMS
    for (let i = 0; i < length; i++) {
      const normalized = int16Array[i] / 32768; // normalize to [-1, 1]
      sum += normalized * normalized;
    }
    const rms = Math.sqrt(sum / length);
    // Map to a 0-100 value
    let volume = Math.min(100, Math.floor(rms * 300));
    // Apply the same silence floor
    if (volume < 5) volume = 0;
    return volume;
  }
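  // Example: a frame at constant amplitude 0.1 has RMS 0.1, so
  // Math.floor(0.1 * 300) = 30, a mid-level meter reading; silence maps to 0.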
  /**
   * Start recording (entry point)
   */
  const startRecording = async () => {
    if (isRecording.value) return
    try {
      recognizedText.value = ''
      volumeLevel.value = 0
      audioData.value = null
      audioDataForDisplay.value = []
      appAudioChunks = []
      h5AudioChunks = []
      // #ifdef H5
      // getUserMedia is only available in secure contexts
      if (location.protocol !== 'https:' && location.hostname !== 'localhost') {
        uni.showToast({
          title: 'H5录音需要HTTPS环境',
          icon: 'none'
        });
        return;
      }
      await startH5Recording();
      // #endif
      // #ifndef H5
      startAppRecording();
      // #endif
      isRecording.value = true;
      recordingDuration.value = 0;
      durationTimer = setInterval(() => recordingDuration.value++, 1000);
      // Start the waveform display loop
      updateAudioDataForDisplay();
    } catch (err) {
      console.error('Failed to start recording:', err);
      uni.showToast({
        title: '启动失败: ' + (err.message || ''),
        icon: 'none'
      });
      cleanup();
    }
  }
  /**
   * H5 recording implementation - builds the WAV file manually
   */
  const startH5Recording = async () => {
    try {
      // 1. Acquire the microphone stream
      const stream = await navigator.mediaDevices.getUserMedia({
        audio: {
          sampleRate: 16000,
          channelCount: 1,
          echoCancellation: true,
          noiseSuppression: true,
          autoGainControl: false
        }
      });
      h5Stream = stream;
      // 2. Create an AudioContext for processing
      const AudioContext = window.AudioContext || window.webkitAudioContext;
      audioContext = new AudioContext({
        sampleRate: 16000,
        latencyHint: 'interactive'
      });
      // Source node fed by the microphone stream
      const source = audioContext.createMediaStreamSource(stream);
      // Analyser node for volume metering
      analyser = audioContext.createAnalyser();
      analyser.fftSize = 256;
      analyser.smoothingTimeConstant = 0.8;
      dataArray = new Float32Array(analyser.frequencyBinCount);
      source.connect(analyser);
      // ScriptProcessorNode for collecting raw PCM blocks
      const processor = audioContext.createScriptProcessor(4096, 1, 1);
      processor.onaudioprocess = (e) => {
        if (!isRecording.value) return;
        // Grab the current input block
        const inputData = e.inputBuffer.getChannelData(0);
        // Update the volume meter
        analyser.getFloatTimeDomainData(dataArray);
        const volume = calculateVolumeFromFloat32(dataArray);
        volumeLevel.value = volume;
        // Copy the block: the underlying buffer is reused by the browser
        const buffer = new Float32Array(inputData.length);
        buffer.set(inputData);
        h5AudioChunks.push(buffer);
      };
      source.connect(processor);
      processor.connect(audioContext.destination);
      console.log('H5 16kHz WAV recording started');
    } catch (err) {
      console.error('Failed to start H5 recording:', err);
      throw err;
    }
  }
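  // Note: ScriptProcessorNode is deprecated in the Web Audio spec. A minimal
  // sketch of the AudioWorklet replacement, assuming a worklet module at
  // '/static/pcm-recorder.js' (hypothetical path) registered as 'pcm-recorder'
  // that posts copied Float32Array blocks through its MessagePort:
  //
  //   await audioContext.audioWorklet.addModule('/static/pcm-recorder.js');
  //   const worklet = new AudioWorkletNode(audioContext, 'pcm-recorder');
  //   worklet.port.onmessage = (e) => h5AudioChunks.push(e.data);
  //   source.connect(worklet);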
  /**
   * Release H5 recording resources
   */
  const stopH5Resources = () => {
    // Closing the context disconnects all nodes
    if (audioContext && audioContext.state !== 'closed') {
      audioContext.close();
    }
    // Stop the microphone tracks
    if (h5Stream) {
      h5Stream.getTracks().forEach(track => track.stop());
    }
    audioContext = null;
    analyser = null;
    dataArray = null;
    h5Stream = null;
  }
  /**
   * APP/mini-program recording implementation
   */
  const startAppRecording = () => {
    recorderManager = uni.getRecorderManager();
    recorderManager.onFrameRecorded((res) => {
      const { frameBuffer } = res;
      if (frameBuffer && frameBuffer.byteLength > 0) {
        // Meter the volume from the raw 16-bit frame
        const int16Data = new Int16Array(frameBuffer);
        const volume = calculateVolumeFromInt16(int16Data);
        volumeLevel.value = volume;
        // Keep the frame for later WAV assembly
        appAudioChunks.push(frameBuffer);
      }
    });
    recorderManager.onStart(() => {
      console.log('APP 16kHz WAV recording started');
    });
    recorderManager.onError((err) => {
      console.error('APP recorder error:', err);
      uni.showToast({
        title: '录音失败: ' + err.errMsg,
        icon: 'none'
      });
      cleanup();
    });
    recorderManager.start(RECORD_CONFIG);
  }
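  // Caveat: reading frameBuffer as Int16Array assumes raw PCM frames. On
  // WeChat mini-programs, the frameSize/onFrameRecorded callback is documented
  // for mp3/pcm formats only, so if no frames arrive with format 'wav',
  // switching RECORD_CONFIG.format to 'pcm' (keeping encodeWAV for the header)
  // may be required. Verify against the target platform's recorder docs.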
  /**
   * Stop recording (all platforms)
   */
  const stopRecording = async () => {
    if (!isRecording.value) return;
    isRecording.value = false;
    clearInterval(durationTimer);
    // Stop the hardware recorder
    stopHardwareResource();
    // Package and submit the captured audio
    await processAudioData();
  }
  /**
   * Cancel recording and discard the result
   */
  const cancelRecording = () => {
    if (!isRecording.value) return;
    console.log('Recording cancelled - result discarded');
    // 1. Stop the hardware recorder
    stopHardwareResource();
    // 2. Reset state
    recognizedText.value = '';
    audioData.value = null;
    audioDataForDisplay.value = [];
    appAudioChunks = [];
    h5AudioChunks = [];
    // 3. Release resources
    cleanup();
  }
  /**
   * Stop the platform recording resources
   */
  const stopHardwareResource = () => {
    // APP/mini-program
    if (recorderManager) {
      recorderManager.stop();
    }
    // H5
    // #ifdef H5
    stopH5Resources();
    // #endif
  }
  /**
   * Drive the waveform display data
   */
  const updateAudioDataForDisplay = () => {
    const updateInterval = setInterval(() => {
      if (!isRecording.value) {
        clearInterval(updateInterval);
        audioDataForDisplay.value = [];
        return;
      }
      // Synthesize waveform bars from the current volume level
      const baseValue = volumeLevel.value / 100;
      const data = [];
      // Generate 31 data points
      for (let i = 0; i < 31; i++) {
        // Sine-based animation, taller in the middle and lower at the edges
        const position = i / 30;
        const centerDistance = Math.abs(position - 0.5);
        const waveValue = Math.sin(Date.now() / 200 + i * 0.3) * 0.4 + 0.5;
        // Volume factor guarantees a minimum visible bar height
        const volumeFactor = baseValue * 0.7 + 0.3;
        // Combine into the final bar value, clamped to [0.1, 1]
        let finalValue = waveValue * (1 - centerDistance) * volumeFactor;
        finalValue = Math.max(0.1, Math.min(1, finalValue));
        data.push(finalValue);
      }
      audioDataForDisplay.value = data;
    }, 50); // 50 ms refresh for smoother animation
  }
  /**
   * Assemble the captured audio into a WAV file and submit it
   */
  const processAudioData = async () => {
    if (isProcessing.value) return;
    isProcessing.value = true;
    try {
      let audioBlob = null;
      // #ifdef H5
      // H5: merge the collected Float32 chunks and build the WAV
      if (h5AudioChunks.length > 0) {
        // Concatenate all Float32Array chunks
        const totalLength = h5AudioChunks.reduce((sum, chunk) => sum + chunk.length, 0);
        const mergedSamples = new Float32Array(totalLength);
        let offset = 0;
        h5AudioChunks.forEach(chunk => {
          mergedSamples.set(chunk, offset);
          offset += chunk.length;
        });
        // Build the WAV file
        const wavBuffer = encodeWAV(mergedSamples, 16000, 1, 16);
        audioBlob = new Blob([wavBuffer], { type: 'audio/wav' });
        console.log(`H5 WAV built: ${audioBlob.size} bytes, duration: ${mergedSamples.length / 16000}s`);
      }
      // #endif
      // #ifndef H5
      // APP/mini-program: merge the Int16 frames and build the WAV.
      // Note: Blob is a web API; mini-program runtimes may need a
      // filesystem-based upload instead (see the note below sendToASR).
      if (appAudioChunks.length > 0) {
        // Concatenate all Int16 frames
        const totalLength = appAudioChunks.reduce((sum, chunk) => sum + chunk.byteLength / 2, 0);
        const mergedInt16 = new Int16Array(totalLength);
        let offset = 0;
        appAudioChunks.forEach(chunk => {
          const int16Data = new Int16Array(chunk);
          mergedInt16.set(int16Data, offset);
          offset += int16Data.length;
        });
        // Convert to Float32 for encodeWAV
        const floatSamples = new Float32Array(mergedInt16.length);
        for (let i = 0; i < mergedInt16.length; i++) {
          floatSamples[i] = mergedInt16[i] / 32768;
        }
        // Build the WAV file
        const wavBuffer = encodeWAV(floatSamples, 16000, 1, 16);
        audioBlob = new Blob([wavBuffer], { type: 'audio/wav' });
        console.log(`APP WAV built: ${audioBlob.size} bytes, duration: ${floatSamples.length / 16000}s`);
      }
      // #endif
      if (audioBlob && audioBlob.size > 44) { // must contain more than the WAV header
        audioData.value = audioBlob;
        // Save the file for debugging (optional)
        // debugSaveWavFile(audioBlob);
        // Submit to the server for recognition; the finally block
        // clears isProcessing once recognition completes
        await sendToASR(audioBlob);
      } else {
        throw new Error('Recording is empty or invalid');
      }
    } catch (error) {
      console.error('Failed to process audio data:', error);
      uni.showToast({
        title: '音频处理失败,请重试',
        icon: 'none'
      });
    } finally {
      isProcessing.value = false;
      appAudioChunks = [];
      h5AudioChunks = [];
    }
  }
  /**
   * Save the WAV file locally for debugging (H5 only: relies on DOM APIs)
   */
  const debugSaveWavFile = (blob) => {
    const url = URL.createObjectURL(blob);
    const a = document.createElement('a');
    a.href = url;
    a.download = `recording_${Date.now()}.wav`;
    document.body.appendChild(a);
    a.click();
    document.body.removeChild(a);
    URL.revokeObjectURL(url);
    console.log('WAV file saved for debugging');
  }
  /**
   * Upload the audio to the ASR server
   */
  const sendToASR = async (audioBlob) => {
    try {
      // Build the multipart form body
      const formData = new FormData();
      formData.append('file', audioBlob, 'recording.wav');
      // Attach the auth token
      const token = uni.getStorageSync('token') || '';
      const asrUrl = `${config.baseUrl}/app/speech/asr`
      const response = await fetch(asrUrl, {
        method: 'POST',
        headers: {
          'Authorization': `Bearer ${token}`
        },
        body: formData
      });
      if (response.ok) {
        const result = await response.json();
        if (result.code == 200) {
          recognizedText.value = result.data || ''
        } else {
          uni.showToast({
            title: result.msg || '识别失败',
            icon: 'none'
          })
        }
      } else {
        const errorText = await response.text();
        throw new Error(`ASR request failed: ${response.status} - ${errorText}`);
      }
    } catch (error) {
      console.error('ASR recognition failed:', error);
    }
  }
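  // fetch/FormData/Blob are browser APIs; on APP and mini-program builds the
  // upload would normally go through uni.uploadFile with a recorded file path.
  // A minimal sketch, assuming tempFilePath comes from recorderManager.onStop
  // and the same ASR endpoint and response shape:
  //
  //   uni.uploadFile({
  //     url: `${config.baseUrl}/app/speech/asr`,
  //     filePath: tempFilePath, // from recorderManager.onStop((res) => res.tempFilePath)
  //     name: 'file',
  //     header: { Authorization: `Bearer ${uni.getStorageSync('token') || ''}` },
  //     success: (res) => {
  //       const result = JSON.parse(res.data);
  //       if (result.code == 200) recognizedText.value = result.data || '';
  //     }
  //   });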
  /**
   * Reset state and release handles
   */
  const cleanup = () => {
    clearInterval(durationTimer);
    isRecording.value = false;
    isProcessing.value = false;
    recordingDuration.value = 0;
    volumeLevel.value = 0;
    audioDataForDisplay.value = [];
    if (recorderManager) {
      recorderManager = null;
    }
  }
  onUnmounted(() => {
    if (isRecording.value) {
      // Fire-and-forget: stopRecording is async, but unmount cannot await it
      stopRecording();
    }
    cleanup();
  })
  return {
    isRecording,
    isProcessing,
    recordingDuration,
    volumeLevel,
    recognizedText,
    audioData,
    audioDataForDisplay,
    startRecording,
    stopRecording,
    cancelRecording
  }
}
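// Usage sketch (hypothetical component; names are illustrative):
//
//   <script setup>
//   import { useRealtimeRecorderOnce } from '@/hook/useRealtimeRecorderOnce'
//   const {
//     isRecording, isProcessing, recordingDuration,
//     volumeLevel, recognizedText, audioDataForDisplay,
//     startRecording, stopRecording, cancelRecording
//   } = useRealtimeRecorderOnce()
//   // Bind startRecording/stopRecording to press/release events on a record
//   // button; render audioDataForDisplay as waveform bars and show
//   // recognizedText once stopRecording's ASR round-trip finishes.
//   </script>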