Compare commits

13 Commits

9 changed files with 3135 additions and 681 deletions

619
hook/useAudioSpeak-copy.js Normal file
View File

@@ -0,0 +1,619 @@
// useAudioSpeak.js
import { ref } from 'vue'
import globalConfig from '@/config.js';
import useUserStore from '@/stores/useUserStore';
/**
* TTS语音合成Hook (队列播放版本)
* @param {Object} config - TTS配置
* @param {string} config.apiUrl - 语音合成API地址
* @param {number} config.maxSegmentLength - 最大分段长度
* @param {number} config.minQueueSize - 最小队列缓存数量
* @param {number} config.maxRetry - 最大重试次数
* @returns {Object} TTS相关方法和状态
*/
export const useAudioSpeak = (config = {}) => {
const {
apiUrl = `${globalConfig.baseUrl}/app/speech/tts`,
maxSegmentLength = 30,
minQueueSize = 3, // 最小队列缓存数量
maxRetry = 2, // 最大重试次数
onStatusChange = () => {} // 状态变化回调
} = config
// 状态
const isSpeaking = ref(false)
const isPaused = ref(false)
const isLoading = ref(false)
// 播放状态
const currentText = ref('')
const currentSegmentIndex = ref(0)
const totalSegments = ref(0)
const progress = ref(0)
// 队列容器
let textQueue = [] // 等待转换的文本队列 [{text, index}]
let audioQueue = [] // 已经转换好的音频队列 [{blobUrl, text, index}]
// 控制标志
let isFetching = false // 是否正在请求音频
let isPlaying = false // 是否正在播放(逻辑状态)
let currentPlayingIndex = -1 // 当前正在播放的片段索引
let audioContext = null
let audioSource = null
let currentAudioUrl = '' // 当前播放的音频URL
// Extract the speakable narration from a markdown answer.
// Messages may embed job cards as ``` job-json ... ``` fenced blocks; each
// successfully parsed card becomes one spoken sentence, while the text before
// the first card and after the last card is kept verbatim.
function extractSpeechText(markdown) {
  if (!markdown || markdown.indexOf('job-json') === -1) {
    return markdown;
  }
  const jobRegex = /``` job-json\s*({[\s\S]*?})\s*```/g;
  const parsedJobs = [];
  let firstStart = -1;
  let lastEnd = 0;
  for (let m = jobRegex.exec(markdown); m !== null; m = jobRegex.exec(markdown)) {
    try {
      parsedJobs.push(JSON.parse(m[1]));
      if (firstStart === -1) {
        firstStart = m.index;
      }
      lastEnd = jobRegex.lastIndex;
    } catch (e) {
      console.warn('JSON 解析失败', e);
    }
  }
  // Convert a "XXXXX-XXXXX元/月" salary into a spoken thousands range;
  // any other format (or missing salary) is passed through untouched.
  const spokenSalary = (raw) => {
    if (!raw) return raw;
    const range = raw.match(/(\d+)-(\d+)元\/月/);
    if (!range) return raw;
    const lowK = Math.round(parseInt(range[1], 10) / 1000);
    const highK = Math.round(parseInt(range[2], 10) / 1000);
    return `${lowK}千到${highK}千每月`;
  };
  const guideText = firstStart > 0 ? markdown.slice(0, firstStart).trim() : '';
  const endingText = lastEnd < markdown.length ? markdown.slice(lastEnd).trim() : '';
  const parts = [];
  if (guideText) parts.push(guideText);
  parsedJobs.forEach((job, i) => {
    parts.push(`${i + 1} 个岗位,岗位名称是:${job.jobTitle},公司是:${job.companyName},薪资:${spokenSalary(job.salary)},地点:${job.location},学历要求:${job.education},经验要求:${job.experience}`);
  });
  if (endingText) parts.push(endingText);
  return parts.join('\n');
}
/**
 * Smart text segmentation for TTS.
 * Merges short sentences (fewer requests) and hard-splits overlong runs
 * (avoids request timeouts). Boundaries follow CJK sentence punctuation.
 * @param {string} text - raw text to split
 * @returns {string[]} trimmed, non-empty segments
 */
const _smartSplit = (text) => {
  if (!text || typeof text !== 'string') return [];
  const cleanText = text.replace(/\s+/g, ' ').trim();
  // Coarse split on sentence punctuation; delimiters are kept (captured group).
  const rawChunks = cleanText.split(/([。?!;\n\r]|……)/).filter((t) => t.trim());
  const mergedChunks = [];
  let temp = '';
  for (let i = 0; i < rawChunks.length; i++) {
    const part = rawChunks[i];
    // Delimiter chunk (one punctuation mark OR the two-character ellipsis
    // "……"): glue it onto the sentence being accumulated.
    // FIX: the previous test /^[。?!;\n\r……]$/ anchored a single-char class,
    // so the two-char "……" captured by the split never matched and leaked
    // out as a standalone TTS segment.
    if (/^(?:[。?!;\n\r]|……)$/.test(part)) {
      temp += part;
    } else {
      if (temp.length > 50) {
        // Accumulated sentence already long (>50 chars): flush it first.
        mergedChunks.push(temp);
        temp = part;
      } else if (temp.length + part.length < 15) {
        // Both pieces short (<15 chars combined): merge to cut request count.
        temp += part;
      } else {
        // Normal length: flush the previous sentence, start a new one.
        if (temp) mergedChunks.push(temp);
        temp = part;
      }
    }
  }
  if (temp) mergedChunks.push(temp);
  // Hard-split anything still longer than maxSegmentLength.
  const finalSegments = [];
  mergedChunks.forEach(segment => {
    if (segment.length <= maxSegmentLength) {
      finalSegments.push(segment);
    } else {
      for (let i = 0; i < segment.length; i += maxSegmentLength) {
        finalSegments.push(segment.substring(i, Math.min(i + maxSegmentLength, segment.length)));
      }
    }
  });
  return finalSegments.filter(seg => seg && seg.trim());
}
/**
 * Push a merged status snapshot to the onStatusChange callback.
 * Only `isLoading` and `msg` are taken from the argument; everything else
 * reflects the hook's current internal state.
 */
const _updateState = (state = {}) => {
  const loading = state.isLoading || false;
  // Keep the reactive loading flag in sync before notifying the UI.
  isLoading.value = loading;
  onStatusChange({
    isPlaying,
    isPaused: isPaused.value,
    isLoading: loading,
    msg: state.msg || '',
    currentSegmentIndex: currentSegmentIndex.value,
    totalSegments: totalSegments.value,
    progress: progress.value
  });
}
/**
 * Fetch one TTS audio segment, retrying transient failures.
 * @param {string} text - segment text to synthesize
 * @param {number} retries - attempt counter (callers normally omit it)
 * @returns {Promise<string>} blob object URL of the fetched audio
 * @throws the last error after maxRetry retries are exhausted
 */
const _fetchAudioWithRetry = async (text, retries = 0) => {
  for (let attempt = retries; ; attempt++) {
    try {
      console.log(`📶正在请求音频: "${text.substring(0, 20)}..."`);
      const token = useUserStore().token;
      const Authorization = token ? `${token}` : '';
      const response = await fetch(`${apiUrl}?text=${encodeURIComponent(text)}`, {
        method: 'GET',
        headers: {
          'Content-Type': 'application/json',
          'Authorization': encodeURIComponent(Authorization)
        }
      });
      if (!response.ok) {
        throw new Error(`HTTP错误! 状态码: ${response.status}`);
      }
      const audioBlob = await response.blob();
      // A valid audio response is never this tiny; treat it as a failure.
      if (!audioBlob || audioBlob.size < 100) {
        throw new Error('音频数据太小或无效');
      }
      console.log(`音频获取成功,大小: ${audioBlob.size} 字节`);
      return URL.createObjectURL(audioBlob);
    } catch (e) {
      if (attempt >= maxRetry) {
        throw e;
      }
      console.warn(`重试 ${attempt + 1} 次,文本: ${text.substring(0, 10)}...`);
    }
  }
}
/**
 * Buffer maintainer (the producer side of the pipeline).
 * Pulls text segments off `textQueue`, converts them to audio via the TTS
 * endpoint, and keeps `audioQueue` stocked with up to `minQueueSize`
 * ready-to-play entries. Re-invokes itself after each fetch until the
 * target is met or the text queue drains. The `isFetching` flag ensures
 * only one request is in flight at a time.
 */
const _maintainBuffer = async () => {
  // Bail out if a request is already in flight or there is no text left.
  if (isFetching || textQueue.length === 0) {
    return;
  }
  // Buffering policy: stop downloading once the audio queue holds at least
  // minQueueSize segments.
  if (audioQueue.length >= minQueueSize) {
    return;
  }
  isFetching = true;
  const textItem = textQueue.shift(); // take the next pending text segment
  try {
    const blobUrl = await _fetchAudioWithRetry(textItem.text);
    audioQueue.push({
      blobUrl,
      text: textItem.text,
      index: textItem.index
    });
    console.log(`音频已添加到队列,当前队列长度: ${audioQueue.length}`);
    // If playback stalled waiting for audio (stuck in loading), kick it now.
    if (!audioSource && !isPaused.value && isPlaying) {
      _playNext();
    }
  } catch (error) {
    console.error('缓冲维护失败:', error);
    // Even on failure, fall through so the next segment is attempted —
    // otherwise one bad segment would deadlock the pipeline.
  } finally {
    isFetching = false;
    // Recurse: keep filling until the buffer target is satisfied.
    _maintainBuffer();
  }
}
/**
 * Lazily create (or re-create after close) the shared AudioContext.
 * @returns {AudioContext} the active audio context
 */
const initAudioContext = () => {
  const needsNew = !audioContext || audioContext.state === 'closed';
  if (needsNew) {
    const Ctor = window.AudioContext || window.webkitAudioContext;
    audioContext = new Ctor();
    console.log('音频上下文已初始化');
  }
  return audioContext;
}
/**
 * Playback controller (the consumer side of the pipeline).
 * Looks for the segment whose index follows the one just played; if it is
 * not buffered yet, either finishes playback (nothing left anywhere) or
 * enters a "buffering" state until the producer catches up.
 */
const _playNext = () => {
  if (!isPlaying || isPaused.value) {
    return;
  }
  // Segments must play strictly in order.
  const nextIndex = currentPlayingIndex + 1;
  const itemIndex = audioQueue.findIndex(item => item.index === nextIndex);
  // 1. Buffer miss: either everything is done, or the network is lagging.
  if (itemIndex === -1) {
    if (textQueue.length === 0 && !isFetching) {
      // Everything has been fetched and played — finish up.
      console.log('所有音频播放完成');
      stopAudio();
      _updateState({ isPlaying: false, msg: '播放结束' });
    } else {
      // Text still pending conversion: show loading and poke the producer.
      _updateState({ isLoading: true, msg: '缓冲中...' });
      // Make sure the producer pipeline is running.
      _maintainBuffer();
    }
    return;
  }
  // 2. Normal playback path.
  const item = audioQueue.splice(itemIndex, 1)[0];
  // Release the previous segment's blob memory.
  if (currentAudioUrl) {
    URL.revokeObjectURL(currentAudioUrl);
    currentAudioUrl = '';
  }
  currentPlayingIndex = item.index;
  currentSegmentIndex.value = item.index;
  currentAudioUrl = item.blobUrl;
  // Progress as a 0-100 percentage of segments completed.
  if (totalSegments.value > 0) {
    progress.value = Math.floor(((item.index + 1) / totalSegments.value) * 100);
  }
  _updateState({
    isLoading: false,
    msg: `播放中: ${item.text.substring(0, 15)}...`
  });
  // Hand the blob to the Web Audio player.
  _playAudio(item.blobUrl, item.text, item.index);
  // One segment consumed — tell the producer to restock.
  _maintainBuffer();
}
/**
* 播放音频
*/
const _playAudio = async (blobUrl, text, index) => {
return new Promise((resolve, reject) => {
if (!isPlaying || isPaused.value) {
resolve();
return;
}
initAudioContext();
const fileReader = new FileReader();
fileReader.onload = async (e) => {
try {
const arrayBuffer = e.target.result;
const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
// 如果在此期间被暂停或停止,直接返回
if (!isPlaying || isPaused.value) {
resolve();
return;
}
audioSource = audioContext.createBufferSource();
audioSource.buffer = audioBuffer;
audioSource.connect(audioContext.destination);
audioSource.onended = () => {
console.log(`${index + 1}个片段播放完成`);
audioSource = null;
// 播放下一段
_playNext();
resolve();
};
audioSource.onerror = (error) => {
console.error('音频播放错误:', error);
audioSource = null;
// 跳过错误片段,尝试下一段
setTimeout(() => {
_playNext();
}, 100);
reject(error);
};
console.log(`▶️开始播放第${index + 1}个片段: "${text.substring(0, 20)}..."`);
// 如果音频上下文被暂停,先恢复
if (audioContext.state === 'suspended') {
await audioContext.resume();
}
audioSource.start(0);
} catch (error) {
console.error('解码或播放音频失败:', error);
audioSource = null;
// 跳过错误片段,尝试下一段
setTimeout(() => {
_playNext();
}, 100);
reject(error);
}
};
fileReader.onerror = (error) => {
console.error('读取音频文件失败:', error);
audioSource = null;
// 跳过错误片段,尝试下一段
setTimeout(() => {
_playNext();
}, 100);
reject(error);
};
// 先获取Blob再读取
fetch(blobUrl)
.then(response => response.blob())
.then(blob => {
fileReader.readAsArrayBuffer(blob);
})
.catch(error => {
console.error('获取Blob失败:', error);
reject(error);
});
});
}
/**
 * Main entry point: speak a long markdown/plain text.
 * Stops any current playback, extracts speakable text (expanding job-json
 * cards), splits it into TTS-sized segments, then starts the
 * producer/consumer pipeline (_maintainBuffer + _playNext).
 * @param {string} text - markdown or plain text to read aloud
 */
const speak = async (text) => {
  console.log('开始新的语音播报');
  // Stop any in-flight playback first.
  if (isPlaying) {
    console.log('检测到正在播放,先停止');
    stopAudio();
    // Give the audio graph a moment to release its resources.
    await new Promise(resolve => setTimeout(resolve, 200));
  }
  text = extractSpeechText(text);
  console.log('开始语音播报:', text);
  // Reset playback state.
  isPlaying = true;
  isPaused.value = false;
  isLoading.value = true;
  isSpeaking.value = true;
  currentText.value = text;
  progress.value = 0;
  currentPlayingIndex = -1;
  currentSegmentIndex.value = 0;
  // Empty both pipeline queues.
  textQueue = [];
  audioQueue = [];
  // Release the previous blob URL, if any.
  if (currentAudioUrl) {
    URL.revokeObjectURL(currentAudioUrl);
    currentAudioUrl = '';
  }
  // 1. Split the text into TTS-friendly segments.
  const segments = _smartSplit(text);
  console.log('文本分段结果:', segments);
  if (segments.length === 0) {
    console.warn('没有有效的文本可以播报');
    _updateState({ isPlaying: false, msg: '没有有效的文本' });
    isSpeaking.value = false;
    isLoading.value = false;
    return;
  }
  totalSegments.value = segments.length;
  // 2. Queue every segment for conversion.
  segments.forEach((segment, index) => {
    textQueue.push({ text: segment, index });
  });
  _updateState({ isLoading: true, msg: '初始化播放...' });
  // 3. Start the producer (text -> audio buffering).
  _maintainBuffer();
  // 4. Start the consumer shortly after, so the first fetch can land.
  setTimeout(() => {
    _playNext();
  }, 100);
}
/**
 * Pause playback by suspending the AudioContext.
 * No-op when not playing or already paused.
 */
const pause = () => {
  if (!isPlaying || isPaused.value) return;
  isPaused.value = true;
  const notify = () => _updateState({ isPaused: true, msg: '已暂停' });
  if (audioContext) {
    // Notify only once the context has actually suspended.
    audioContext.suspend().then(notify);
  } else {
    notify();
  }
}
/**
 * Resume after pause(). Prefers resuming the suspended AudioContext;
 * falls back to restarting the play loop when no source is active.
 */
const resume = () => {
  if (!(isPlaying && isPaused.value)) return;
  isPaused.value = false;
  if (audioContext && audioContext.state === 'suspended') {
    // Context was suspended by pause(): resume it in place.
    audioContext.resume().then(() => {
      _updateState({ isPaused: false, msg: '继续播放' });
    });
  } else if (audioSource) {
    // A source exists and the context is running: just notify.
    _updateState({ isPaused: false, msg: '继续播放' });
  } else {
    // Nothing playing: restart the consumer loop.
    _playNext();
  }
}
/**
 * Stop playback entirely: halts the current source, suspends the context,
 * revokes every outstanding blob URL and clears both pipeline queues.
 * Safe to call repeatedly. Exposed to callers as `cancelAudio`.
 */
const stopAudio = () => {
  console.log('停止音频播放');
  isPlaying = false;
  isPaused.value = false;
  isFetching = false;
  isSpeaking.value = false;
  isLoading.value = false;
  // Stop the active buffer source (stop() throws if never started — ignore).
  if (audioSource) {
    try {
      audioSource.stop();
      console.log('音频源已停止');
    } catch (e) {
      console.warn('停止音频源失败:', e);
    }
    audioSource = null;
  }
  // Suspend (not close) the context so the next speak() can reuse it.
  if (audioContext && audioContext.state !== 'closed') {
    audioContext.suspend();
  }
  // Free the currently playing segment's blob memory.
  if (currentAudioUrl) {
    URL.revokeObjectURL(currentAudioUrl);
    currentAudioUrl = '';
  }
  // Free every buffered-but-unplayed segment as well.
  audioQueue.forEach(item => {
    if (item.blobUrl) {
      URL.revokeObjectURL(item.blobUrl);
    }
  });
  // Reset the pipeline.
  textQueue = [];
  audioQueue = [];
  currentPlayingIndex = -1;
  progress.value = 0;
  _updateState({ isPlaying: false, msg: '已停止' });
}
/**
 * Final teardown for component unmount: stop playback, then close and
 * discard the AudioContext.
 */
const cleanup = () => {
  console.log('开始清理资源');
  stopAudio();
  const ctx = audioContext;
  if (ctx && ctx.state !== 'closed') {
    ctx.close();
    console.log('音频上下文已关闭');
  }
  audioContext = null;
  console.log('资源清理完成');
}
return {
// 状态
isSpeaking,
isPaused,
isLoading,
currentText,
currentSegmentIndex,
totalSegments,
progress,
// 方法
speak,
pause,
resume,
cancelAudio: stopAudio,
cleanup
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,657 @@
import {
ref,
onUnmounted
} from 'vue'
import config from '@/config'
export function useRealtimeRecorderOnce() {
// --- 状态定义 ---
const isRecording = ref(false)
const isProcessing = ref(false)
const recordingDuration = ref(0)
const volumeLevel = ref(0) // 0-100
const recognizedText = ref('')
const audioData = ref(null)
const audioDataForDisplay = ref([])
// --- 内部变量 ---
let durationTimer = null
// --- APP/小程序 变量 ---
let recorderManager = null;
let appAudioChunks = [];
// --- H5 变量 ---
let audioContext = null;
let mediaRecorder = null;
let h5Stream = null;
let h5AudioChunks = [];
let analyser = null;
let dataArray = null;
// --- 配置项 ---
const RECORD_CONFIG = {
duration: 600000,
sampleRate: 16000,
numberOfChannels: 1,
format: 'wav',
encodeBitRate: 16000,
frameSize: 4096
}
// --- WAV encoding helpers ---

/** Write an ASCII tag (e.g. 'RIFF') into the view at the given byte offset. */
const writeString = (view, offset, string) => {
  for (let i = 0; i < string.length; i++) {
    view.setUint8(offset + i, string.charCodeAt(i));
  }
}

/** Convert Float32 samples in [-1, 1] to little-endian 16-bit PCM at offset. */
const floatTo16BitPCM = (output, offset, input) => {
  for (let i = 0; i < input.length; i++, offset += 2) {
    const s = Math.max(-1, Math.min(1, input[i]));
    output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
  }
}

/**
 * Build a complete WAV (RIFF/PCM) file from Float32 samples in [-1, 1].
 * @param {Float32Array|number[]} samples - normalized audio samples
 * @param {number} sampleRate - samples per second (default 16 kHz)
 * @param {number} numChannels - channel count (default mono)
 * @param {number} bitsPerSample - PCM bit depth (default 16)
 * @returns {ArrayBuffer} 44-byte RIFF header followed by the PCM payload
 */
const encodeWAV = (samples, sampleRate = 16000, numChannels = 1, bitsPerSample = 16) => {
  const bytesPerSample = bitsPerSample / 8;
  const blockAlign = numChannels * bytesPerSample;
  const dataSize = samples.length * bytesPerSample;
  const view = new DataView(new ArrayBuffer(44 + dataSize));
  // RIFF chunk descriptor
  writeString(view, 0, 'RIFF');
  view.setUint32(4, 36 + dataSize, true);
  writeString(view, 8, 'WAVE');
  // "fmt " sub-chunk (PCM)
  writeString(view, 12, 'fmt ');
  view.setUint32(16, 16, true);            // Subchunk1Size (16 for PCM)
  view.setUint16(20, 1, true);             // AudioFormat (1 = PCM)
  view.setUint16(22, numChannels, true);
  view.setUint32(24, sampleRate, true);
  view.setUint32(28, sampleRate * blockAlign, true); // byte rate
  view.setUint16(32, blockAlign, true);
  view.setUint16(34, bitsPerSample, true);
  // "data" sub-chunk
  writeString(view, 36, 'data');
  view.setUint32(40, dataSize, true);
  // Sample payload: clamp to [-1, 1] and scale to signed 16-bit.
  let offset = 44;
  for (let i = 0; i < samples.length; i++) {
    const clamped = Math.max(-1, Math.min(1, samples[i]));
    view.setInt16(offset, clamped < 0 ? clamped * 0x8000 : clamped * 0x7FFF, true);
    offset += 2;
  }
  return view.buffer;
}
// --- Volume meter helpers ---

/**
 * Map Float32 PCM samples ([-1, 1]) to a 0-100 volume level.
 * Uses RMS on a dB scale (-60 dB..0 dB -> 0..100) so small-volume changes
 * stay visible; a linear fallback covers the sub-threshold edge case.
 * @param {Float32Array} float32Array - time-domain samples
 * @returns {number} volume in [0, 100]
 */
const calculateVolumeFromFloat32 = (float32Array) => {
  if (!float32Array || float32Array.length === 0) return 0;
  let energy = 0;
  for (const sample of float32Array) {
    energy += sample * sample;
  }
  const rms = Math.sqrt(energy / float32Array.length);
  let volume = 0;
  if (rms > 0) {
    // Logarithmic mapping: -60 dB (near silence) .. 0 dB -> 0 .. 100.
    const db = 20 * Math.log10(rms);
    if (db > -60) {
      volume = Math.min(100, Math.max(0, (db + 60) / 0.6));
    }
  }
  // Fallback to the old linear mapping when the dB path yielded nothing.
  if (volume === 0 && rms > 0) {
    volume = Math.min(100, Math.floor(rms * 500));
  }
  return Math.max(0, volume);
}
/**
 * Map Int16 PCM samples to a 0-100 volume level.
 * Samples are normalized to [-1, 1], then the same RMS/dB mapping as
 * calculateVolumeFromFloat32 is applied (-60 dB..0 dB -> 0..100).
 * @param {Int16Array} int16Array - raw PCM samples
 * @returns {number} volume in [0, 100]
 */
const calculateVolumeFromInt16 = (int16Array) => {
  if (!int16Array || int16Array.length === 0) return 0;
  let energy = 0;
  for (const raw of int16Array) {
    const normalized = raw / 32768; // scale to [-1, 1]
    energy += normalized * normalized;
  }
  const rms = Math.sqrt(energy / int16Array.length);
  let volume = 0;
  if (rms > 0) {
    // Logarithmic mapping: -60 dB (near silence) .. 0 dB -> 0 .. 100.
    const db = 20 * Math.log10(rms);
    if (db > -60) {
      volume = Math.min(100, Math.max(0, (db + 60) / 0.6));
    }
  }
  // Fallback to the old linear mapping when the dB path yielded nothing.
  if (volume === 0 && rms > 0) {
    volume = Math.min(100, Math.floor(rms * 500));
  }
  return Math.max(0, volume);
}
/**
 * Start recording (entry point).
 * Resets all capture state, then starts the platform-specific recorder:
 * Web Audio on H5 (HTTPS required), uni.getRecorderManager elsewhere.
 * The `// #ifdef` / `// #endif` markers below are uni-app conditional
 * compilation directives and must be preserved verbatim.
 */
const startRecording = async () => {
  if (isRecording.value) return
  try {
    // Clear results and buffers from any previous session.
    recognizedText.value = ''
    volumeLevel.value = 0
    audioData.value = null
    audioDataForDisplay.value = []
    appAudioChunks = []
    h5AudioChunks = []
    // #ifdef H5
    // getUserMedia is only available in secure contexts.
    if (location.protocol !== 'https:' && location.hostname !== 'localhost') {
      uni.showToast({
        title: 'H5录音需要HTTPS环境',
        icon: 'none'
      });
      return;
    }
    // #endif
    // #ifdef H5
    await startH5Recording();
    // #endif
    // #ifndef H5
    startAppRecording();
    // #endif
    isRecording.value = true;
    recordingDuration.value = 0;
    // Tick the visible duration counter once per second.
    durationTimer = setInterval(() => recordingDuration.value++, 1000);
    // Drive the waveform visualisation loop.
    updateAudioDataForDisplay();
  } catch (err) {
    console.error('启动失败:', err);
    uni.showToast({
      title: '启动失败: ' + (err.message || ''),
      icon: 'none'
    });
    cleanup();
  }
}
/**
 * H5 (browser) recording via Web Audio: captures 16 kHz mono Float32 frames
 * into `h5AudioChunks` and feeds an AnalyserNode for the live volume meter.
 * The WAV file is assembled later in processAudioData.
 * NOTE(review): ScriptProcessorNode is deprecated; consider migrating to
 * AudioWorklet when browser support requirements allow.
 * @throws propagates getUserMedia / AudioContext failures to startRecording
 */
const startH5Recording = async () => {
  try {
    // 1. Acquire the microphone stream.
    const stream = await navigator.mediaDevices.getUserMedia({
      audio: {
        sampleRate: 16000,
        channelCount: 1,
        echoCancellation: true,
        noiseSuppression: true,
        autoGainControl: false
      }
    });
    h5Stream = stream;
    // 2. Create an AudioContext for processing.
    const AudioContext = window.AudioContext || window.webkitAudioContext;
    audioContext = new AudioContext({
      sampleRate: 16000,
      latencyHint: 'interactive'
    });
    const source = audioContext.createMediaStreamSource(stream);
    // Analyser feeds the volume meter only.
    analyser = audioContext.createAnalyser();
    analyser.fftSize = 256;
    analyser.smoothingTimeConstant = 0.8;
    dataArray = new Float32Array(analyser.frequencyBinCount);
    source.connect(analyser);
    // ScriptProcessor collects the raw PCM frames.
    const processor = audioContext.createScriptProcessor(4096, 1, 1);
    // FIX: a second `audioSamples` array used to duplicate every captured
    // frame sample-by-sample without ever being read — an unbounded memory
    // leak on long recordings. h5AudioChunks already keeps the full audio.
    processor.onaudioprocess = (e) => {
      if (!isRecording.value) return;
      const inputData = e.inputBuffer.getChannelData(0);
      // Volume meter update.
      analyser.getFloatTimeDomainData(dataArray);
      const volume = calculateVolumeFromFloat32(dataArray);
      volumeLevel.value = volume;
      // Copy the frame — the engine reuses inputData's underlying buffer.
      const buffer = new Float32Array(inputData.length);
      buffer.set(inputData);
      h5AudioChunks.push(buffer);
    };
    source.connect(processor);
    processor.connect(audioContext.destination);
  } catch (err) {
    console.error('H5录音启动失败:', err);
    throw err;
  }
}
/**
 * Tear down all H5 (browser) recording resources: close the AudioContext,
 * stop the microphone tracks, and drop every reference.
 */
const stopH5Resources = () => {
  if (audioContext && audioContext.state !== 'closed') {
    audioContext.close();
  }
  if (h5Stream) {
    for (const track of h5Stream.getTracks()) {
      track.stop();
    }
  }
  audioContext = null;
  analyser = null;
  dataArray = null;
  h5Stream = null;
}
/**
 * APP / mini-program recording via uni.getRecorderManager().
 * Frames arrive as Int16 PCM ArrayBuffers through onFrameRecorded; each is
 * buffered in `appAudioChunks` and also used to update the volume meter.
 * Configuration comes from the shared RECORD_CONFIG (16 kHz mono WAV).
 */
const startAppRecording = () => {
  recorderManager = uni.getRecorderManager();
  recorderManager.onFrameRecorded((res) => {
    const { frameBuffer } = res;
    if (frameBuffer && frameBuffer.byteLength > 0) {
      // Volume meter update from the raw Int16 frame.
      const int16Data = new Int16Array(frameBuffer);
      const volume = calculateVolumeFromInt16(int16Data);
      volumeLevel.value = volume;
      // Keep the raw frame for WAV assembly in processAudioData.
      appAudioChunks.push(frameBuffer);
    }
  });
  recorderManager.onStart(() => {
    // console.log('APP 16kHz WAV录音已开始');
  });
  recorderManager.onError((err) => {
    console.error('APP录音报错:', err);
    uni.showToast({
      title: '录音失败: ' + err.errMsg,
      icon: 'none'
    });
    cleanup();
  });
  recorderManager.start(RECORD_CONFIG);
}
/**
 * Stop recording (all platforms): release the microphone first, then
 * encode the captured frames to WAV and run speech recognition.
 * @returns {Promise<void>} resolves once processing has finished
 */
const stopRecording = async () => {
  if (!isRecording.value) return;
  isRecording.value = false;
  clearInterval(durationTimer);
  // Release the hardware before touching the buffered data.
  stopHardwareResource();
  // Encode + ASR on whatever was captured.
  await processAudioData();
}
/**
 * Cancel recording: stop the hardware and discard every partial result
 * without running recognition.
 */
const cancelRecording = () => {
  if (!isRecording.value) return;
  // 1. Release the microphone / recorder.
  stopHardwareResource();
  // 2. Drop any partial results and buffers.
  recognizedText.value = '';
  audioData.value = null;
  audioDataForDisplay.value = [];
  appAudioChunks = [];
  h5AudioChunks = [];
  // 3. Reset remaining state (timer, flags, recorder handle).
  cleanup();
}
/**
 * Stop the platform recorder without touching recognition state.
 * The `// #ifdef H5` / `// #endif` pair is a uni-app conditional
 * compilation directive and must be preserved verbatim.
 */
const stopHardwareResource = () => {
  // APP / mini-program recorder.
  if (recorderManager) {
    recorderManager.stop();
  }
  // H5 (browser) audio graph.
  // #ifdef H5
  stopH5Resources();
  // #endif
}
/**
 * Drive the waveform visualisation: every 50 ms generate a 31-point array
 * (values in 0.15..1) for the WaveDisplay component, shaped by the current
 * volume level plus animated noise so the bars stay alive while quiet.
 * Stops itself (and clears the display) once recording ends.
 */
const updateAudioDataForDisplay = () => {
  const updateInterval = setInterval(() => {
    if (!isRecording.value) {
      clearInterval(updateInterval);
      audioDataForDisplay.value = [];
      return;
    }
    // Latest volume reading (0-100).
    const currentVolume = volumeLevel.value;
    // Build data suitable for the WaveDisplay component.
    const data = [];
    const center = 15; // middle bar index of the 31-bar display
    const timeFactor = Date.now() / 150; // animation speed
    // Scale wave intensity by volume.
    const volumeFactor = currentVolume / 100;
    // Base noise keeps the wave subtly moving even in silence.
    const baseNoise = Math.random() * 0.1;
    for (let i = 0; i < 31; i++) {
      // Normalised distance from the centre bar.
      const distanceFromCenter = Math.abs(i - center) / center;
      // Bell-like base envelope, tallest in the middle.
      const basePattern = 1 - Math.pow(distanceFromCenter, 1.2);
      // Travelling-wave animation term.
      const dynamicEffect = Math.sin(timeFactor + i * 0.3) * 0.3;
      let value;
      if (volumeFactor > 0.1) {
        // Audible input: volume dominates the bar height.
        value = volumeFactor * 0.8 * basePattern +
          volumeFactor * 0.4 +
          dynamicEffect * volumeFactor * 0.5;
      } else {
        // Quiet: animation + noise only.
        value = basePattern * 0.2 +
          dynamicEffect * 0.1 +
          baseNoise;
      }
      // Clamp into the display's valid range.
      value = Math.max(0.15, Math.min(1, value));
      // Small per-bar random jitter.
      const randomVariance = volumeFactor > 0.1 ? 0.15 : 0.05;
      value += (Math.random() - 0.5) * randomVariance;
      value = Math.max(0.15, Math.min(1, value));
      data.push(value);
    }
    audioDataForDisplay.value = data;
  }, 50);
}
/**
 * Assemble the captured frames into a single 16 kHz mono 16-bit WAV blob
 * and submit it to the ASR endpoint. Platform branches are selected at
 * build time by uni-app conditional compilation (#ifdef / #ifndef below —
 * preserve them verbatim). Frame buffers are always cleared afterwards.
 */
const processAudioData = async () => {
  if (isProcessing.value) return;
  isProcessing.value = true;
  try {
    let audioBlob = null;
    // #ifdef H5
    // H5: concatenate the Float32 frames and encode a WAV file.
    if (h5AudioChunks.length > 0) {
      const totalLength = h5AudioChunks.reduce((sum, chunk) => sum + chunk.length, 0);
      const mergedSamples = new Float32Array(totalLength);
      let offset = 0;
      h5AudioChunks.forEach(chunk => {
        mergedSamples.set(chunk, offset);
        offset += chunk.length;
      });
      const wavBuffer = encodeWAV(mergedSamples, 16000, 1, 16);
      audioBlob = new Blob([wavBuffer], { type: 'audio/wav' });
    }
    // #endif
    // #ifndef H5
    // APP / mini-program: merge Int16 frames, normalise to Float32, encode.
    if (appAudioChunks.length > 0) {
      // byteLength / 2: each Int16 sample occupies two bytes.
      const totalLength = appAudioChunks.reduce((sum, chunk) => sum + chunk.byteLength / 2, 0);
      const mergedInt16 = new Int16Array(totalLength);
      let offset = 0;
      appAudioChunks.forEach(chunk => {
        const int16Data = new Int16Array(chunk);
        mergedInt16.set(int16Data, offset);
        offset += int16Data.length;
      });
      // Normalise to [-1, 1] for the shared WAV encoder.
      const floatSamples = new Float32Array(mergedInt16.length);
      for (let i = 0; i < mergedInt16.length; i++) {
        floatSamples[i] = mergedInt16[i] / 32768;
      }
      const wavBuffer = encodeWAV(floatSamples, 16000, 1, 16);
      audioBlob = new Blob([wavBuffer], { type: 'audio/wav' });
    }
    // #endif
    if (audioBlob && audioBlob.size > 44) { // must exceed the 44-byte WAV header
      audioData.value = audioBlob;
      // debugSaveWavFile(audioBlob); // optional: save locally for debugging
      // Hand off to speech recognition.
      await sendToASR(audioBlob);
    } else {
      throw new Error('录音数据为空或无效');
    }
  } catch (error) {
    console.error('处理音频数据失败:', error);
    uni.showToast({
      title: '音频处理失败,请重试',
      icon: 'none'
    });
  } finally {
    isProcessing.value = false;
    appAudioChunks = [];
    h5AudioChunks = [];
  }
}
/**
 * Debug helper: download the given WAV blob via a temporary anchor element
 * so the recording can be inspected locally. Not called in production.
 * @param {Blob} blob - the WAV file to save
 */
const debugSaveWavFile = (blob) => {
  const url = URL.createObjectURL(blob);
  const link = document.createElement('a');
  link.href = url;
  link.download = `recording_${Date.now()}.wav`;
  document.body.appendChild(link);
  link.click();
  document.body.removeChild(link);
  URL.revokeObjectURL(url);
}
/**
 * Upload a WAV blob to the ASR endpoint and store the recognized text.
 * On HTTP success with business code 200 the text lands in recognizedText;
 * other business codes surface a toast; network errors are logged only.
 * @param {Blob} audioBlob - complete WAV file produced by processAudioData
 */
const sendToASR = async (audioBlob) => {
  try {
    const formData = new FormData();
    formData.append('file', audioBlob, 'recording.wav');
    const token = uni.getStorageSync('token') || '';
    const asrUrl = `${config.baseUrl}/app/speech/asr`
    const response = await fetch(asrUrl, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${token}`
      },
      body: formData
    });
    if (response.ok) {
      const result = await response.json();
      // Loose equality kept deliberately: the backend may send code as a string.
      if (result.code == 200) {
        isProcessing.value = false
        recognizedText.value = result.data || ''
      } else {
        // FIX: previously called `$api.msg(...)`, but `$api` is never defined
        // in this module (it is provided via inject() in components only);
        // the resulting ReferenceError was swallowed by the outer catch and
        // the user never saw the failure message. uni.showToast matches the
        // error reporting used elsewhere in this file.
        uni.showToast({
          title: result.msg || '识别失败',
          icon: 'none'
        });
      }
    } else {
      const errorText = await response.text();
      throw new Error(`ASR请求失败: ${response.status} - ${errorText}`);
    }
  } catch (error) {
    console.error('ASR识别失败:', error);
  }
}
/**
 * Reset all recorder state: stop the duration timer, clear flags and
 * meters, and drop the recorder-manager handle.
 */
const cleanup = () => {
  clearInterval(durationTimer);
  isRecording.value = false;
  isProcessing.value = false;
  recordingDuration.value = 0;
  volumeLevel.value = 0;
  audioDataForDisplay.value = [];
  // Release the platform recorder handle (no-op when already null).
  recorderManager = null;
}
onUnmounted(() => {
if (isRecording.value) {
stopRecording();
}
cleanup();
})
return {
isRecording,
isProcessing,
recordingDuration,
volumeLevel,
recognizedText,
audioData,
audioDataForDisplay,
startRecording,
stopRecording,
cancelRecording
}
}

View File

@@ -50,7 +50,7 @@
"quickapp" : {},
/* */
"mp-weixin" : {
"appid" : "wxdbdcc6a10153c99b",
"appid" : "",
"setting" : {
"urlCheck" : false,
"es6" : true,

File diff suppressed because it is too large Load Diff

View File

@@ -20,8 +20,8 @@
>
{{ item }}
</view>
<view class="chat-item self" v-if="isRecording">
<view class="message">{{ recognizedText }} {{ lastFinalText }}</view>
<view class="chat-item self" v-if="isRecording || isProcessing">
<view class="message">{{ recognizedText || (isProcessing ? '正在识别语音...' : '正在录音 '+recordingDuration+'s') }}</view>
</view>
</view>
<scroll-view class="chat-list scrollView" :scroll-top="scrollTop" :scroll-y="true" scroll-with-animation>
@@ -118,9 +118,8 @@
</view>
</view>
</view>
<view class="chat-item self" v-if="isRecording">
<!-- <view class="message">{{ recognizedText }} {{ lastFinalText }}</view> -->
<view class="message">{{ recognizedText }}</view>
<view class="chat-item self" v-if="isRecording || isProcessing">
<view class="message">{{ recognizedText || (isProcessing ? '正在识别语音...' : '正在录音 '+recordingDuration+'s') }}</view>
</view>
<view v-if="isTyping" class="self">
<text class="message msg-loading">
@@ -275,7 +274,7 @@ import AudioWave from './AudioWave.vue';
import WaveDisplay from './WaveDisplay.vue';
import useScreenStore from '@/stores/useScreenStore'
const screenStore = useScreenStore();
import { useAudioRecorder } from '@/hook/useRealtimeRecorder.js';
import { useRealtimeRecorderOnce } from '@/hook/useRealtimeRecorderOnce.js';
import { useAudioSpeak } from '@/hook/useAudioSpeak.js';
// 全局
const { $api, navTo, throttle } = inject('globalFunction');
@@ -290,14 +289,25 @@ import { FileValidator } from '@/utils/fileValidator.js'; //文件校验
// 语音识别
const {
isRecording,
isProcessing,
startRecording,
stopRecording,
cancelRecording,
audioDataForDisplay,
volumeLevel,
recognizedText,
lastFinalText,
} = useAudioRecorder();
recordingDuration
} = useRealtimeRecorderOnce();
watch(recognizedText, (newText) => {
console.log(newText,'++++++++')
if (newText && newText.trim() && !isProcessing.value) {
setTimeout(() => {
sendMessage(newText);
}, 300);
}
});
// 语音合成
const { speak, pause, resume, isSpeaking, isPaused, isLoading, cancelAudio,cleanup } = useAudioSpeak();
@@ -355,7 +365,7 @@ onMounted(async () => {
})
onUnmounted(()=>{
console.log('清理TTS资源')
// console.log('清理TTS资源')
cleanup()
})
@@ -382,6 +392,8 @@ function showControll(index) {
return true;
}
const sendMessage = (text) => {
const values = textInput.value || text;
showfile.value = false;
@@ -660,19 +672,12 @@ const handleTouchMove = (e) => {
}
};
const handleTouchEnd = () => {
const handleTouchEnd = async () => {
if (status.value === 'cancel') {
console.log('取消发送');
cancelRecording();
} else {
stopRecording();
if (isAudioPermission.value) {
if (recognizedText.value) {
sendMessage(recognizedText.value);
} else {
$api.msg('说话时长太短');
}
}
await stopRecording();
}
status.value = 'idle';
};
@@ -737,15 +742,15 @@ function readMarkdown(value, index) {
speak(value);
return;
}
if (isPaused.value) {
resume();
resume();
} else {
// console.log(value, speechIndex.value, index, isPaused.value)
speak(value);
pause();
}
}
function stopMarkdown(value, index) {
pause(value);
pause()
speechIndex.value = index;
}
function refreshMarkdown(index) {

View File

@@ -246,7 +246,7 @@ onLoad((parmas) => {
});
onMounted(() => {
if (isMachineEnv) {
if (isMachineEnv.value) {
startCountdown();
startScanAnimation();
faceService.start(); // 自动开始初始化流程
@@ -308,7 +308,7 @@ const cancelLogin = () => {
// 切换登录方式
const switchLoginMethod = (method) => {
if (!isMachineEnv) {
if (!isMachineEnv.value) {
return;
}
if (loginMethod.value !== method) {

View File

@@ -39,7 +39,7 @@ const useChatGroupDBStore = defineStore("messageGroup", () => {
setTimeout(async () => {
if (!baseDB.isDBReady) await baseDB.initDB();
const result = await baseDB.db.getAll(tableName.value);
console.log('result', result)
// console.log('result', result)
// 1、判断是否有数据没数据请求服务器
if (result.length) {
console.warn('本地数据库存在数据')

View File

@@ -139,7 +139,7 @@ export function createRequest(url, data = {}, method = 'GET', loading = false, h
} = resData.data
if (code === 200) {
resolve(resData.data)
console.log(resData.data.data,'接口解密')
// console.log(resData.data.data,'接口解密')
return
}
if (msg) {