feat: update
@@ -9,6 +9,7 @@ import {

import config from '@/config'

// Alibaba Cloud
export function useAudioRecorder() {
  const isRecording = ref(false)
  const isStopping = ref(false)
348 hook/useRealtimeRecorder2.js Normal file
@@ -0,0 +1,348 @@
import {
  ref,
  onUnmounted
} from 'vue'
import {
  $api
} from '../common/globalFunction'; // your request wrapper
import config from '@/config'

// Open source
export function useAudioRecorder() {
  // --- State ---
  const isRecording = ref(false)
  const isSocketConnected = ref(false)
  const recordingDuration = ref(0)
  const volumeLevel = ref(0) // 0-100
  const recognizedText = ref('')

  // --- Internal variables ---
  let socketTask = null
  let durationTimer = null

  // --- App / Mini Program variables ---
  let recorderManager = null;

  // --- H5 variables ---
  let audioContext = null;
  let scriptProcessor = null;
  let mediaStreamSource = null;
  let h5Stream = null;

  // --- Recording configuration ---
  const RECORD_CONFIG = {
    duration: 600000,
    sampleRate: 16000,
    numberOfChannels: 1,
    format: 'pcm',
    frameSize: 4096
  }

  /**
   * Build the WebSocket URL (with token)
   */
  const getWsUrl = async () => {
    let wsUrl = config.vioceBaseURl

    // Append the token
    const token = uni.getStorageSync('token') || '';
    if (token) {
      const separator = wsUrl.includes('?') ? '&' : '?';
      wsUrl = `${wsUrl}${separator}token=${encodeURIComponent(token)}`;
    }
    return wsUrl;
  }

  /**
   * Start recording (entry point)
   */
  const startRecording = async () => {
    if (isRecording.value) return

    try {
      recognizedText.value = ''
      volumeLevel.value = 0

      // #ifdef H5
      if (location.protocol !== 'https:' && location.hostname !== 'localhost') {
        uni.showToast({
          title: 'H5录音需要HTTPS环境',
          icon: 'none'
        });
        return;
      }
      // #endif

      const url = await getWsUrl()
      console.log('Connecting to ASR:', url)

      await connectSocket(url);

    } catch (err) {
      console.error('Failed to start recording:', err);
      uni.showToast({
        title: '启动失败: ' + (err.message || ''),
        icon: 'none'
      });
      cleanup();
    }
  }

  /**
   * Connect the WebSocket
   */
  const connectSocket = (url) => {
    return new Promise((resolve, reject) => {
      socketTask = uni.connectSocket({
        url: url,
        success: () => console.log('Socket connection request sent'),
        fail: (err) => reject(err)
      });

      socketTask.onOpen((res) => {
        console.log('WebSocket connected');
        isSocketConnected.value = true;

        // #ifdef H5
        startH5Recording().then(() => resolve()).catch(err => {
          socketTask.close();
          reject(err);
        });
        // #endif

        // #ifndef H5
        startAppRecording();
        resolve();
        // #endif
      });

      socketTask.onMessage((res) => {
        // Receive the recognized text
        if (res.data) {
          recognizedText.value = res.data;
        }
      });

      socketTask.onError((err) => {
        console.error('Socket error:', err);
        isSocketConnected.value = false;
        stopRecording();
      });

      socketTask.onClose(() => {
        isSocketConnected.value = false;
        console.log('Socket closed');
      });
    })
  }

  const startH5Recording = async () => {
    try {
      // 1. Get the microphone stream
      const stream = await navigator.mediaDevices.getUserMedia({
        audio: true
      });
      h5Stream = stream;

      // 2. Create the AudioContext
      const AudioContext = window.AudioContext || window.webkitAudioContext;
      audioContext = new AudioContext({
        sampleRate: 16000
      });

      mediaStreamSource = audioContext.createMediaStreamSource(stream);
      scriptProcessor = audioContext.createScriptProcessor(4096, 1, 1);

      scriptProcessor.onaudioprocess = (event) => {
        if (!isSocketConnected.value || !socketTask) return;

        const inputData = event.inputBuffer.getChannelData(0);

        calculateVolume(inputData, true);

        const buffer = new ArrayBuffer(inputData.length * 2);
        const view = new DataView(buffer);
        for (let i = 0; i < inputData.length; i++) {
          let s = Math.max(-1, Math.min(1, inputData[i]));
          view.setInt16(i * 2, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
        }

        socketTask.send({
          data: buffer,
          fail: (e) => console.error('Failed to send audio frame', e)
        });
      };

      mediaStreamSource.connect(scriptProcessor);
      scriptProcessor.connect(audioContext.destination);

      isRecording.value = true;
      recordingDuration.value = 0;
      durationTimer = setInterval(() => recordingDuration.value++, 1000);

      console.log('H5 recording started');

    } catch (err) {
      console.error('Failed to start H5 recording:', err);
      throw err;
    }
  }

  const stopH5Resources = () => {
    if (scriptProcessor) scriptProcessor.disconnect();
    if (mediaStreamSource) mediaStreamSource.disconnect();
    if (audioContext) audioContext.close();
    if (h5Stream) h5Stream.getTracks().forEach(track => track.stop());

    scriptProcessor = null;
    mediaStreamSource = null;
    audioContext = null;
    h5Stream = null;
  }

  const startAppRecording = () => {
    recorderManager = uni.getRecorderManager();

    recorderManager.onFrameRecorded((res) => {
      const {
        frameBuffer
      } = res;

      calculateVolume(frameBuffer, false);

      if (isSocketConnected.value && socketTask) {
        socketTask.send({
          data: frameBuffer
        });
      }
    });

    recorderManager.onStart(() => {
      console.log('App recording started');
      isRecording.value = true;
      recordingDuration.value = 0;
      durationTimer = setInterval(() => recordingDuration.value++, 1000);
    });

    recorderManager.onError((err) => {
      console.error('App recorder error:', err);
      cleanup();
    });

    recorderManager.start(RECORD_CONFIG);
  }
  const stopHardwareResource = () => {
    // Stop the App / Mini Program recorder
    if (recorderManager) {
      recorderManager.stop();
    }

    // Stop H5 recording
    // #ifdef H5
    if (scriptProcessor) scriptProcessor.disconnect();
    if (mediaStreamSource) mediaStreamSource.disconnect();
    if (audioContext) audioContext.close();
    if (h5Stream) h5Stream.getTracks().forEach(track => track.stop());

    scriptProcessor = null;
    mediaStreamSource = null;
    audioContext = null;
    h5Stream = null;
    // #endif
  }

  /**
   * Stop recording (shared)
   */
  const stopRecording = () => {
    // Stop the App recorder
    if (recorderManager) {
      recorderManager.stop();
    }

    // Release H5 recording resources
    // #ifdef H5
    stopH5Resources();
    // #endif

    // Close the socket
    if (socketTask) {
      socketTask.close();
    }

    cleanup();
  }

  const cancelRecording = () => {
    if (!isRecording.value) return;

    console.log('Recording cancelled - discarding result');

    // 1. Stop the hardware recorder
    stopHardwareResource();

    // 2. Force-close the socket
    if (socketTask) {
      socketTask.close();
    }

    // 3. Key step: clear the recognized text
    recognizedText.value = '';

    // 4. Clean up
    cleanup();
  }

  /**
   * Reset state
   */
  const cleanup = () => {
    clearInterval(durationTimer);
    isRecording.value = false;
    isSocketConnected.value = false;
    socketTask = null;
    recorderManager = null;
    volumeLevel.value = 0;
  }

  /**
   * Compute the volume level (handles both Float32 and Int16/ArrayBuffer input)
   */
  const calculateVolume = (data, isFloat32) => {
    let sum = 0;
    let length = 0;

    if (isFloat32) {
      length = data.length;
      for (let i = 0; i < length; i += 10) {
        sum += Math.abs(data[i]);
      }
      volumeLevel.value = Math.min(100, Math.floor((sum / (length / 10)) * 100 * 3));
    } else {
      const int16Data = new Int16Array(data);
      length = int16Data.length;
      for (let i = 0; i < length; i += 10) {
        sum += Math.abs(int16Data[i]);
      }
      const avg = sum / (length / 10);
      volumeLevel.value = Math.min(100, Math.floor((avg / 10000) * 100));
    }
  }

  onUnmounted(() => {
    if (isRecording.value) {
      stopRecording();
    }
  })

  return {
    isRecording,
    isSocketConnected,
    recordingDuration,
    volumeLevel,
    recognizedText,
    startRecording,
    stopRecording,
    cancelRecording
  }
}
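
For context, a minimal sketch of how a page could consume this hook. The component wiring and the import path are assumptions (only the refs and methods returned above come from the hook itself); it assumes '@/' resolves to the project root, as it does for '@/config'.

// Hypothetical consumer, e.g. a <script setup> block in a chat page
import { useAudioRecorder } from '@/hook/useRealtimeRecorder2'

const {
  isRecording,
  volumeLevel,
  recognizedText,
  startRecording,
  stopRecording,
  cancelRecording
} = useAudioRecorder()

// Hold-to-talk style bindings: start on press, send on release, discard on swipe-away
const onPressStart = () => startRecording()
const onRelease = () => stopRecording()      // keeps recognizedText for submission
const onSwipeAway = () => cancelRecording()  // closes the socket and clears recognizedText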
@@ -9,8 +9,9 @@ import {
  onUnload
} from '@dcloudio/uni-app'
import WavDecoder from '@/lib/wav-decoder@1.3.0.js'
import config from '@/config'

export function useTTSPlayer(wsUrl) {
export function useTTSPlayer() {
  const isSpeaking = ref(false)
  const isPaused = ref(false)
  const isComplete = ref(false)
@@ -89,12 +90,13 @@ export function useTTSPlayer(wsUrl) {

  const initWebSocket = () => {
    const thisPlayId = currentPlayId
    socket = new WebSocket(wsUrl)
    socket = new WebSocket(config.speechSynthesis)
    socket.binaryType = 'arraybuffer'

    socket.onopen = () => {
      if (pendingText && thisPlayId === activePlayId) {
        const seepdText = extractSpeechText(pendingText)
        console.log(seepdText)
        socket.send(seepdText)
        pendingText = null
      }
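
Both hooks read their endpoints from '@/config', which is not part of this diff. A sketch of the assumed shape, using only the keys the hooks actually reference (the values are placeholders, and 'vioceBaseURl' is spelled exactly as in the code):

// @/config (assumed shape, not in this commit)
export default {
  vioceBaseURl: 'wss://example.com/asr',         // ASR WebSocket base used by useAudioRecorder
  speechSynthesis: 'wss://example.com/tts',      // TTS WebSocket URL used by the patched useTTSPlayer
  speechSynthesis2: 'https://example.com/piper'  // HTTP base; useTTSPlayer2 rewrites http -> ws and appends /ws/synthesize
}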
333 hook/useTTSPlayer2.js Normal file
@@ -0,0 +1,333 @@
import {
  ref,
  onUnmounted,
  onMounted
} from 'vue'
// Keep these imports in a uni-app environment; remove them for plain web Vue 3
import {
  onHide,
  onUnload
} from '@dcloudio/uni-app'
import config from '@/config'

/**
 * Piper TTS playback hook (WebSocket + MSE streaming, with cancelAudio)
 * Dependency: the backend must strip the MP3 ID3 tags (-map_metadata -1)
 */
export function useTTSPlayer() {
  // State
  const isSpeaking = ref(false)
  const isPaused = ref(false)
  const isLoading = ref(false)

  // Core objects
  let audio = null
  let mediaSource = null
  let sourceBuffer = null
  let ws = null

  // Buffer queue management
  let bufferQueue = []
  let isAppending = false
  let isStreamEnded = false

  // Initialize the Audio element listeners (runs only once)
  const initAudioElement = () => {
    if (!audio && typeof window !== 'undefined') {
      audio = new Audio()

      // Error listener
      audio.addEventListener('error', (e) => {
        // Ignore errors caused by a manual stop (src was cleared)
        if (!audio.src) return
        console.error('Audio Player Error:', e)
        resetState()
      })

      // Playback-ended listener
      audio.addEventListener('ended', () => {
        resetState()
      })
    }
  }

  /**
   * Core speak method (WebSocket)
   * @param {string} text - the text to read aloud
   */
  const speak = async (text) => {
    if (!text) return

    // 1. Extract the text
    const processedText = extractSpeechText(text)
    if (!processedText) return

    // 2. Fully clear the previous state
    cancelAudio()
    initAudioElement()

    isLoading.value = true
    isSpeaking.value = true
    isPaused.value = false
    isStreamEnded = false

    // 3. Check environment support
    if (!window.MediaSource || !window.WebSocket) {
      console.error('MediaSource or WebSocket is not supported in this environment')
      resetState()
      return
    }

    try {
      // 4. Initialize MSE
      mediaSource = new MediaSource()
      // Bind MSE to the Audio element
      audio.src = URL.createObjectURL(mediaSource)

      // Listen for the MSE open event
      mediaSource.addEventListener('sourceopen', () => {
        // Guard against multiple triggers
        if (mediaSource.sourceBuffers.length > 0) return
        startWebSocketStream(processedText)
      })

      // Try to play (handle the browser autoplay policy)
      const playPromise = audio.play()
      if (playPromise !== undefined) {
        playPromise.catch(e => {
          console.warn('Autoplay was blocked (user interaction required):', e)
          // Keep isSpeaking true; the UI shows a play button and resume() is called on tap
        })
      }

    } catch (err) {
      console.error('TTS Initialization Failed:', err)
      cancelAudio()
    }
  }

  // Start the WebSocket streaming flow
  const startWebSocketStream = (text) => {
    const mime = 'audio/mpeg'

    // 4.1 Create the SourceBuffer
    try {
      sourceBuffer = mediaSource.addSourceBuffer(mime)
      sourceBuffer.addEventListener('updateend', () => {
        isAppending = false
        processQueue()
      })
    } catch (e) {
      console.error('SourceBuffer Create Failed:', e)
      return
    }

    // 4.2 Build the WebSocket URL
    let baseUrl = config.speechSynthesis2 || ''
    baseUrl = baseUrl.replace(/\/$/, '')
    const wsUrl = baseUrl.replace(/^http/, 'ws') + '/ws/synthesize'

    // 4.3 Open the connection
    ws = new WebSocket(wsUrl)
    ws.binaryType = 'arraybuffer' // important: receive binary frames as ArrayBuffer

    ws.onopen = () => {
      // console.log('WS Open')
      ws.send(JSON.stringify({
        text: text,
        speaker_id: 0,
        length_scale: 1.0,
        noise_scale: 0.667
      }))
      isLoading.value = false
    }

    ws.onmessage = (event) => {
      if (event.data instanceof ArrayBuffer) {
        bufferQueue.push(event.data)
        processQueue()
      }
    }

    ws.onerror = (e) => {
      console.error('WS Error:', e)
      cancelAudio()
    }

    ws.onclose = () => {
      // console.log('WS Closed')
      isStreamEnded = true
      // Check whether the MSE stream should be ended
      checkEndOfStream()
    }
  }

  // Process the buffer queue
  const processQueue = () => {
    if (!sourceBuffer || sourceBuffer.updating || bufferQueue.length === 0) {
      // If the queue is empty and the stream has ended, try to end MSE
      if (bufferQueue.length === 0 && isStreamEnded && !sourceBuffer?.updating) {
        checkEndOfStream()
      }
      return
    }

    isAppending = true
    const chunk = bufferQueue.shift()

    try {
      sourceBuffer.appendBuffer(chunk)
    } catch (e) {
      // console.error('AppendBuffer Error:', e)
      isAppending = false
    }
  }

  // End the MSE stream
  const checkEndOfStream = () => {
    if (mediaSource && mediaSource.readyState === 'open' && bufferQueue.length === 0 && !sourceBuffer?.updating) {
      try {
        mediaSource.endOfStream()
      } catch (e) {}
    }
  }
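
  // Editor's note: an illustrative sketch, not part of the original hook. The header comment
  // assumes the backend already strips ID3 tags (-map_metadata -1); if a tag ever slipped
  // through, the first chunk could be trimmed before appendBuffer with a hypothetical helper:
  //
  // const stripId3 = (buf) => {
  //   const b = new Uint8Array(buf)
  //   // An ID3v2 tag starts with the ASCII bytes "I" "D" "3"
  //   if (b.length < 10 || b[0] !== 0x49 || b[1] !== 0x44 || b[2] !== 0x33) return buf
  //   // Bytes 6-9 store the tag size as four 7-bit ("syncsafe") values
  //   const size = (b[6] << 21) | (b[7] << 14) | (b[8] << 7) | b[9]
  //   return buf.slice(10 + size)
  // }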

  const pause = () => {
    if (audio && !audio.paused) {
      audio.pause()
      isPaused.value = true
      isSpeaking.value = false
    }
  }

  const resume = () => {
    if (audio && audio.paused) {
      audio.play()
      isPaused.value = false
      isSpeaking.value = true
    }
  }

  // === New / core method: cancel and stop ===
  const cancelAudio = () => {
    // 1. Disconnect the WebSocket (stop receiving data)
    if (ws) {
      // Remove the listeners to avoid spurious errors
      ws.onclose = null
      ws.onerror = null
      ws.onmessage = null
      ws.close()
      ws = null
    }

    // 2. Stop audio playback
    if (audio) {
      audio.pause()
      // Release the Blob URL memory
      if (audio.src) {
        URL.revokeObjectURL(audio.src)
        audio.removeAttribute('src')
      }
      audio.currentTime = 0
    }

    // 3. Clean up the MSE objects
    if (mediaSource) {
      try {
        if (mediaSource.readyState === 'open') {
          mediaSource.endOfStream()
        }
      } catch (e) {}
      mediaSource = null
    }

    sourceBuffer = null
    bufferQueue = []
    isAppending = false
    isStreamEnded = false

    // 4. Reset the UI state
    resetState()
  }

  // Helper that only resets the UI flags
  const resetState = () => {
    isSpeaking.value = false
    isPaused.value = false
    isLoading.value = false
  }

  // Alias stop -> cancelAudio (kept for compatibility)
  const stop = cancelAudio

  // === Lifecycle ===
  onMounted(() => {
    initAudioElement()
  })

  onUnmounted(() => {
    cancelAudio()
    audio = null
  })

  if (typeof onHide === 'function') onHide(cancelAudio)
  if (typeof onUnload === 'function') onUnload(cancelAudio)

  return {
    speak,
    pause,
    resume,
    stop,
    cancelAudio, // newly exported
    isSpeaking,
    isPaused,
    isLoading
  }
}

/**
 * Text extraction logic
 */
function extractSpeechText(markdown) {
  if (!markdown || markdown.indexOf('job-json') === -1) {
    return markdown;
  }

  const jobRegex = /``` job-json\s*({[\s\S]*?})\s*```/g;
  const jobs = [];
  let match;
  let lastJobEndIndex = 0;
  let firstJobStartIndex = -1;

  while ((match = jobRegex.exec(markdown)) !== null) {
    const jobStr = match[1];
    try {
      const job = JSON.parse(jobStr);
      jobs.push(job);
      if (firstJobStartIndex === -1) {
        firstJobStartIndex = match.index;
      }
      lastJobEndIndex = jobRegex.lastIndex;
    } catch (e) {
      console.warn('Failed to parse job-json block', e);
    }
  }

  const guideText = firstJobStartIndex > 0 ?
    markdown.slice(0, firstJobStartIndex).trim() : '';

  const endingText = lastJobEndIndex < markdown.length ?
    markdown.slice(lastJobEndIndex).trim() : '';

  const jobTexts = jobs.map((job, index) => {
    return `第 ${index + 1} 个岗位,岗位名称是:${job.jobTitle},公司是:${job.companyName},薪资:${job.salary},地点:${job.location},学历要求:${job.education},经验要求:${job.experience}。`;
  });

  const finalTextParts = [];
  if (guideText) finalTextParts.push(guideText);
  finalTextParts.push(...jobTexts);
  if (endingText) finalTextParts.push(endingText);

  return finalTextParts.join('\n');
}
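
To round this out, a minimal sketch of how a page might drive the new player. The wiring and the import path are assumptions; only speak, pause, resume, cancelAudio and the three flags come from the hook itself.

// Hypothetical consumer, e.g. a <script setup> block in the chat page
import { useTTSPlayer } from '@/hook/useTTSPlayer2'

const { speak, pause, resume, cancelAudio, isSpeaking, isPaused, isLoading } = useTTSPlayer()

// Read a streamed answer aloud; job-json blocks are summarised by extractSpeechText internally
const onPlay = (markdown) => speak(markdown)
const onToggle = () => (isPaused.value ? resume() : pause())
const onLeavePage = () => cancelAudio() // drops the WebSocket and MSE buffers immediately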