2025-10-24 09:34:42 +08:00
commit c0e46d1ae7
282 changed files with 33820 additions and 0 deletions

hook/useColumnCount.js Normal file

@@ -0,0 +1,71 @@
// hook/useColumnCount.js
import {
ref,
onMounted,
onUnmounted,
watch
} from 'vue'
export function useColumnCount(onChange = () => {}) {
const columnCount = ref(0)
const columnSpace = ref(2)
const calcColumn = () => {
const width = uni.getSystemInfoSync().windowWidth
let count = 2
if (width >= 1000) {
count = 5
} else if (width >= 750) {
count = 4
} else if (width >= 500) {
count = 3
} else {
count = 2
}
if (count !== columnCount.value) {
columnCount.value = count
}
// Spacing: linear interpolation from 2 (count = 2) down to 1 (count = 5)
const spacing = 2 - (count - 2) * (1 / 3)
columnSpace.value = spacing
}
onMounted(() => {
columnCount.value = 2
calcColumn()
window.addEventListener('resize', calcColumn)
})
onUnmounted(() => {
window.removeEventListener('resize', calcColumn)
})
// Run the callback whenever the column count changes
watch(columnCount, (newVal, oldVal) => {
if (newVal !== oldVal) {
onChange(newVal)
}
})
return {
columnCount,
columnSpace
}
}
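A minimal usage sketch, not part of this commit; the '@/hook' import alias and the template wiring are assumptions:

// Hypothetical <script setup> of a waterfall-style list page
import { useColumnCount } from '@/hook/useColumnCount' // assumed alias for the hook above
const { columnCount, columnSpace } = useColumnCount((count) => {
  // e.g. redistribute waterfall items when the column count changes
  console.log('column count is now', count)
})
// Template (assumed): render columnCount columns and use columnSpace as the gap between them.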

hook/usePagination.js Normal file

@@ -0,0 +1,173 @@
import {
ref,
reactive,
watch,
isRef,
nextTick
} from 'vue'
export function usePagination(
requestFn,
transformFn,
options = {}
) {
const list = ref([])
const loading = ref(false)
const error = ref(false)
const finished = ref(false)
const firstLoading = ref(true)
const empty = ref(false)
const {
pageSize = 10,
search = {},
autoWatchSearch = false,
debounceTime = 300,
autoFetch = false,
// Response field mapping
dataKey = 'rows',
totalKey = 'total',
// Request parameter name mapping for page / size
pageField = 'current',
sizeField = 'pageSize',
onBeforeRequest,
onAfterRequest
} = options
const pageState = reactive({
page: 1,
pageSize: isRef(pageSize) ? pageSize.value : pageSize,
total: 0,
maxPage: 1,
search: isRef(search) ? search.value : search
})
let debounceTimer = null
const fetchData = async (type = 'refresh') => {
if (loading.value) return Promise.resolve()
loading.value = true
error.value = false
if (typeof onBeforeRequest === 'function') {
try {
onBeforeRequest(type, pageState)
} catch (err) {
console.warn('onBeforeRequest threw:', err)
}
}
if (type === 'refresh') {
pageState.page = 1
finished.value = false
if (list.value.length === 0) {
firstLoading.value = true
}
} else if (type === 'loadMore') {
if (pageState.page >= pageState.maxPage) {
loading.value = false
finished.value = true
return Promise.resolve('no more')
}
pageState.page += 1
}
const params = {
...pageState.search,
[pageField]: pageState.page,
[sizeField]: pageState.pageSize,
}
try {
const res = await requestFn(params)
const rawData = res[dataKey]
const total = res[totalKey] || 99999999 // fall back to a very large total when the API omits it
const data = typeof transformFn === 'function' ? transformFn(rawData) : rawData
if (type === 'refresh') {
list.value = data
} else {
list.value.push(...data)
}
pageState.total = total
pageState.maxPage = Math.ceil(total / pageState.pageSize)
finished.value = list.value.length >= total
empty.value = list.value.length === 0
} catch (err) {
console.error('Pagination request failed:', err)
error.value = true
} finally {
loading.value = false
firstLoading.value = false
if (typeof onAfterRequest === 'function') {
try {
onAfterRequest(type, pageState, {
error: error.value
})
} catch (err) {
console.warn('onAfterRequest threw:', err)
}
}
}
}
const refresh = () => fetchData('refresh')
const loadMore = () => fetchData('loadMore')
const resetPagination = () => {
list.value = []
pageState.page = 1
pageState.total = 0
pageState.maxPage = 1
finished.value = false
error.value = false
firstLoading.value = true
empty.value = false
}
if (autoWatchSearch && isRef(search)) {
watch(search, (newVal) => {
pageState.search = newVal
clearTimeout(debounceTimer)
debounceTimer = setTimeout(() => {
refresh()
}, debounceTime)
}, {
deep: true
})
}
// Keep pageState in sync when a ref is passed for pageSize (watching a plain number would warn)
if (isRef(pageSize)) {
watch(pageSize, (newVal) => {
pageState.pageSize = newVal
})
}
if (autoFetch) {
nextTick(() => {
refresh()
})
}
return {
list,
loading,
error,
finished,
firstLoading,
empty,
pageState,
refresh,
loadMore,
resetPagination
}
}
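A usage sketch with hedged assumptions: the endpoint, the '@/...' aliases, and the $api request helper accepting a params object are not defined by this commit:

import { ref } from 'vue'
import { usePagination } from '@/hook/usePagination' // assumed alias
import { $api } from '@/common/globalFunction' // assumed path for the existing request helper
const search = ref({ keyword: '' })
const { list, loading, finished, firstLoading, refresh, loadMore } = usePagination(
  (params) => $api.createRequest('/app/job/list', params), // hypothetical endpoint and params support
  (rows) => rows.map(r => ({ ...r, checked: false })), // optional transform of res.rows
  { search, autoWatchSearch: true, autoFetch: true, pageSize: 20 }
)
// Wire refresh() to pull-to-refresh and loadMore() to the scroll-to-bottom event.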

hook/useRealtimeRecorder.js Normal file

@@ -0,0 +1,258 @@
import {
ref,
onUnmounted
} from 'vue'
import {
$api,
} from '../common/globalFunction';
import config from '@/config'
export function useAudioRecorder() {
const isRecording = ref(false)
const isStopping = ref(false)
const isSocketConnected = ref(false)
const recordingDuration = ref(0)
const audioDataForDisplay = ref(new Array(16).fill(0))
const volumeLevel = ref(0)
const recognizedText = ref('')
const lastFinalText = ref('')
let audioStream = null
let audioContext = null
let audioInput = null
let scriptProcessor = null
let websocket = null
let durationTimer = null
// Build a UUID-v4-style id (dashes stripped) for task_id / message_id
const generateUUID = () => {
return ([1e7] + -1e3 + -4e3 + -8e3 + -1e11)
.replace(/[018]/g, c =>
(c ^ crypto.getRandomValues(new Uint8Array(1))[0] & 15 >> c / 4).toString(16)
).replace(/-/g, '')
}
const fetchWsUrl = async () => {
const res = await $api.createRequest('/app/speech/getToken')
if (res.code !== 200) throw new Error('Failed to obtain the speech-recognition wsUrl')
const wsUrl = res.msg
return wsUrl
}
function extractWsParams(wsUrl) {
const url = new URL(wsUrl)
const appkey = url.searchParams.get('appkey')
const token = url.searchParams.get('token')
return {
appkey,
token
}
}
const connectWebSocket = async () => {
const wsUrl = await fetchWsUrl()
const {
appkey,
token
} = extractWsParams(wsUrl)
return new Promise((resolve, reject) => {
websocket = new WebSocket(wsUrl)
websocket.binaryType = 'arraybuffer'
websocket.onopen = () => {
isSocketConnected.value = true
// Send the StartTranscription message (modeled on demo.html)
const startTranscriptionMessage = {
header: {
appkey: appkey, // optional here; can be left empty or carried in the wsUrl
namespace: 'SpeechTranscriber',
name: 'StartTranscription',
task_id: generateUUID(),
message_id: generateUUID()
},
payload: {
format: 'pcm',
sample_rate: 16000,
enable_intermediate_result: true,
enable_punctuation_prediction: true,
enable_inverse_text_normalization: true
}
}
websocket.send(JSON.stringify(startTranscriptionMessage))
resolve()
}
websocket.onerror = (e) => {
isSocketConnected.value = false
reject(e)
}
websocket.onclose = () => {
isSocketConnected.value = false
}
websocket.onmessage = (e) => {
const msg = JSON.parse(e.data)
const name = msg?.header?.name
const payload = msg?.payload
switch (name) {
case 'TranscriptionResultChanged': {
// Intermediate result (optionally use stash_result.unfixedText for finer updates)
const text = payload?.unfixed_result || payload?.result || ''
lastFinalText.value = text
break
}
case 'SentenceBegin': {
// Optional: a new sentence has started; reset per-sentence state here if needed
break
}
case 'SentenceEnd': {
const text = payload?.result || ''
const confidence = payload?.confidence || 0
if (text && confidence > 0.5) {
recognizedText.value += text
lastFinalText.value = ''
}
break
}
case 'TranscriptionStarted': {
// The transcription task has started
break
}
case 'TranscriptionCompleted': {
lastFinalText.value = ''
// All transcription has finished
break
}
case 'TaskFailed': {
console.error('Transcription failed:', msg?.header?.status_text)
break
}
default:
console.log('Unknown message type:', name, msg)
break
}
}
})
}
const startRecording = async () => {
if (isRecording.value) return
try {
recognizedText.value = ''
lastFinalText.value = ''
await connectWebSocket()
audioStream = await navigator.mediaDevices.getUserMedia({
audio: true
})
audioContext = new (window.AudioContext || window.webkitAudioContext)({
sampleRate: 16000
})
audioInput = audioContext.createMediaStreamSource(audioStream)
scriptProcessor = audioContext.createScriptProcessor(2048, 1, 1)
scriptProcessor.onaudioprocess = (event) => {
// Convert Float32 samples to 16-bit PCM and track RMS volume for the waveform UI
const input = event.inputBuffer.getChannelData(0)
const pcm = new Int16Array(input.length)
let sum = 0
for (let i = 0; i < input.length; ++i) {
const s = Math.max(-1, Math.min(1, input[i]))
pcm[i] = s * 0x7FFF
sum += s * s
}
volumeLevel.value = Math.sqrt(sum / input.length)
audioDataForDisplay.value = Array(16).fill(volumeLevel.value)
if (websocket?.readyState === WebSocket.OPEN) {
websocket.send(pcm.buffer)
}
}
audioInput.connect(scriptProcessor)
scriptProcessor.connect(audioContext.destination)
isRecording.value = true
recordingDuration.value = 0
durationTimer = setInterval(() => recordingDuration.value++, 1000)
} catch (err) {
console.error('Failed to start recording:', err)
cleanup()
}
}
const stopRecording = () => {
if (!isRecording.value || isStopping.value) return
isStopping.value = true
if (websocket?.readyState === WebSocket.OPEN) {
websocket.send(JSON.stringify({
header: {
namespace: 'SpeechTranscriber',
name: 'StopTranscription',
message_id: generateUUID()
}
}))
websocket.close()
}
cleanup()
isStopping.value = false
}
const cancelRecording = () => {
if (!isRecording.value || isStopping.value) return
isStopping.value = true
websocket?.close()
cleanup()
isStopping.value = false
}
const cleanup = () => {
clearInterval(durationTimer)
scriptProcessor?.disconnect()
audioInput?.disconnect()
audioStream?.getTracks().forEach(track => track.stop())
audioContext?.close()
audioStream = null
audioContext = null
audioInput = null
scriptProcessor = null
websocket = null
isRecording.value = false
isSocketConnected.value = false
}
onUnmounted(() => {
if (isRecording.value) stopRecording()
})
return {
isRecording,
isStopping,
isSocketConnected,
recordingDuration,
audioDataForDisplay,
volumeLevel,
recognizedText,
lastFinalText,
startRecording,
stopRecording,
cancelRecording
}
}
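A usage sketch for a push-to-talk button (H5 only, since the hook relies on getUserMedia and WebSocket); the element wiring is an assumption:

import { useAudioRecorder } from '@/hook/useRealtimeRecorder' // assumed alias
const {
  isRecording,
  recognizedText,
  lastFinalText,
  startRecording,
  stopRecording,
  cancelRecording
} = useAudioRecorder()
// e.g. @touchstart="startRecording" @touchend="stopRecording" on a mic button,
// then show recognizedText + lastFinalText as the live transcript.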

hook/useScrollDirection.js Normal file

@@ -0,0 +1,49 @@
import {
ref
} from 'vue'
export function useScrollDirection(options = {}) {
const {
threshold = 200, // accumulated scroll distance (px) before a direction change fires
throttleTime = 100, // throttle interval in ms
onChange = null // callback invoked when the scroll direction flips
} = options
const lastScrollTop = ref(0)
const accumulatedScroll = ref(0)
const isScrollingDown = ref(false)
let lastInvoke = 0
function handleScroll(e) {
const now = Date.now()
if (now - lastInvoke < throttleTime) return
lastInvoke = now
const scrollTop = e.detail.scrollTop
const delta = scrollTop - lastScrollTop.value
accumulatedScroll.value += delta
if (accumulatedScroll.value > threshold) {
if (!isScrollingDown.value) {
isScrollingDown.value = true
onChange?.(true) // notify: now scrolling down
}
accumulatedScroll.value = 0
}
if (accumulatedScroll.value < -threshold) {
if (isScrollingDown.value) {
isScrollingDown.value = false
onChange?.(false) // notify: now scrolling up
}
accumulatedScroll.value = 0
}
lastScrollTop.value = scrollTop
}
return {
isScrollingDown,
handleScroll
}
}
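A usage sketch with a uni-app scroll-view; the behaviour in the callback is an assumption:

import { useScrollDirection } from '@/hook/useScrollDirection' // assumed alias
const { isScrollingDown, handleScroll } = useScrollDirection({
  threshold: 150,
  onChange: (down) => {
    // e.g. hide a floating bar while scrolling down, show it again when scrolling up
    console.log(down ? 'scrolling down' : 'scrolling up')
  }
})
// Template (assumed): <scroll-view scroll-y @scroll="handleScroll"> ... </scroll-view>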

hook/useSpeechReader.js Normal file

@@ -0,0 +1,136 @@
import {
ref,
onBeforeUnmount,
onMounted
} from 'vue'
import {
onHide,
onUnload
} from '@dcloudio/uni-app'
export function useSpeechReader() {
const isSpeaking = ref(false)
const isPaused = ref(false)
let utterance = null
const speak = (text, options = {
lang: 'zh-CN',
rate: 0.9,
pitch: 1.2
}) => {
cancelAudio() // cancel any speech that is still playing
const speechText = extractSpeechText(text);
utterance = new SpeechSynthesisUtterance(speechText)
utterance.lang = options.lang || 'zh-CN'
utterance.rate = options.rate || 1
utterance.pitch = options.pitch || 1.1 // pitch range 0-2; slightly above 1 sounds softer
utterance.onend = () => {
isSpeaking.value = false
isPaused.value = false
}
speechSynthesis.speak(utterance)
isSpeaking.value = true
isPaused.value = false
}
const pause = () => {
if (isSpeaking.value && !isPaused.value) {
speechSynthesis.pause()
isPaused.value = true
}
}
const resume = () => {
if (isSpeaking.value && isPaused.value) {
speechSynthesis.resume()
isPaused.value = false
}
}
const cancelAudio = () => {
speechSynthesis.cancel()
isSpeaking.value = false
isPaused.value = false
}
// Stop speech when the page is refreshed or closed
onMounted(() => {
if (typeof window !== 'undefined') {
window.addEventListener('beforeunload', cancelAudio)
}
})
onBeforeUnmount(() => {
cancelAudio()
if (typeof window !== 'undefined') {
window.removeEventListener('beforeunload', cancelAudio)
}
})
onHide(cancelAudio)
onUnload(cancelAudio)
return {
speak,
pause,
resume,
cancelAudio,
isSpeaking,
isPaused,
}
}
function extractSpeechText(markdown) {
const jobRegex = /``` job-json\s*({[\s\S]*?})\s*```/g;
const jobs = [];
let match;
let lastJobEndIndex = 0;
let firstJobStartIndex = -1;
// Extract each job-json block and record where the first and last blocks sit
while ((match = jobRegex.exec(markdown)) !== null) {
const jobStr = match[1];
try {
const job = JSON.parse(jobStr);
jobs.push(job);
if (firstJobStartIndex === -1) {
firstJobStartIndex = match.index;
}
lastJobEndIndex = jobRegex.lastIndex;
} catch (e) {
console.warn('Failed to parse job-json block', e);
}
}
// Guide text: everything before the first job-json block
const guideText = firstJobStartIndex > 0 ?
markdown.slice(0, firstJobStartIndex).trim() :
'';
// Closing text: everything after the last job-json block
const endingText = lastJobEndIndex < markdown.length ?
markdown.slice(lastJobEndIndex).trim() :
'';
// Turn each job object into a spoken sentence
const jobTexts = jobs.map((job, index) => {
return `${index + 1} 个岗位,岗位名称是:${job.jobTitle},公司是:${job.companyName},薪资:${job.salary},地点:${job.location},学历要求:${job.education},经验要求:${job.experience}`;
});
// Assemble the final speech text
const finalTextParts = [];
if (guideText) finalTextParts.push(guideText);
finalTextParts.push(...jobTexts);
if (endingText) finalTextParts.push(endingText);
return finalTextParts.join('\n');
}
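A usage sketch; aiReply is a hypothetical markdown string (e.g. an AI chat answer containing job-json blocks):

import { useSpeechReader } from '@/hook/useSpeechReader' // assumed alias
const { speak, pause, resume, cancelAudio, isSpeaking, isPaused } = useSpeechReader()
// aiReply: markdown that may contain job-json code blocks (hypothetical variable)
speak(aiReply, { lang: 'zh-CN', rate: 0.9, pitch: 1.2 })
// pause() / resume() toggle playback; cancelAudio() stops it entirely.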

hook/useTTSPlayer.js Normal file

@@ -0,0 +1,249 @@
import {
ref,
onUnmounted,
onBeforeUnmount,
onMounted
} from 'vue'
import {
onHide,
onUnload
} from '@dcloudio/uni-app'
import WavDecoder from '@/lib/wav-decoder@1.3.0.js'
export function useTTSPlayer(wsUrl) {
const isSpeaking = ref(false)
const isPaused = ref(false)
const isComplete = ref(false)
const audioContext = new (window.AudioContext || window.webkitAudioContext)()
let playTime = audioContext.currentTime
let sourceNodes = []
let socket = null
let sampleRate = 16000
let numChannels = 1
let isHeaderDecoded = false
let pendingText = null
let currentPlayId = 0
let activePlayId = 0
const speak = (text) => {
currentPlayId++
const myPlayId = currentPlayId
reset()
pendingText = text
activePlayId = myPlayId
}
const pause = () => {
if (audioContext.state === 'running') {
audioContext.suspend()
isPaused.value = true
isSpeaking.value = false
}
}
const resume = () => {
if (audioContext.state === 'suspended') {
audioContext.resume()
isPaused.value = false
isSpeaking.value = true
}
}
const cancelAudio = () => {
stop()
}
const stop = () => {
isSpeaking.value = false
isPaused.value = false
isComplete.value = false
playTime = audioContext.currentTime
sourceNodes.forEach(node => {
try {
node.stop()
node.disconnect()
} catch (e) {}
})
sourceNodes = []
if (socket) {
socket.close()
socket = null
}
isHeaderDecoded = false
pendingText = null
}
const reset = () => {
stop()
isSpeaking.value = false
isPaused.value = false
isComplete.value = false
playTime = audioContext.currentTime
initWebSocket()
}
const initWebSocket = () => {
const thisPlayId = currentPlayId
socket = new WebSocket(wsUrl)
socket.binaryType = 'arraybuffer'
socket.onopen = () => {
if (pendingText && thisPlayId === activePlayId) {
const speechText = extractSpeechText(pendingText)
socket.send(speechText)
pendingText = null
}
}
socket.onmessage = async (e) => {
if (thisPlayId !== activePlayId) return // ignore messages from a superseded playback
if (typeof e.data === 'string') {
try {
const msg = JSON.parse(e.data)
if (msg.status === 'complete') {
isComplete.value = true
setTimeout(() => {
if (thisPlayId === activePlayId) {
isSpeaking.value = false
}
}, (playTime - audioContext.currentTime) * 1000)
}
} catch (err) {
console.log('[TTSPlayer] non-JSON text message:', e.data)
}
} else if (e.data instanceof ArrayBuffer) {
if (!isHeaderDecoded) {
try {
const decoded = await WavDecoder.decode(e.data)
sampleRate = decoded.sampleRate
numChannels = decoded.channelData.length
decoded.channelData.forEach((channel, i) => {
const audioBuffer = audioContext.createBuffer(1, channel.length,
sampleRate)
audioBuffer.copyToChannel(channel, 0)
playBuffer(audioBuffer)
})
isHeaderDecoded = true
} catch (err) {
console.error('WAV decode failed:', err)
}
} else {
const pcm = new Int16Array(e.data)
const audioBuffer = pcmToAudioBuffer(pcm, sampleRate, numChannels)
playBuffer(audioBuffer)
}
}
}
}
const pcmToAudioBuffer = (pcm, sampleRate, numChannels) => {
const length = pcm.length / numChannels
const audioBuffer = audioContext.createBuffer(numChannels, length, sampleRate)
for (let ch = 0; ch < numChannels; ch++) {
const channelData = audioBuffer.getChannelData(ch)
for (let i = 0; i < length; i++) {
const sample = pcm[i * numChannels + ch]
channelData[i] = sample / 32768
}
}
return audioBuffer
}
const playBuffer = (audioBuffer) => {
if (!isSpeaking.value) {
playTime = audioContext.currentTime
}
const source = audioContext.createBufferSource()
source.buffer = audioBuffer
source.connect(audioContext.destination)
source.start(playTime)
sourceNodes.push(source)
playTime += audioBuffer.duration
isSpeaking.value = true
}
onUnmounted(() => {
stop()
})
// Stop playback when the page is refreshed or closed
onMounted(() => {
if (typeof window !== 'undefined') {
window.addEventListener('beforeunload', cancelAudio)
}
})
onBeforeUnmount(() => {
cancelAudio()
if (typeof window !== 'undefined') {
window.removeEventListener('beforeunload', cancelAudio)
}
})
onHide(cancelAudio)
onUnload(cancelAudio)
initWebSocket()
return {
speak,
pause,
resume,
cancelAudio,
isSpeaking,
isPaused,
isComplete
}
}
function extractSpeechText(markdown) {
const jobRegex = /``` job-json\s*({[\s\S]*?})\s*```/g;
const jobs = [];
let match;
let lastJobEndIndex = 0;
let firstJobStartIndex = -1;
// Extract each job-json block and record where the first and last blocks sit
while ((match = jobRegex.exec(markdown)) !== null) {
const jobStr = match[1];
try {
const job = JSON.parse(jobStr);
jobs.push(job);
if (firstJobStartIndex === -1) {
firstJobStartIndex = match.index;
}
lastJobEndIndex = jobRegex.lastIndex;
} catch (e) {
console.warn('Failed to parse job-json block', e);
}
}
// Guide text: everything before the first job-json block
const guideText = firstJobStartIndex > 0 ?
markdown.slice(0, firstJobStartIndex).trim() :
'';
// Closing text: everything after the last job-json block
const endingText = lastJobEndIndex < markdown.length ?
markdown.slice(lastJobEndIndex).trim() :
'';
// Turn each job object into a spoken sentence
const jobTexts = jobs.map((job, index) => {
return `${index + 1} 个岗位,岗位名称是:${job.jobTitle},公司是:${job.companyName},薪资:${job.salary},地点:${job.location},学历要求:${job.education},经验要求:${job.experience}`;
});
// Assemble the final speech text
const finalTextParts = [];
if (guideText) finalTextParts.push(guideText);
finalTextParts.push(...jobTexts);
if (endingText) finalTextParts.push(endingText);
return finalTextParts.join('\n');
}
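A usage sketch; the WebSocket URL is a placeholder, not the real TTS endpoint:

import { useTTSPlayer } from '@/hook/useTTSPlayer' // assumed alias
const { speak, pause, resume, cancelAudio, isSpeaking, isComplete } =
  useTTSPlayer('wss://example.com/tts/stream') // placeholder URL; substitute the project endpoint
speak(aiReply) // aiReply: hypothetical markdown string that may contain job-json blocks
// isComplete flips to true once the server reports status === 'complete'.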