// live-stream-app/src/main/ipc/workflow.ts
import { ipcMain, BrowserWindow } from "electron";
import { spawn } from "child_process";
import { showPrompt } from "../utils/tools";
import { preload, indexHtml, ELECTRON_RENDERER_URL } from "../config";
import os from "os";
import http from "http";
let InstallWindows: BrowserWindow | null = null;
export function setupWorkflowHandlers() {
let lastJobSummary = "这是我们今天介绍的第一个岗位。"; // Initial summary; must match the "first job" sentinel checked in the prompt and the reset value below
// Map of pending user-confirmation callbacks, keyed by model name
const modelDownloadCallbacks = new Map<string, { confirm: () => void; reject: (error: Error) => void }>();
// Open the model-install window
ipcMain.handle("open-install-window", async (_, args) => {
try {
if (InstallWindows) {
InstallWindows.focus();
showPrompt("下载已打开", "info");
return { success: true };
}
const { width, height, path } = args;
let installUrl = `${ELECTRON_RENDERER_URL}/#/${path}`;
console.log(installUrl);
InstallWindows = new BrowserWindow({
title: "模型下载",
width,
height,
minimizable: false, // whether the window can be minimized
maximizable: false, // whether the window can be maximized
closable: true, // whether the window can be closed
alwaysOnTop: false, // whether the window always stays above other windows
webPreferences: {
preload,
nodeIntegration: true,
contextIsolation: false,
},
});
// InstallWindows.webContents.openDevTools();
InstallWindows.on("closed", () => {
InstallWindows = null;
});
if (ELECTRON_RENDERER_URL) {
InstallWindows.loadURL(installUrl);
} else {
InstallWindows.loadFile(indexHtml, { hash: `/${path}` });
}
return { success: true };
} catch (error: any) {
return { success: false, error: error.message };
}
});
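// Renderer-side usage — a minimal sketch, not part of this module. It assumes the
// renderer can import ipcRenderer directly, which holds here because the install
// window is created with nodeIntegration: true and contextIsolation: false. The
// "download" route name and dimensions are hypothetical; the handler accepts any
// renderer hash route.
//
//   import { ipcRenderer } from "electron";
//   const result = await ipcRenderer.invoke("open-install-window", {
//     width: 600, // hypothetical dimensions
//     height: 400,
//     path: "download", // hypothetical hash route rendered in the new window
//   });
//   if (!result.success) console.error(result.error);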
// Handle the 'install-ollama-and-model' request from the renderer process
ipcMain.handle("install-ollama-and-model", async (event) => {
const webContents = event.sender; // webContents of the window that sent the request
const platform = os.platform();
const modelToPull = "qwen3:8b";
const sendStatus = (status: string) => {
if (webContents && !webContents.isDestroyed()) {
webContents.send("install-progress", { status });
}
};
try {
sendStatus("Checking Ollama installation...");
try {
await streamCommand(
"ollama",
["-v"],
webContents,
"install-progress",
);
sendStatus("Ollama is already installed.");
} catch (error) {
// Ollama is not installed; run the installer
sendStatus("Ollama not found. Starting installation...");
if (platform === "darwin" || platform === "linux") {
// macOS / Linux: use the official curl install script
const installCommand =
"curl -fsSL https://ollama.com/install.sh | sh";
await streamCommand(
"sh",
["-c", installCommand],
webContents,
"install-progress",
);
} else if (platform === "win32") {
// Windows: download the installer with PowerShell and run it silently
const psScript = `
$ProgressPreference = 'SilentlyContinue';
$tempPath = [System.IO.Path]::Combine($env:TEMP, 'OllamaSetup.exe');
Write-Host 'Downloading OllamaSetup.exe...';
Invoke-WebRequest -Uri 'https://ollama.com/download/OllamaSetup.exe' -OutFile $tempPath;
Write-Host 'Download complete. Starting silent installer...';
Start-Process -FilePath $tempPath -ArgumentList '/S' -Wait;
Write-Host 'Installation complete. Cleaning up...';
Remove-Item $tempPath;
Write-Host 'Done.';
`;
await streamCommand(
"powershell",
[
"-ExecutionPolicy",
"Bypass",
"-NoProfile",
"-Command",
psScript,
],
webContents,
"install-progress",
);
} else {
throw new Error(`Unsupported platform: ${platform}`);
}
sendStatus("Ollama installation complete.");
}
// --- Step 2: pull the model ---
sendStatus(
`Pulling model: ${modelToPull}... (This may take a while)`,
);
await streamCommand(
"ollama",
["pull", modelToPull],
webContents,
"install-progress",
);
sendStatus(`Model ${modelToPull} pull complete.`);
return {
success: true,
message: "Installation and model pull successful.",
};
} catch (error: any) {
console.error(error);
sendStatus(`Error: ${error.message}`);
return { success: false, message: error.message };
}
});
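// Renderer-side listener — a minimal sketch, not part of this module. Note that the
// handler above sends two payload shapes on the same "install-progress" channel:
// { status } from sendStatus, and { type, data } from streamCommand, so a listener
// should branch on the shape.
//
//   import { ipcRenderer } from "electron";
//   ipcRenderer.on("install-progress", (_event, payload) => {
//     if ("status" in payload) {
//       console.log("status:", payload.status);
//     } else {
//       console.log(payload.type, payload.data); // "stdout" | "stderr" chunks
//     }
//   });
//   const outcome = await ipcRenderer.invoke("install-ollama-and-model");
//   console.log(outcome.success, outcome.message);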
// Wrap the whole job-script workflow in a single IPC handler
ipcMain.handle("run-job-workflow", async (_, userQuery) => {
console.log("工作流: 正在准备工作...");
let currentJobData = userQuery || {};
let answerText = "";
try {
console.log("工作流: 正在调用 Ollama 生成脚本...");
const systemPromptTemplate = `# 角色 (Role) \n你是一个顶级的招聘KOL和直播带岗专家。你的风格是专业、中立、风趣,能一针见血地分析岗位优劣。你不是一个AI助手,你就是这个角色。 \n\n# 上下文 (Context) \n我正在运行一个自动化工作流。我会\"一个一个\"地喂给你岗位数据。\n\n# 任务 (Task) \n你的任务是执行以下两个操作,并严格按照“输出格式”返回内容:\n1. 生成口播稿:根据【输入数据A】(上一个岗位的摘要) 和【输入数据B】(当前岗位的JSON),生成一段完整的、约90秒的口播稿。\n2. 生成新摘要:为【输入数据B】的“当前岗位”生成一个简短的摘要,例如:XX公司的XX岗,以便在下一次调用时使用。\n\n# 核心指令 (Core Instruction)\n### 口播稿的生成规则 (Rules for the Script) \n1. 衔接:口播稿必须以一个自然的“过渡句”开头,基于【输入数据A】。 特殊情况:如果【输入数据A】是“这是我们今天介绍的第一个岗位。”,则开头应是“热场”或“总起”,而不是衔接。 \n2. 内容:必须介绍岗位名称 \`jobTitle\`。\n3. 提炼:从 \`jobLocation\`, \`companyName\`, \`education\`, \`experience\`, \`scale\` 中提炼“亮点 (Pro)”。 \n4. 翻译:用“人话”翻译 \`description\`。\n5. 视角:你是在“评测”这个岗位,而不是在“推销”。\n\n### 口播稿的纯文本要求 (Pure Text Rules for the Script ONLY) \n**[重要]** 以下规则 *仅适用于* “口播稿”部分,不适用于“新摘要”部分: \n1. 绝不包含任何Markdown格式 (\`**\`, \`#\`)。 \n2. 绝不包含任何标签、括号或元数据 (\`[]\`, \`()\`)。 \n3. 绝不包含任何寒暄、问候、或自我介绍 (例如 \"你好\", \"当然\")。 \n4. 必须是可以直接朗读的、完整的、流畅的纯文本。\n\n# 输入数据 (Input Data)\n\n## 输入数据A (上一个岗位摘要) \n${lastJobSummary}\n\n## 输入数据B (当前岗位JSON)\n\`\`\`json\n${JSON.stringify(currentJobData, null, 2)}\n\`\`\`\n\n# 输出格式 (Output Format)\n**[绝对严格的指令]** \n你必须严格按照下面这个“两部分”格式输出,使用 \`---NEXT_SUMMARY---\` 作为唯一的分隔符。 绝不在分隔符之外添加任何多余的文字、解释或Markdown。 \n\n[这里是AI生成的、符合上述所有“纯文本要求”的完整口播稿] \n---NEXT_SUMMARY--- \n[这里是AI为“当前岗位”生成的简短新摘要]`;
answerText = await runOllamaNonStream(
systemPromptTemplate,
"qwen3:8b",
);
if (!answerText) {
throw new Error("Ollama 返回为空");
}
} catch (e) {
console.error("工作流: Ollama 生成脚本出错:", e);
return { success: false, error: "抱歉,AI 模型在生成脚本时出错。" };
}
try {
console.log("工作流: 正在解析 AI 输出...");
let script = "抱歉,AI没有按预定格式返回脚本,请稍后重试。"; // fallback if the output lacks the separator
let summary = "这是我们今天介绍的第一个岗位。"; // a safe "reset" summary
if (answerText && typeof answerText === "string") {
const parts = answerText.split("---NEXT_SUMMARY---");
if (parts[0] && parts[0].trim() !== "") {
script = parts[0].trim();
}
if (parts[1] && parts[1].trim() !== "") {
summary = parts[1].trim();
}
}
console.log("工作流: 正在更新状态...");
lastJobSummary = summary; // key step: update the state held in the main process
console.log("工作流: 完成,返回口播稿。");
return { success: true, data: script }; // return the final spoken script to the renderer
} catch (e: any) {
console.error("代码运行或变量更新节点出错:", e);
return "抱歉,处理 AI 响应时出错。";
}
});
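// Caller-side sketch, not part of this module. With the error returns normalized
// above, the contract is { success: true, data: script } or { success: false, error }.
// The job fields mirror those named in the prompt; the values here are hypothetical.
//
//   const job = { jobTitle: "前端工程师", companyName: "某科技公司", jobLocation: "上海" };
//   const res = await ipcRenderer.invoke("run-job-workflow", job);
//   if (res.success) {
//     playTts(res.data); // playTts is a hypothetical renderer-side TTS helper
//   }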
// Check whether the Ollama server is running
ipcMain.handle("check-ollama-status", async () => {
return await checkOllamaServer();
});
// Check whether the specified model exists locally
ipcMain.handle("check-model-exists", async (_, modelName = "qwen3:8b") => {
try {
const response = await fetch("http://127.0.0.1:11434/api/tags", {
method: "GET",
headers: { "Content-Type": "application/json" },
});
if (!response.ok) {
throw new Error(`Ollama API error: ${response.statusText}`);
}
const data = await response.json();
const models = data.models || [];
// Look for the model in the local model list
const modelExists = models.some((model: any) => model.name === modelName);
return {
success: true,
exists: modelExists,
models: models.map((m: any) => ({ name: m.name, size: m.size, modified_at: m.modified_at }))
};
} catch (error: any) {
console.error("Check model error:", error);
return {
success: false,
exists: false,
error: error.message
};
}
});
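// For reference, the /api/tags response is shaped roughly like the following
// (abridged to the fields this handler reads; values are illustrative):
//
//   { "models": [ { "name": "qwen3:8b", "size": 5200000000, "modified_at": "..." } ] }
//
// Note that the comparison above is exact: modelName must match the stored tag verbatim.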
// Load a model: check Ollama status and download the model if it is missing
ipcMain.handle("load-model", async (_, modelName = "qwen3:8b") => {
const webContents = BrowserWindow.getFocusedWindow()?.webContents;
const sendStatus = (status: string, type = "info") => {
if (webContents && !webContents.isDestroyed()) {
webContents.send("model-load-progress", {
status,
type,
timestamp: new Date().toISOString()
});
}
};
// Ask the renderer whether the model should be downloaded
const askUserToDownload = () => {
if (webContents && !webContents.isDestroyed()) {
webContents.send("model-download-confirm", {
modelName,
message: `模型 ${modelName} 不存在,是否下载?`,
timestamp: new Date().toISOString()
});
}
};
try {
sendStatus("正在检查Ollama服务状态...", "info");
// 1. Check whether Ollama is running
const isOllamaRunning = await checkOllamaServer();
if (!isOllamaRunning) {
sendStatus("Ollama服务未运行正在启动...", "warning");
try {
// 尝试启动Ollama服务
await runCommand("ollama", ["ps"]);
await new Promise(resolve => setTimeout(resolve, 3000));
const isRunningNow = await checkOllamaServer();
if (!isRunningNow) {
throw new Error("无法启动Ollama服务请手动启动");
}
sendStatus("Ollama服务启动成功", "success");
} catch (error: any) {
sendStatus(`启动Ollama服务失败: ${error.message}`, "error");
return {
success: false,
message: `Ollama服务启动失败: ${error.message}`,
downloaded: false
};
}
} else {
sendStatus("Ollama服务正在运行", "success");
}
// 2. Check whether the model exists
sendStatus(`正在检查模型 ${modelName} 是否存在...`, "info");
const response = await fetch("http://127.0.0.1:11434/api/tags", {
method: "GET",
headers: { "Content-Type": "application/json" },
});
const data = await response.json();
const models = data.models || [];
const modelCheckResult = {
exists: models.some((model: any) => model.name === modelName),
models,
};
if (modelCheckResult.exists) {
sendStatus(`模型 ${modelName} 已存在,无需下载`, "success");
return {
success: true,
message: `模型 ${modelName} 已就绪`,
downloaded: false
};
}
// 3. Model is missing; ask the user whether to download it
askUserToDownload();
// Wait for the user's confirmation (or a 30-second timeout)
const userConfirmed = await new Promise<boolean>((resolve, reject) => {
const timeout = setTimeout(() => {
modelDownloadCallbacks.delete(modelName);
resolve(false); // auto-cancel after the 30-second timeout
}, 30000);
// Store the callbacks so the confirm-response handler below can settle this promise
modelDownloadCallbacks.set(modelName, {
confirm: () => {
clearTimeout(timeout);
resolve(true);
},
reject: (error: any) => {
clearTimeout(timeout);
reject(error);
}
});
});
if (!userConfirmed) {
sendStatus("用户取消了模型下载", "info");
return {
success: false,
message: `用户取消了 ${modelName} 模型的下载`,
downloaded: false
};
}
// 4. User confirmed; start downloading the model
sendStatus(`开始下载模型 ${modelName},这可能需要一些时间...`, "info");
await new Promise<void>((resolve, reject) => {
const process = spawn("ollama", ["pull", modelName], { shell: true });
const sendProgress = (data: any) => {
if (webContents && !webContents.isDestroyed()) {
webContents.send("model-load-progress", {
status: data.toString().trim(),
type: "download",
timestamp: new Date().toISOString()
});
}
};
child.stdout.on("data", sendProgress);
child.stderr.on("data", sendProgress);
child.on("close", (code) => {
if (code === 0) {
resolve();
} else {
reject(new Error(`模型下载失败,退出码: ${code}`));
}
});
process.on("error", (err: any) => {
reject(new Error(`启动下载进程失败: ${err.message}`));
});
});
sendStatus(`模型 ${modelName} 下载完成!`, "success");
return {
success: true,
message: `模型 ${modelName} 下载并加载成功`,
downloaded: true
};
} catch (error: any) {
console.error("Load model error:", error);
sendStatus(`加载模型失败: ${error.message}`, "error");
return {
success: false,
message: error.message,
downloaded: false
};
}
});
// Handle the user's response to the model-download confirmation
ipcMain.on("model-download-confirm-response", (_event, data) => {
const { modelName, confirmed } = data;
const callback = modelDownloadCallbacks.get(modelName);
if (callback) {
modelDownloadCallbacks.delete(modelName);
if (confirmed) {
callback.confirm();
} else {
callback.reject(new Error("用户取消了模型下载"));
}
}
});
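// Renderer-side confirm flow — a minimal sketch of the two-channel handshake used by
// "load-model" above: the main process asks on "model-download-confirm", and the
// renderer must reply on "model-download-confirm-response" within 30 seconds, or the
// request resolves as cancelled.
//
//   ipcRenderer.on("model-download-confirm", (_event, { modelName, message }) => {
//     const confirmed = window.confirm(message); // any confirmation UI works
//     ipcRenderer.send("model-download-confirm-response", { modelName, confirmed });
//   });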
// Handler that polishes a piece of text
ipcMain.handle("polish-text", async (_, text) => {
try {
if (!text || typeof text !== 'string' || text.trim() === '') {
return {
success: false,
error: "输入文本不能为空"
};
}
const systemPrompt = `你是一个专业的文本润色专家。请将以下文本进行润色,使其更加流畅、自然、专业。要求:
1. 保持原意不变
2. 使语言更加流畅自然
3. 提升表达的准确性
4. 适合直播场合使用
5. 保持简洁明了
请直接返回润色后的文本,不要添加任何其他说明或解释。
原文:${text.trim()}`;
const polishedText = await runOllamaNonStream(systemPrompt, "qwen3:8b");
if (!polishedText) {
throw new Error("AI模型返回为空");
}
return {
success: true,
data: polishedText.trim()
};
} catch (error: any) {
console.error("润色文本失败:", error);
return {
success: false,
error: error.message || "润色服务出现错误"
};
}
});
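// Usage sketch, not part of this module:
//
//   const polished = await ipcRenderer.invoke("polish-text", "大家好 欢迎来到直播间");
//   console.log(polished.success ? polished.data : polished.error);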
// Handler: check the service; if it is not running, wake it with a lightweight command
ipcMain.handle("ensure-ollama-running", async () => {
let isRunning = await checkOllamaServer();
if (isRunning) {
return {
success: true,
message: "Ollama服务器已在运行.",
};
}
// The service is not running.
// Run 'ollama ps': the command talks to the service, and
// if the service is not up, the Ollama CLI starts it automatically.
try {
await runCommand("ollama", ["ps"]);
// Give the service a moment to start (e.g. 2 seconds)
await new Promise((resolve) => setTimeout(resolve, 2000));
// Check again
isRunning = await checkOllamaServer();
if (isRunning) {
return {
success: true,
message: "Ollama 已在后台启动",
};
} else {
return {
success: false,
message: "服务启动失败",
};
}
} catch (error: any) {
console.error("错误:", error);
return {
success: false,
message: `错误: ${error.message}`,
};
}
});
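// Startup sketch, not part of this module: a renderer could call this once on load
// before invoking any of the model handlers.
//
//   const status = await ipcRenderer.invoke("ensure-ollama-running");
//   if (!status.success) alert(status.message); // or any renderer-side notification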
}
// Helper: check whether the Ollama API is reachable
function checkOllamaServer(): Promise<boolean> {
return new Promise((resolve) => {
// The default port is 11434
const req = http.get("http://127.0.0.1:11434/", (res) => {
// An "Ollama is running" response has status code 200
resolve(res.statusCode === 200);
});
// A refused connection (ECONNREFUSED) means the service is not running
req.on("error", () => {
resolve(false);
});
});
}
// Helper: run a simple command and wait for it to finish
function runCommand(command: string, args: string[]): Promise<void> {
return new Promise((resolve, reject) => {
const child = spawn(command, args, { shell: true });
child.on("close", (code) => {
if (code === 0) {
resolve();
} else {
reject(new Error(`Command failed with code ${code}`));
}
});
child.on("error", (err) => reject(err));
});
}
// Non-streaming Ollama chat helper
async function runOllamaNonStream(prompt: string, model = "qwen3:8b"): Promise<string | null> {
try {
const response = await fetch("http://127.0.0.1:11434/api/chat", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
model: model,
messages: [{ role: "user", content: prompt }],
stream: false, // key: disable streaming so the full reply arrives in one response
}),
});
if (!response.ok) {
throw new Error(`Ollama API error: ${response.statusText}`);
}
const data = await response.json();
// data.message.content holds the complete AI reply
return data.message.content;
} catch (error) {
console.error("Ollama Chat Error:", error);
return null; // return null so callers can handle the failure
}
}
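// With stream: false, /api/chat returns a single JSON object shaped roughly like the
// following (abridged; timing fields omitted):
//
//   { "model": "qwen3:8b", "message": { "role": "assistant", "content": "..." }, "done": true }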
// Helper: run a command and stream its stdout/stderr to the renderer on eventName
function streamCommand(command: string, args: string[], webContents: Electron.WebContents, eventName: string): Promise<null> {
return new Promise((resolve, reject) => {
const child = spawn(command, args, { shell: true }); // 'child' avoids shadowing Node's global 'process'
const send = (channel, data) => {
if (webContents && !webContents.isDestroyed()) {
webContents.send(channel, data);
}
};
child.stdout.on("data", (data) => {
send(eventName, { type: "stdout", data: data.toString() });
});
child.stderr.on("data", (data) => {
send(eventName, { type: "stderr", data: data.toString() });
});
child.on("close", (code) => {
if (code === 0) {
resolve(null);
} else {
reject(new Error(`Process exited with code ${code}`));
}
});
child.on("error", (err) => {
reject(err);
});
});
}