LocalAgent/main.py
Mimikko-zeus 1ba5f0f7d6 feat: implement streaming support for chat and enhance safety review process
- Updated .env.example to include API key placeholder and configuration instructions.
- Refactored main.py to support streaming responses from the LLM, improving user experience during chat interactions.
- Enhanced LLMClient to include methods for streaming chat and collecting responses.
- Modified safety review process to pass static analysis warnings to the LLM for better code safety evaluation.
- Improved UI components in chat_view.py to handle streaming messages effectively.
2026-01-07 09:43:40 +08:00

"""
LocalAgent - Windows 本地 AI 执行助手 (MVP)
========================================
配置说明
========================================
1. 复制 .env.example 为 .env
2. 在 .env 中填入你的 SiliconFlow API Key:
LLM_API_KEY=sk-xxxxx
========================================
运行方式
========================================
方式一:使用 Anaconda
conda create -n localagent python=3.10
conda activate localagent
pip install -r requirements.txt
python main.py
方式二:直接运行(需已安装依赖)
python main.py
========================================
测试方法
========================================
1. 对话测试:输入 "今天天气怎么样" → 应识别为 chat
2. 执行测试:
- 将测试文件放入 workspace/input 目录
- 输入 "把这些文件复制一份" → 应识别为 execution
- 确认执行后,检查 workspace/output 目录
========================================
"""
import os
import sys
import tkinter as tk
from tkinter import messagebox
from pathlib import Path
from typing import Optional
from dotenv import load_dotenv
import threading
import queue
# Ensure the project root is on the Python path
PROJECT_ROOT = Path(__file__).parent
ENV_PATH = PROJECT_ROOT / ".env"
sys.path.insert(0, str(PROJECT_ROOT))
# Load environment variables before importing the other modules
load_dotenv(ENV_PATH)
from llm.client import get_client, LLMClientError
from llm.prompts import (
EXECUTION_PLAN_SYSTEM, EXECUTION_PLAN_USER,
CODE_GENERATION_SYSTEM, CODE_GENERATION_USER
)
from intent.classifier import classify_intent, IntentResult
from intent.labels import CHAT, EXECUTION
from safety.rule_checker import check_code_safety
from safety.llm_reviewer import review_code_safety
from executor.sandbox_runner import SandboxRunner, ExecutionResult
from ui.chat_view import ChatView
from ui.task_guide_view import TaskGuideView
class LocalAgentApp:
"""
LocalAgent 主应用
职责:
1. 管理 UI 状态切换
2. 协调各模块工作流程
3. 处理用户交互
"""
def __init__(self):
self.workspace = PROJECT_ROOT / "workspace"
self.runner = SandboxRunner(str(self.workspace))
        # Current task state
        self.current_task: Optional[dict] = None
        # Queue for passing results from worker threads
        self.result_queue = queue.Queue()
        # Initialize the UI
self._init_ui()
def _init_ui(self):
"""初始化 UI"""
self.root = tk.Tk()
self.root.title("LocalAgent - 本地 AI 助手")
self.root.geometry("800x700")
self.root.configure(bg='#1e1e1e')
        # Set the window icon (if one exists)
        try:
            self.root.iconbitmap(PROJECT_ROOT / "icon.ico")
        except Exception:
            # No icon available; keep the default
            pass
        # Main container
self.main_container = tk.Frame(self.root, bg='#1e1e1e')
self.main_container.pack(fill=tk.BOTH, expand=True)
        # Chat view
        self.chat_view = ChatView(self.main_container, self._on_user_input)
        # Task guide view (hidden initially)
        self.task_view: Optional[TaskGuideView] = None
        # Periodically check for results from background tasks
self._check_queue()
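    # Threading model: Tkinter widgets are not thread-safe, so worker threads
    # never touch the UI directly. They put (callback, args) tuples on
    # result_queue, and _check_queue drains that queue on the Tk main loop
    # via root.after.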
def _check_queue(self):
"""检查后台任务队列"""
try:
while True:
callback, args = self.result_queue.get_nowait()
callback(*args)
except queue.Empty:
pass
# 每 100ms 检查一次
self.root.after(100, self._check_queue)
def _run_in_thread(self, func, callback, *args):
"""在后台线程运行函数,完成后回调"""
def wrapper():
try:
result = func(*args)
self.result_queue.put((callback, (result, None)))
except Exception as e:
self.result_queue.put((callback, (None, e)))
thread = threading.Thread(target=wrapper, daemon=True)
thread.start()
def _on_user_input(self, user_input: str):
"""处理用户输入"""
# 显示用户消息
self.chat_view.add_message(user_input, 'user')
self.chat_view.set_input_enabled(False)
self.chat_view.add_message("正在分析您的需求...", 'system')
# 在后台线程进行意图识别
self._run_in_thread(
classify_intent,
lambda result, error: self._on_intent_result(user_input, result, error),
user_input
)
def _on_intent_result(self, user_input: str, intent_result: Optional[IntentResult], error: Optional[Exception]):
"""意图识别完成回调"""
if error:
self.chat_view.add_message(f"意图识别失败: {str(error)}", 'error')
self.chat_view.set_input_enabled(True)
return
if intent_result.label == CHAT:
            # Chat mode
self._handle_chat(user_input, intent_result)
else:
            # Execution mode
self._handle_execution(user_input, intent_result)
def _handle_chat(self, user_input: str, intent_result: IntentResult):
"""处理对话任务"""
self.chat_view.add_message(
f"识别为对话模式 (原因: {intent_result.reason})",
'system'
)
# 开始流式消息
self.chat_view.start_stream_message('assistant')
# 在后台线程调用 LLM流式
def do_chat_stream():
client = get_client()
model = os.getenv("GENERATION_MODEL_NAME")
full_response = []
for chunk in client.chat_stream(
messages=[{"role": "user", "content": user_input}],
model=model,
temperature=0.7,
max_tokens=2048,
timeout=300
):
full_response.append(chunk)
                # Send the chunk to the main thread through the queue to update the UI
self.result_queue.put((self._on_chat_chunk, (chunk,)))
return ''.join(full_response)
self._run_in_thread(
do_chat_stream,
self._on_chat_complete
)
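    # Streaming flow: do_chat_stream runs on a worker thread and forwards each
    # chunk through result_queue, so _on_chat_chunk appends it to the UI on the
    # main thread; the joined full response then reaches _on_chat_complete via
    # _run_in_thread's usual (result, error) callback.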
def _on_chat_chunk(self, chunk: str):
"""收到对话片段回调(主线程)"""
self.chat_view.append_stream_chunk(chunk)
def _on_chat_complete(self, response: Optional[str], error: Optional[Exception]):
"""对话完成回调"""
self.chat_view.end_stream_message()
if error:
self.chat_view.add_message(f"对话失败: {str(error)}", 'error')
self.chat_view.set_input_enabled(True)
def _handle_execution(self, user_input: str, intent_result: IntentResult):
"""处理执行任务"""
self.chat_view.add_message(
f"识别为执行任务 (置信度: {intent_result.confidence:.0%})\n原因: {intent_result.reason}",
'system'
)
self.chat_view.add_message("正在生成执行计划...", 'system')
# 保存用户输入和意图结果
self.current_task = {
'user_input': user_input,
'intent_result': intent_result
}
        # Generate the execution plan in a background thread
self._run_in_thread(
self._generate_execution_plan,
self._on_plan_generated,
user_input
)
def _on_plan_generated(self, plan: Optional[str], error: Optional[Exception]):
"""执行计划生成完成回调"""
if error:
self.chat_view.add_message(f"生成执行计划失败: {str(error)}", 'error')
self.chat_view.set_input_enabled(True)
self.current_task = None
return
self.current_task['execution_plan'] = plan
self.chat_view.add_message("正在生成执行代码...", 'system')
# 在后台线程生成代码
self._run_in_thread(
self._generate_code,
self._on_code_generated,
self.current_task['user_input'],
plan
)
def _on_code_generated(self, code: Optional[str], error: Optional[Exception]):
"""代码生成完成回调"""
if error:
self.chat_view.add_message(f"生成代码失败: {str(error)}", 'error')
self.chat_view.set_input_enabled(True)
self.current_task = None
return
self.current_task['code'] = code
self.chat_view.add_message("正在进行安全检查...", 'system')
# 硬规则检查(同步,很快)
rule_result = check_code_safety(code)
if not rule_result.passed:
violations = "\n".join(f"{v}" for v in rule_result.violations)
self.chat_view.add_message(
f"安全检查未通过,任务已取消:\n{violations}",
'error'
)
self.chat_view.set_input_enabled(True)
self.current_task = None
return
        # Save the warnings so they can be passed to the LLM review
        self.current_task['warnings'] = rule_result.warnings
        # Run the LLM safety review in a background thread
self._run_in_thread(
lambda: review_code_safety(
self.current_task['user_input'],
self.current_task['execution_plan'],
code,
                rule_result.warnings  # Pass the warnings to the LLM
),
self._on_safety_reviewed
)
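    # The safety gate is two-stage: check_code_safety applies hard rules
    # synchronously and cancels the task on any violation, while its warnings
    # are forwarded to review_code_safety so the LLM can weigh them when
    # judging the code against the user's request and the execution plan.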
def _on_safety_reviewed(self, review_result, error: Optional[Exception]):
"""安全审查完成回调"""
if error:
self.chat_view.add_message(f"安全审查失败: {str(error)}", 'error')
self.chat_view.set_input_enabled(True)
self.current_task = None
return
if not review_result.passed:
self.chat_view.add_message(
f"安全审查未通过: {review_result.reason}",
'error'
)
self.chat_view.set_input_enabled(True)
self.current_task = None
return
self.chat_view.add_message("安全检查通过,请确认执行", 'system')
# 显示任务引导视图
self._show_task_guide()
def _generate_execution_plan(self, user_input: str) -> str:
"""生成执行计划(使用流式传输)"""
client = get_client()
model = os.getenv("GENERATION_MODEL_NAME")
# 使用流式传输,避免超时
response = client.chat_stream_collect(
messages=[
{"role": "system", "content": EXECUTION_PLAN_SYSTEM},
{"role": "user", "content": EXECUTION_PLAN_USER.format(user_input=user_input)}
],
model=model,
temperature=0.3,
max_tokens=1024,
            timeout=300  # 5-minute timeout
)
return response
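    # chat_stream_collect is assumed (per the commit note about "streaming
    # chat and collecting responses" in llm/client.py) to consume the streamed
    # chunks internally and return the joined text, which keeps long
    # generations alive without exposing a stream to the UI.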
def _generate_code(self, user_input: str, execution_plan: str) -> str:
"""生成执行代码(使用流式传输)"""
client = get_client()
model = os.getenv("GENERATION_MODEL_NAME")
# 使用流式传输,避免超时
response = client.chat_stream_collect(
messages=[
{"role": "system", "content": CODE_GENERATION_SYSTEM},
{"role": "user", "content": CODE_GENERATION_USER.format(
user_input=user_input,
execution_plan=execution_plan
)}
],
model=model,
temperature=0.2,
            max_tokens=4096,  # Generated code may be long
            timeout=300  # 5-minute timeout
)
        # Extract the code block from the response
code = self._extract_code(response)
return code
def _extract_code(self, response: str) -> str:
"""从 LLM 响应中提取代码"""
import re
# 尝试提取 ```python ... ``` 代码块
pattern = r'```python\s*(.*?)\s*```'
matches = re.findall(pattern, response, re.DOTALL)
if matches:
return matches[0]
        # Fall back to a generic ``` ... ``` fenced block
pattern = r'```\s*(.*?)\s*```'
matches = re.findall(pattern, response, re.DOTALL)
if matches:
return matches[0]
        # No fenced block found; return the raw response
return response
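    # Illustrative example (not real model output): for a response such as
    #   "Here is the script:\n```python\nprint('hi')\n```"
    # the python-fenced pattern returns "print('hi')"; a response containing
    # no fenced block at all is returned unchanged.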
def _show_task_guide(self):
"""显示任务引导视图"""
if not self.current_task:
return
# 隐藏聊天视图
self.chat_view.get_frame().pack_forget()
# 创建任务引导视图
self.task_view = TaskGuideView(
self.main_container,
on_execute=self._on_execute_task,
on_cancel=self._on_cancel_task,
workspace_path=self.workspace
)
# 设置内容
self.task_view.set_intent_result(
self.current_task['intent_result'].reason,
self.current_task['intent_result'].confidence
)
self.task_view.set_execution_plan(self.current_task['execution_plan'])
# 显示
self.task_view.show()
def _on_execute_task(self):
"""执行任务"""
if not self.current_task:
return
self.task_view.set_buttons_enabled(False)
# 在后台线程执行
def do_execute():
return self.runner.execute(self.current_task['code'])
self._run_in_thread(
do_execute,
self._on_execution_complete
)
def _on_execution_complete(self, result: Optional[ExecutionResult], error: Optional[Exception]):
"""执行完成回调"""
if error:
messagebox.showerror("执行错误", f"执行失败: {str(error)}")
else:
self._show_execution_result(result)
# 刷新输出文件列表
if self.task_view:
self.task_view.refresh_output()
self._back_to_chat()
def _show_execution_result(self, result: ExecutionResult):
"""显示执行结果"""
if result.success:
status = "执行成功"
else:
status = "执行失败"
message = f"""{status}
任务 ID: {result.task_id}
耗时: {result.duration_ms} ms
日志文件: {result.log_path}
输出:
{result.stdout if result.stdout else '(无输出)'}
{f'错误信息: {result.stderr}' if result.stderr else ''}
"""
if result.success:
messagebox.showinfo("执行结果", message)
# 打开 output 目录
os.startfile(str(self.workspace / "output"))
else:
messagebox.showerror("执行结果", message)
def _on_cancel_task(self):
"""取消任务"""
self.current_task = None
self._back_to_chat()
def _back_to_chat(self):
"""返回聊天视图"""
if self.task_view:
self.task_view.hide()
self.task_view = None
self.chat_view.get_frame().pack(fill=tk.BOTH, expand=True, padx=10, pady=10)
self.chat_view.set_input_enabled(True)
self.current_task = None
def run(self):
"""运行应用"""
self.root.mainloop()
def check_environment():
"""检查运行环境"""
load_dotenv(ENV_PATH)
api_key = os.getenv("LLM_API_KEY")
    if not api_key or api_key == "your_api_key_here":
        print("=" * 50)
        print("ERROR: LLM API Key is not configured")
        print("=" * 50)
        print()
        print("Please configure it as follows:")
        print("1. Copy .env.example to .env")
        print("2. Set LLM_API_KEY=<your API key> in .env")
        print()
        print("Get an API key at: https://siliconflow.cn")
        print("=" * 50)
        # Show a GUI error dialog as well
        root = tk.Tk()
        root.withdraw()
        messagebox.showerror(
            "Configuration Error",
            "LLM API Key is not configured\n\n"
            "Please configure it as follows:\n"
            "1. Copy .env.example to .env\n"
            "2. Set LLM_API_KEY=<your API key> in .env\n\n"
            "Get an API key at: https://siliconflow.cn"
        )
        root.destroy()
        return False
return True
def main():
"""主入口"""
print("=" * 50)
print("LocalAgent - Windows 本地 AI 执行助手")
print("=" * 50)
# 检查环境
if not check_environment():
sys.exit(1)
    # Create the workspace directories
workspace = PROJECT_ROOT / "workspace"
(workspace / "input").mkdir(parents=True, exist_ok=True)
(workspace / "output").mkdir(parents=True, exist_ok=True)
(workspace / "logs").mkdir(parents=True, exist_ok=True)
print(f"工作目录: {workspace}")
print(f"输入目录: {workspace / 'input'}")
print(f"输出目录: {workspace / 'output'}")
print(f"日志目录: {workspace / 'logs'}")
print("=" * 50)
    # Start the application
app = LocalAgentApp()
app.run()
if __name__ == "__main__":
main()