Initial commit
This commit is contained in:
2
llm/__init__.py
Normal file
2
llm/__init__.py
Normal file
@@ -0,0 +1,2 @@
|
||||
# LLM 模块
|
||||
|
||||
BIN
llm/__pycache__/__init__.cpython-310.pyc
Normal file
BIN
llm/__pycache__/__init__.cpython-310.pyc
Normal file
Binary file not shown.
BIN
llm/__pycache__/__init__.cpython-313.pyc
Normal file
BIN
llm/__pycache__/__init__.cpython-313.pyc
Normal file
Binary file not shown.
BIN
llm/__pycache__/client.cpython-310.pyc
Normal file
BIN
llm/__pycache__/client.cpython-310.pyc
Normal file
Binary file not shown.
BIN
llm/__pycache__/client.cpython-313.pyc
Normal file
BIN
llm/__pycache__/client.cpython-313.pyc
Normal file
Binary file not shown.
BIN
llm/__pycache__/prompts.cpython-310.pyc
Normal file
BIN
llm/__pycache__/prompts.cpython-310.pyc
Normal file
Binary file not shown.
BIN
llm/__pycache__/prompts.cpython-313.pyc
Normal file
BIN
llm/__pycache__/prompts.cpython-313.pyc
Normal file
Binary file not shown.
124
llm/client.py
Normal file
124
llm/client.py
Normal file
@@ -0,0 +1,124 @@
|
||||
"""
|
||||
LLM 统一调用客户端
|
||||
所有模型通过 SiliconFlow API 调用
|
||||
"""
|
||||
|
||||
import os
|
||||
import requests
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
from dotenv import load_dotenv
|
||||
|
||||
# 获取项目根目录
|
||||
PROJECT_ROOT = Path(__file__).parent.parent
|
||||
ENV_PATH = PROJECT_ROOT / ".env"
|
||||
|
||||
|
||||
class LLMClientError(Exception):
    """Raised for any LLM client failure: missing config, network errors, or bad API responses."""
|
||||
|
||||
|
||||
class LLMClient:
    """
    Unified LLM chat client.

    All models are called through the SiliconFlow-style HTTP chat API
    configured via LLM_API_URL / LLM_API_KEY in the project .env file.

    Usage:
        client = LLMClient()
        response = client.chat(
            messages=[{"role": "user", "content": "Hello"}],
            model="Qwen/Qwen2.5-7B-Instruct",
            temperature=0.7,
            max_tokens=1024
        )
    """

    def __init__(self) -> None:
        # Load credentials from the project-root .env file.
        load_dotenv(ENV_PATH)

        self.api_url = os.getenv("LLM_API_URL")
        self.api_key = os.getenv("LLM_API_KEY")

        if not self.api_url:
            raise LLMClientError("未配置 LLM_API_URL,请检查 .env 文件")
        # The template .env ships with a placeholder key; treat it as unset.
        if not self.api_key or self.api_key == "your_api_key_here":
            raise LLMClientError("未配置有效的 LLM_API_KEY,请检查 .env 文件")

    def chat(
        self,
        messages: list[dict],
        model: str,
        temperature: float = 0.7,
        max_tokens: int = 1024
    ) -> str:
        """
        Send a non-streaming chat-completion request.

        Args:
            messages: OpenAI-style message list,
                [{"role": "user/assistant/system", "content": "..."}]
            model: model identifier to invoke
            temperature: sampling temperature (randomness)
            max_tokens: maximum number of tokens to generate

        Returns:
            The generated text content.

        Raises:
            LLMClientError: on network failure, a non-200 API response,
                or an unparseable API response body.
        """
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

        payload = {
            "model": model,
            "messages": messages,
            "stream": False,
            "temperature": temperature,
            "max_tokens": max_tokens
        }

        try:
            response = requests.post(
                self.api_url,
                headers=headers,
                json=payload,
                timeout=60
            )
        except requests.exceptions.Timeout as e:
            raise LLMClientError("请求超时,请检查网络连接") from e
        except requests.exceptions.ConnectionError as e:
            raise LLMClientError("网络连接失败,请检查网络设置") from e
        except requests.exceptions.RequestException as e:
            raise LLMClientError(f"网络请求异常: {str(e)}") from e

        if response.status_code != 200:
            error_msg = f"API 返回错误 (状态码: {response.status_code})"
            try:
                error_detail = response.json()
                if "error" in error_detail:
                    error_msg += f": {error_detail['error']}"
            # Was a bare `except:` — narrow it: ValueError covers a non-JSON
            # body (requests' JSONDecodeError subclasses it), TypeError covers
            # a JSON body that does not support `in` (e.g. a number).
            except (ValueError, TypeError):
                error_msg += f": {response.text[:200]}"
            raise LLMClientError(error_msg)

        try:
            result = response.json()
            content = result["choices"][0]["message"]["content"]
            return content
        # ValueError added: response.json() itself raises it on a non-JSON
        # 200 body, which the original (KeyError, IndexError, TypeError)
        # tuple let escape uncaught.
        except (ValueError, KeyError, IndexError, TypeError) as e:
            raise LLMClientError(f"解析 API 响应失败: {str(e)}") from e
|
||||
|
||||
|
||||
# Module-level singleton, created lazily on first use.
_client: Optional[LLMClient] = None


def get_client() -> LLMClient:
    """Return the shared LLMClient instance, constructing it on first call."""
    global _client
    if _client is not None:
        return _client
    _client = LLMClient()
    return _client
|
||||
|
||||
130
llm/prompts.py
Normal file
130
llm/prompts.py
Normal file
@@ -0,0 +1,130 @@
|
||||
"""
|
||||
Prompt 模板集合
|
||||
所有与 LLM 交互的 Prompt 统一在此管理
|
||||
"""
|
||||
|
||||
# ========================================
|
||||
# 意图识别 Prompt
|
||||
# ========================================
|
||||
|
||||
INTENT_CLASSIFICATION_SYSTEM = """你是一个意图分类器。判断用户输入是"普通对话"还是"本地执行任务"。
|
||||
|
||||
规则:
|
||||
- chat: 闲聊、问答、知识查询(如天气、新闻、解释概念)
|
||||
- execution: 需要操作本地文件的任务(如复制、移动、重命名、整理文件)
|
||||
|
||||
只输出JSON,格式:
|
||||
{"label": "chat或execution", "confidence": 0.0到1.0, "reason": "简短中文理由"}"""
|
||||
|
||||
INTENT_CLASSIFICATION_USER = """判断以下输入的意图:
|
||||
{user_input}"""
|
||||
|
||||
|
||||
# ========================================
|
||||
# 执行计划生成 Prompt
|
||||
# ========================================
|
||||
|
||||
EXECUTION_PLAN_SYSTEM = """你是一个任务规划助手。根据用户需求,生成清晰的执行计划。
|
||||
|
||||
约束:
|
||||
1. 所有操作只在 workspace 目录内进行
|
||||
2. 输入文件来自 workspace/input
|
||||
3. 输出文件保存到 workspace/output
|
||||
4. 绝不修改或删除原始文件
|
||||
5. 不进行任何网络操作
|
||||
|
||||
输出格式(中文):
|
||||
## 任务理解
|
||||
[简述用户想做什么]
|
||||
|
||||
## 执行步骤
|
||||
1. [步骤1]
|
||||
2. [步骤2]
|
||||
...
|
||||
|
||||
## 输入输出
|
||||
- 输入目录: workspace/input
|
||||
- 输出目录: workspace/output
|
||||
|
||||
## 风险提示
|
||||
[可能失败的情况]"""
|
||||
|
||||
EXECUTION_PLAN_USER = """用户需求:{user_input}
|
||||
|
||||
请生成执行计划。"""
|
||||
|
||||
|
||||
# ========================================
|
||||
# 代码生成 Prompt
|
||||
# ========================================
|
||||
|
||||
CODE_GENERATION_SYSTEM = """你是一个 Python 代码生成器。根据执行计划生成安全的文件处理代码。
|
||||
|
||||
硬性约束:
|
||||
1. 只能操作 workspace/input 和 workspace/output 目录
|
||||
2. 禁止使用: requests, socket, urllib, subprocess, os.system
|
||||
3. 禁止删除文件: os.remove, shutil.rmtree, os.unlink
|
||||
4. 禁止访问 workspace 外的任何路径
|
||||
5. 只使用标准库: os, shutil, pathlib, json, csv 等
|
||||
|
||||
代码模板:
|
||||
```python
|
||||
import os
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
|
||||
# 工作目录
|
||||
WORKSPACE = Path(__file__).parent
|
||||
INPUT_DIR = WORKSPACE / "input"
|
||||
OUTPUT_DIR = WORKSPACE / "output"
|
||||
|
||||
def main():
|
||||
# 确保输出目录存在
|
||||
OUTPUT_DIR.mkdir(exist_ok=True)
|
||||
|
||||
# TODO: 实现具体逻辑
|
||||
|
||||
print("任务完成")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
```
|
||||
|
||||
只输出 Python 代码,不要其他解释。"""
|
||||
|
||||
CODE_GENERATION_USER = """执行计划:
|
||||
{execution_plan}
|
||||
|
||||
用户原始需求:{user_input}
|
||||
|
||||
请生成 Python 代码。"""
|
||||
|
||||
|
||||
# ========================================
|
||||
# 安全审查 Prompt
|
||||
# ========================================
|
||||
|
||||
SAFETY_REVIEW_SYSTEM = """你是一个代码安全审查员。检查代码是否符合安全规范。
|
||||
|
||||
检查项:
|
||||
1. 是否只操作 workspace 目录
|
||||
2. 是否有网络请求代码
|
||||
3. 是否有危险的文件删除操作
|
||||
4. 是否有执行外部命令的代码
|
||||
5. 代码逻辑是否与用户需求一致
|
||||
|
||||
输出JSON格式:
|
||||
{"pass": true或false, "reason": "中文审查结论"}"""
|
||||
|
||||
SAFETY_REVIEW_USER = """用户需求:{user_input}
|
||||
|
||||
执行计划:
|
||||
{execution_plan}
|
||||
|
||||
待审查代码:
|
||||
```python
|
||||
{code}
|
||||
```
|
||||
|
||||
请进行安全审查。"""
|
||||
|
||||
Reference in New Issue
Block a user