Files
QQbot/src/handlers/message_handler_ai.py

1333 lines
50 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
"""
集成 AI 能力的 QQ 消息处理器。
"""
import asyncio
import json
from pathlib import Path
import re
from typing import Any, Dict, Optional
import httpx
from botpy.message import Message
from src.ai import AIClient
from src.ai.base import ModelConfig, ModelProvider
from src.ai.mcp import MCPManager
from src.ai.mcp.servers import FileSystemMCPServer
from src.ai.personality import PersonalityProfile, PersonalityTrait
from src.ai.skills import SkillsManager
from src.core.config import Config
from src.utils.logger import setup_logger
logger = setup_logger("MessageHandler")
class MessageHandler:
    """QQ message handler with integrated AI capabilities.

    Routes @-mentions to the AI client and dispatches slash commands
    (/skills, /personality, /models, /memory, /clear, /task). Model
    profiles are persisted under config/models.json.
    """
    # Characters that are NOT allowed in a normalized model profile key.
    MODEL_KEY_PATTERN = re.compile(r"[^a-zA-Z0-9_]")
    # Ordered (pattern, replacement) pairs applied sequentially by
    # _plain_text to strip Markdown so replies render as plain text in QQ.
    MARKDOWN_PATTERNS = [
        # Fenced code: keep the body, drop the ``` fences.
        (re.compile(r"```[\s\S]*?```"), lambda m: m.group(0).replace("```", "")),
        (re.compile(r"`([^`]+)`"), r"\1"),
        (re.compile(r"\*\*([^*]+)\*\*"), r"\1"),
        (re.compile(r"\*([^*]+)\*"), r"\1"),
        (re.compile(r"__([^_]+)__"), r"\1"),
        # Avoid stripping underscores inside identifiers like model keys.
        (re.compile(r"(?<!\w)_([^_\n]+)_(?!\w)"), r"\1"),
        # Headings and blockquote markers.
        (re.compile(r"^#{1,6}\s*", re.MULTILINE), ""),
        (re.compile(r"^>\s?", re.MULTILINE), ""),
        # Links become "text: url".
        (re.compile(r"\[([^\]]+)\]\(([^)]+)\)"), r"\1: \2"),
        # Bullet and numbered lists collapse to "- " items.
        (re.compile(r"^[-*]\s+", re.MULTILINE), "- "),
        (re.compile(r"^\d+\.\s+", re.MULTILINE), "- "),
        # Collapse runs of blank lines.
        (re.compile(r"\n{3,}"), "\n\n"),
    ]
def __init__(self, bot):
    """Store the bot reference; heavy AI setup is deferred to _init_ai."""
    self.bot = bot
    # Created lazily by _init_ai on the first handled message.
    self.ai_client = None
    self.skills_manager = None
    self.mcp_manager = None
    # Persisted as {"active": key, "profiles": {key: config-dict}}.
    self.model_profiles_path = Path("config/models.json")
    self.model_profiles: Dict[str, Dict[str, Any]] = {}
    self.active_model_key = "default"
    self._ai_initialized = False
    # Guards _init_ai against concurrent first messages.
    self._init_lock = asyncio.Lock()
@staticmethod
def _get_user_id(message: Message) -> str:
    """Best-effort stable identifier for the message author.

    Probes the author's id fields in priority order and returns the
    first truthy one; falls back to "unknown" when the author or every
    field is missing/empty.
    """
    author = getattr(message, "author", None)
    if not author:
        return "unknown"
    for attr in ("id", "user_openid", "member_openid", "union_openid"):
        value = getattr(author, attr, None)
        if value:
            return value
    return "unknown"
@staticmethod
def _build_skills_usage(command_name: str = "/skills") -> str:
return (
"技能命令:\n"
f"{command_name}{command_name} list\n"
f"{command_name} install <source> [skill_name]\n"
f" source 支持本地技能名、URL、owner/repo、owner/repo#branch、GitHub 仓库 URL(.git)\n"
f"{command_name} uninstall <skill_name>\n"
f"{command_name} reload <skill_name>"
)
@staticmethod
def _build_personality_usage() -> str:
return (
"人设命令:\n"
"/personality\n"
"/personality list\n"
"/personality set <key>\n"
"/personality add <name> <Introduction>\n"
" 兼容旧格式: <json> 或 管道 name|description|speaking_style|TRAIT1,TRAIT2|custom_instructions\n"
"/personality remove <key>"
)
@staticmethod
def _build_models_usage(command_name: str = "/models") -> str:
return (
"模型命令:\n"
f"{command_name}{command_name} list\n"
f"{command_name} current\n"
f"{command_name} add <model_name> (保持 provider/api_base/api_key 不变)\n"
f"{command_name} add <key> <provider> <model_name> [api_base]\n"
f"{command_name} add <key> <json>\n"
" json 字段provider, model_name, api_base, api_key, temperature, max_tokens, top_p, timeout\n"
f"{command_name} switch <key|index>\n"
f"{command_name} remove <key|index>"
)
@staticmethod
def _build_memory_usage(command_name: str = "/memory") -> str:
return (
"记忆命令:\n"
f"{command_name}{command_name} list [limit]\n"
f"{command_name} get <id>\n"
f"{command_name} add <content>\n"
f"{command_name} add <json>\n"
f"{command_name} update <id> <content or json>\n"
f"{command_name} delete <id>\n"
f"{command_name} search <query> [limit]\n"
"/clear (默认等价 /clear short)\n"
"/clear short\n"
"/clear long\n"
"/clear all"
)
@staticmethod
def _provider_map() -> Dict[str, ModelProvider]:
    """Map lowercase provider names to ModelProvider members.

    "siliconflow" maps to OPENAI — presumably an OpenAI-compatible
    endpoint; confirm against the AI client's provider handling.
    """
    mapping: Dict[str, ModelProvider] = {}
    mapping["openai"] = ModelProvider.OPENAI
    mapping["anthropic"] = ModelProvider.ANTHROPIC
    mapping["deepseek"] = ModelProvider.DEEPSEEK
    mapping["qwen"] = ModelProvider.QWEN
    mapping["siliconflow"] = ModelProvider.OPENAI
    return mapping
@classmethod
def _normalize_model_key(cls, raw_key: str) -> str:
    """Normalize a user-supplied model key to a [a-z0-9_] identifier.

    Dashes/spaces become underscores, remaining invalid characters are
    replaced with underscores, underscore runs collapse, and a leading
    digit gets a "model_" prefix. Raises ValueError when nothing is left.
    """
    candidate = raw_key.strip().lower()
    candidate = candidate.replace("-", "_").replace(" ", "_")
    candidate = cls.MODEL_KEY_PATTERN.sub("_", candidate)
    candidate = re.sub(r"_+", "_", candidate).strip("_")
    if not candidate:
        raise ValueError("模型 key 不能为空")
    return f"model_{candidate}" if candidate[0].isdigit() else candidate
@staticmethod
def _compact_model_key(raw_key: str) -> str:
return re.sub(r"[^a-z0-9]", "", (raw_key or "").strip().lower())
def _ordered_model_keys(self) -> list[str]:
return sorted(self.model_profiles.keys())
def _resolve_model_selector(self, selector: str) -> str:
    """Resolve a user-typed selector to a stored model profile key.

    Resolution order: 1-based index into the sorted key list, exact key,
    normalized key, normalized fuzzy match, compact (alphanumeric-only)
    fuzzy match. Raises ValueError when nothing matches or when a fuzzy
    match is ambiguous.
    """
    raw = (selector or "").strip()
    if not raw:
        raise ValueError("模型 key 不能为空")
    ordered_keys = self._ordered_model_keys()
    # Pure digits select by 1-based position in the sorted listing.
    if raw.isdigit():
        index = int(raw)
        if index < 1 or index > len(ordered_keys):
            raise ValueError(
                f"模型序号超出范围: {index},可选 1-{len(ordered_keys)}"
            )
        return ordered_keys[index - 1]
    if raw in self.model_profiles:
        return raw
    normalized_selector: Optional[str]
    try:
        normalized_selector = self._normalize_model_key(raw)
    except ValueError:
        normalized_selector = None
    if normalized_selector and normalized_selector in self.model_profiles:
        return normalized_selector
    # Fuzzy lookup tables: normalized form / compact form -> stored keys.
    normalized_candidates: Dict[str, list[str]] = {}
    compact_candidates: Dict[str, list[str]] = {}
    for key in ordered_keys:
        try:
            normalized_key = self._normalize_model_key(key)
        except ValueError:
            normalized_key = key.strip().lower()
        normalized_candidates.setdefault(normalized_key, []).append(key)
        compact_candidates.setdefault(
            self._compact_model_key(normalized_key), []
        ).append(key)
    if normalized_selector and normalized_selector in normalized_candidates:
        matches = normalized_candidates[normalized_selector]
        if len(matches) == 1:
            return matches[0]
        # Multiple stored keys normalize to the same form: refuse to guess.
        raise ValueError(f"匹配到多个模型 key请使用完整 key: {', '.join(matches)}")
    compact_selector = self._compact_model_key(normalized_selector or raw)
    if compact_selector in compact_candidates:
        matches = compact_candidates[compact_selector]
        if len(matches) == 1:
            return matches[0]
        raise ValueError(f"匹配到多个模型 key请使用完整 key: {', '.join(matches)}")
    raise ValueError(f"模型配置不存在: {raw}")
@classmethod
def _parse_provider(cls, raw_provider: str) -> ModelProvider:
    """Translate a provider name into a ModelProvider, or raise ValueError."""
    lookup_key = raw_provider.strip().lower()
    provider = cls._provider_map().get(lookup_key)
    if provider:
        return provider
    raise ValueError(f"不支持的 provider: {raw_provider}")
@staticmethod
def _coerce_float(value: Any, default: float) -> float:
try:
return float(value)
except (TypeError, ValueError):
return default
@staticmethod
def _coerce_int(value: Any, default: int) -> int:
try:
return int(value)
except (TypeError, ValueError):
return default
@staticmethod
def _coerce_bool(value: Any, default: bool) -> bool:
if isinstance(value, bool):
return value
if isinstance(value, str):
lowered = value.strip().lower()
if lowered in {"1", "true", "yes", "on"}:
return True
if lowered in {"0", "false", "no", "off"}:
return False
return default
if value is None:
return default
return bool(value)
@staticmethod
def _model_config_to_dict(
    config: ModelConfig, include_api_key: bool = True
) -> Dict[str, Any]:
    """Serialize a ModelConfig into the profile dict stored on disk.

    api_base is emitted only when set; api_key only when present AND
    include_api_key is True (profiles are normally saved key-less).
    """
    optional: Dict[str, Any] = {}
    if config.api_base:
        optional["api_base"] = config.api_base
    if include_api_key and config.api_key:
        optional["api_key"] = config.api_key
    return {
        "provider": config.provider.value,
        "model_name": config.model_name,
        "temperature": config.temperature,
        "max_tokens": config.max_tokens,
        "top_p": config.top_p,
        "frequency_penalty": config.frequency_penalty,
        "presence_penalty": config.presence_penalty,
        "timeout": config.timeout,
        "stream": config.stream,
        **optional,
    }
def _model_config_from_dict(
    self, raw: Dict[str, Any], fallback: ModelConfig
) -> ModelConfig:
    """Build a ModelConfig from a stored profile dict.

    Missing or unparseable fields fall back to the corresponding value
    of *fallback*. Raises ValueError for an unsupported provider or an
    empty model_name.
    """
    provider = self._parse_provider(str(raw.get("provider") or fallback.provider.value))
    model_name = str(raw.get("model_name") or fallback.model_name).strip()
    if not model_name:
        raise ValueError("model_name 不能为空")
    api_key = raw.get("api_key")
    if api_key is None:
        # Profiles are usually stored key-less; reuse the current key,
        # then the environment-level Config.AI_API_KEY.
        api_key = fallback.api_key or Config.AI_API_KEY
    api_key = str(api_key) if api_key else ""
    api_base = raw.get("api_base", fallback.api_base)
    api_base = str(api_base) if api_base else None
    return ModelConfig(
        provider=provider,
        model_name=model_name,
        api_key=api_key,
        api_base=api_base,
        temperature=self._coerce_float(raw.get("temperature"), fallback.temperature),
        max_tokens=self._coerce_int(raw.get("max_tokens"), fallback.max_tokens),
        top_p=self._coerce_float(raw.get("top_p"), fallback.top_p),
        frequency_penalty=self._coerce_float(
            raw.get("frequency_penalty"), fallback.frequency_penalty
        ),
        presence_penalty=self._coerce_float(
            raw.get("presence_penalty"), fallback.presence_penalty
        ),
        timeout=self._coerce_int(raw.get("timeout"), fallback.timeout),
        stream=self._coerce_bool(raw.get("stream"), fallback.stream),
    )
def _save_model_profiles(self):
payload = {"active": self.active_model_key, "profiles": self.model_profiles}
self.model_profiles_path.parent.mkdir(parents=True, exist_ok=True)
with open(self.model_profiles_path, "w", encoding="utf-8") as f:
json.dump(payload, f, ensure_ascii=False, indent=2)
def _load_model_profiles(self, default_config: ModelConfig) -> ModelConfig:
    """Load profiles from disk, sanitize them, and return the active config.

    An unreadable file, invalid keys, or a broken active profile all
    degrade gracefully to a single "default" profile built from
    *default_config*. The sanitized state is written back to disk before
    returning, so the on-disk file stays consistent.
    """
    payload: Dict[str, Any] = {}
    if self.model_profiles_path.exists():
        try:
            with open(self.model_profiles_path, "r", encoding="utf-8") as f:
                payload = json.load(f)
        except Exception as exc:
            logger.warning(f"load model profiles failed, reset to defaults: {exc}")
            payload = {}
    raw_profiles = payload.get("profiles")
    profiles: Dict[str, Dict[str, Any]] = {}
    if isinstance(raw_profiles, dict):
        for raw_key, raw_profile in raw_profiles.items():
            if not isinstance(raw_profile, dict):
                continue
            key_text = str(raw_key or "").strip()
            if not key_text:
                continue
            try:
                normalized_key = self._normalize_model_key(key_text)
            except ValueError:
                continue
            # Distinct stored keys may normalize to the same key; keep
            # the first and warn instead of silently overwriting.
            if normalized_key in profiles and profiles[normalized_key] != raw_profile:
                logger.warning(
                    f"duplicate model key after normalization, keep first: {normalized_key}"
                )
                continue
            profiles[normalized_key] = raw_profile
    if not profiles:
        profiles = {
            "default": self._model_config_to_dict(
                default_config, include_api_key=False
            )
        }
    # Resolve the active key: exact match first, then normalized form.
    active_raw = str(payload.get("active") or "").strip()
    active = ""
    if active_raw in profiles:
        active = active_raw
    elif active_raw:
        try:
            normalized_active = self._normalize_model_key(active_raw)
        except ValueError:
            normalized_active = ""
        if normalized_active in profiles:
            active = normalized_active
    if not active:
        active = "default" if "default" in profiles else sorted(profiles.keys())[0]
    self.model_profiles = profiles
    self.active_model_key = active
    try:
        active_config = self._model_config_from_dict(
            self.model_profiles[self.active_model_key], default_config
        )
    except Exception as exc:
        # The chosen profile could not be turned into a usable config:
        # reset everything to a single default profile.
        logger.warning(f"active model profile invalid, fallback to default: {exc}")
        self.model_profiles = {
            "default": self._model_config_to_dict(
                default_config, include_api_key=False
            )
        }
        self.active_model_key = "default"
        active_config = default_config
    self._save_model_profiles()
    return active_config
def _ensure_model_profiles_ready(self):
if not self.ai_client:
return
if self.model_profiles and self.active_model_key in self.model_profiles:
return
active_config = self._load_model_profiles(self.ai_client.config)
if active_config != self.ai_client.config:
self.ai_client.switch_model(active_config)
def _plain_text(self, text: str) -> str:
    """Strip Markdown markup from *text* via MARKDOWN_PATTERNS.

    Patterns are applied in declaration order; the result is whitespace
    trimmed. Empty/None input yields "".
    """
    if not text:
        return ""
    cleaned = text
    for pattern, replacement in type(self).MARKDOWN_PATTERNS:
        cleaned = pattern.sub(replacement, cleaned)
    return cleaned.strip()
async def _reply_plain(self, message: Message, text: str):
    """Reply to *message* with the Markdown-stripped form of *text*."""
    plain = self._plain_text(text)
    await message.reply(content=plain)
def _register_skill_tools(self, skill_name: str) -> int:
if not self.skills_manager or not self.ai_client:
return 0
skill = self.skills_manager.get_skill(skill_name)
if not skill:
return 0
count = 0
for tool_name, tool_func in skill.get_tools().items():
full_tool_name = f"{skill_name}.{tool_name}"
self.ai_client.register_tool(
name=full_tool_name,
description=f"技能工具: {full_tool_name}",
parameters={"type": "object", "properties": {}},
function=tool_func,
source="skills",
)
count += 1
return count
async def _register_mcp_tools(self) -> int:
    """Expose every MCP-served tool to the AI client through async proxies.

    Returns the number of tools registered; 0 when either manager is
    missing. Items are expected in OpenAI-style tool-spec shape
    ({"function": {"name": ..., "parameters": ...}}); malformed entries
    are skipped.
    """
    if not self.mcp_manager or not self.ai_client:
        return 0
    tools = await self.mcp_manager.get_all_tools_for_ai()
    count = 0
    for item in tools:
        function_info = item.get("function") if isinstance(item, dict) else None
        if not isinstance(function_info, dict):
            continue
        full_tool_name = function_info.get("name")
        if not full_tool_name:
            continue
        parameters = function_info.get("parameters")
        if not isinstance(parameters, dict):
            # Minimal valid schema when the server provided none.
            parameters = {"type": "object", "properties": {}}
        # The default argument pins the tool name per iteration, avoiding
        # the late-binding closure pitfall inside this loop.
        async def _mcp_proxy(_full_tool_name=full_tool_name, **kwargs):
            if not self.mcp_manager:
                raise RuntimeError("MCP manager not initialized")
            return await self.mcp_manager.execute_tool(_full_tool_name, kwargs)
        self.ai_client.register_tool(
            name=full_tool_name,
            description=f"MCP工具: {full_tool_name}",
            parameters=parameters,
            function=_mcp_proxy,
            source="mcp",
        )
        count += 1
    return count
def _parse_traits(self, traits_raw) -> list[PersonalityTrait]:
    """Parse traits given as "A,B" or a list into PersonalityTrait members.

    Names are upper-cased before lookup; unknown names are dropped, and
    an empty result falls back to [FRIENDLY].
    """
    if isinstance(traits_raw, str):
        trait_names = [part.strip().upper() for part in traits_raw.split(",") if part.strip()]
    elif isinstance(traits_raw, list):
        trait_names = [str(part).strip().upper() for part in traits_raw if str(part).strip()]
    else:
        trait_names = []
    traits = [
        PersonalityTrait[name]
        for name in trait_names
        if name in PersonalityTrait.__members__
    ]
    return traits or [PersonalityTrait.FRIENDLY]
def _parse_personality_payload(self, key: str, payload: str) -> PersonalityProfile:
    """Parse a "/personality add" payload into a PersonalityProfile.

    Three formats, detected in order:
    1. JSON object (leading "{") with optional profile fields;
    2. plain text without "|": used as both description and instructions;
    3. legacy pipe form: name|description|speaking_style|TRAITS|custom.
    May raise json.JSONDecodeError for malformed JSON (callers catch it).
    """
    payload = payload.strip()
    if payload.startswith("{"):
        data = json.loads(payload)
        return PersonalityProfile(
            name=str(data.get("name") or key),
            description=str(data.get("description") or "自定义人设"),
            traits=self._parse_traits(data.get("traits", [])),
            speaking_style=str(data.get("speaking_style") or "自然口语"),
            example_responses=list(data.get("example_responses", [])),
            custom_instructions=str(data.get("custom_instructions") or ""),
        )
    if "|" not in payload:
        # Plain-text intro: doubles as description and custom instructions.
        introduction = payload
        return PersonalityProfile(
            name=key,
            description=introduction,
            traits=[PersonalityTrait.FRIENDLY],
            speaking_style="自然口语",
            custom_instructions=introduction,
        )
    parts = [part.strip() for part in payload.split("|")]
    # Each positional field falls back to a default when absent/empty.
    name = parts[0] if len(parts) >= 1 and parts[0] else key
    description = parts[1] if len(parts) >= 2 and parts[1] else "自定义人设"
    speaking_style = parts[2] if len(parts) >= 3 and parts[2] else "自然口语"
    traits = self._parse_traits(parts[3] if len(parts) >= 4 else "")
    custom_instructions = parts[4] if len(parts) >= 5 else ""
    return PersonalityProfile(
        name=name,
        description=description,
        traits=traits,
        speaking_style=speaking_style,
        custom_instructions=custom_instructions,
    )
async def _init_ai(self):
    """One-time AI bootstrap: model config, AI client, skills, MCP tools.

    Called lazily from handle_at_message under _init_lock. A failure is
    logged and leaves self.ai_client as None (the handler reports an
    init error to the user); MCP setup failures are non-fatal.
    """
    if self._ai_initialized:
        return
    try:
        # Unknown providers fall back to the OpenAI-compatible one.
        provider = self._provider_map().get(
            Config.AI_PROVIDER.lower(), ModelProvider.OPENAI
        )
        config = ModelConfig(
            provider=provider,
            model_name=Config.AI_MODEL,
            api_key=Config.AI_API_KEY,
            api_base=Config.AI_API_BASE,
            temperature=0.7,
        )
        # Stored profiles may override this env-derived default config.
        config = self._load_model_profiles(config)
        embed_config = None
        if Config.AI_EMBED_PROVIDER and Config.AI_EMBED_MODEL:
            embed_provider = self._provider_map().get(
                Config.AI_EMBED_PROVIDER.lower(), ModelProvider.OPENAI
            )
            embed_config = ModelConfig(
                provider=embed_provider,
                model_name=Config.AI_EMBED_MODEL,
                # Embedding credentials default to the main credentials.
                api_key=Config.AI_EMBED_API_KEY or Config.AI_API_KEY,
                api_base=Config.AI_EMBED_API_BASE or Config.AI_API_BASE,
            )
        self.ai_client = AIClient(
            model_config=config,
            embed_config=embed_config,
            data_dir=Path("data/ai"),
            use_vector_db=Config.AI_USE_VECTOR_DB,
        )
        self.skills_manager = SkillsManager(Path("skills"))
        await self.skills_manager.load_all_skills()
        total_tools = 0
        for skill_name in self.skills_manager.list_skills():
            total_tools += self._register_skill_tools(skill_name)
        logger.info(
            f"技能系统初始化完成: {len(self.skills_manager.list_skills())} skills, {total_tools} tools"
        )
        try:
            # MCP setup is best-effort: a failure only loses MCP tools.
            self.mcp_manager = MCPManager(Path("config/mcp.json"))
            fs_server = FileSystemMCPServer(root_path=Path("data"))
            await self.mcp_manager.register_server(fs_server)
            mcp_tool_count = await self._register_mcp_tools()
            logger.info(f"MCP 工具注册完成: {mcp_tool_count} tools")
        except Exception as exc:
            logger.warning(f"MCP 初始化失败: {exc}")
        self._ai_initialized = True
        logger.info("AI 系统初始化完成")
    except Exception as exc:
        logger.error(f"AI 初始化失败: {exc}")
        import traceback
        logger.error(traceback.format_exc())
async def handle_at_message(self, message: Message):
    """Entry point for @-mention messages: init AI, route command or chat.

    Strips the bot-mention token, dispatches "/" commands to
    _handle_command, and sends everything else to the AI chat with
    memory and tools enabled. All errors are reported to the user;
    timeouts get a dedicated hint.
    """
    try:
        # Double-checked locking so only the first message pays init cost.
        if not self._ai_initialized:
            async with self._init_lock:
                if not self._ai_initialized:
                    await self._init_ai()
        if not self.ai_client:
            await self._reply_plain(message, "AI 初始化失败,请检查配置")
            return
        content = (message.content or "").strip()
        user_id = self._get_user_id(message)
        # Remove the leading <@!bot_id> mention token, if present.
        if content.startswith(f"<@!{self.bot.robot.id}>"):
            content = content.replace(f"<@!{self.bot.robot.id}>", "").strip()
        if not content:
            return
        if content.startswith("/"):
            await self._handle_command(message, content)
            return
        response = await self.ai_client.chat(
            user_id=user_id,
            user_message=content,
            use_memory=True,
            use_tools=True,
        )
        await self._reply_plain(message, response)
    except Exception as exc:
        logger.error(f"处理消息失败: {exc}")
        import traceback
        logger.error(traceback.format_exc())
        # Timeouts get an actionable hint instead of a generic failure.
        if isinstance(exc, (httpx.ReadTimeout, TimeoutError, asyncio.TimeoutError)):
            await self._reply_plain(
                message,
                "模型响应超时,请稍后重试,或将当前模型配置的 timeout 调大(建议 120-180 秒)。",
            )
            return
        await self._reply_plain(message, "消息处理失败,请稍后重试")
async def _handle_skills_command(self, message: Message, command: str):
    """Handle /skills subcommands: list, install, uninstall/remove, reload."""
    if not self.skills_manager or not self.ai_client:
        await self._reply_plain(message, "技能系统未初始化")
        return
    parts = command.split()
    command_name = parts[0].lower()
    action = parts[1].lower() if len(parts) > 1 else "list"
    if action in {"list", "ls"} and len(parts) <= 2:
        loaded = self.skills_manager.list_skills()
        available = self.skills_manager.list_available_skills()
        # Available-but-not-loaded skills are shown as installable.
        unloaded = [name for name in available if name not in loaded]
        lines = ["技能状态:"]
        lines.append("已加载: " + (", ".join(loaded) if loaded else ""))
        lines.append("可安装: " + (", ".join(unloaded) if unloaded else ""))
        lines.append(self._build_skills_usage(command_name))
        await self._reply_plain(message, "\n".join(lines))
        return
    if action not in {"install", "uninstall", "remove", "reload"}:
        await self._reply_plain(message, self._build_skills_usage(command_name))
        return
    if len(parts) < 3:
        await self._reply_plain(message, self._build_skills_usage(command_name))
        return
    if action == "install":
        source = parts[2]
        desired_name = parts[3] if len(parts) > 3 else None
        try:
            source_key = self.skills_manager.normalize_skill_key(source)
        except Exception:
            source_key = None
        # A source that resolves to a locally available skill is just loaded.
        if source_key and source_key in self.skills_manager.list_available_skills():
            success = await self.skills_manager.load_skill(source_key)
            if not success:
                await self._reply_plain(message, f"加载技能失败: {source_key}")
                return
            tool_count = self._register_skill_tools(source_key)
            await self._reply_plain(
                message,
                f"已加载本地技能: {source_key}\n注册工具: {tool_count}",
            )
            return
        # Otherwise treat the source as remote (URL / owner/repo / git repo).
        ok, result = self.skills_manager.install_skill_from_source(
            source=source,
            skill_name=desired_name,
            overwrite=False,
        )
        if not ok:
            # On failure `result` carries the error description.
            await self._reply_plain(message, f"安装失败: {result}")
            return
        installed_key = result
        success = await self.skills_manager.load_skill(installed_key)
        if not success:
            await self._reply_plain(message, f"安装成功但加载失败: {installed_key}")
            return
        tool_count = self._register_skill_tools(installed_key)
        await self._reply_plain(
            message,
            f"已从来源安装并加载技能: {installed_key}\n注册工具: {tool_count}",
        )
        return
    skill_name = parts[2]
    try:
        skill_key = self.skills_manager.normalize_skill_key(skill_name)
    except Exception:
        await self._reply_plain(message, f"非法技能名: {skill_name}")
        return
    if action in {"uninstall", "remove"}:
        # Drop the skill's namespaced tools before deleting its files.
        removed_tools = self.ai_client.unregister_tools_by_prefix(f"{skill_key}.")
        removed = await self.skills_manager.uninstall_skill(skill_key, delete_files=True)
        if not removed:
            await self._reply_plain(message, f"卸载失败或技能不存在: {skill_key}")
            return
        await self._reply_plain(
            message,
            f"已卸载技能: {skill_key}\n注销工具: {removed_tools}",
        )
        return
    # Remaining action is "reload": unregister, reload, re-register tools.
    self.ai_client.unregister_tools_by_prefix(f"{skill_key}.")
    success = await self.skills_manager.reload_skill(skill_key)
    if not success:
        await self._reply_plain(message, f"重载失败: {skill_key}")
        return
    tool_count = self._register_skill_tools(skill_key)
    await self._reply_plain(message, f"已重载技能: {skill_key}\n注册工具: {tool_count}")
async def _handle_personality_command(self, message: Message, command: str):
    """Handle /personality subcommands: show, list, set/use, add, remove.

    maxsplit=3 keeps the 4th token intact so `add` payloads (JSON or
    pipe format) may contain spaces.
    """
    parts = command.split(maxsplit=3)
    if len(parts) == 1:
        # Bare /personality: show active personality and the options.
        current = self.ai_client.personality.current_personality
        names = self.ai_client.list_personalities()
        if current:
            await self._reply_plain(
                message,
                f"当前人设: {current.name}\n简介: {current.description}\n可用: {', '.join(names)}",
            )
        else:
            await self._reply_plain(message, "当前没有激活的人设")
        return
    action = parts[1].lower()
    if action == "list":
        names = self.ai_client.list_personalities()
        await self._reply_plain(message, "可用人设: " + ", ".join(names))
        return
    if action in {"set", "use"}:
        if len(parts) < 3:
            await self._reply_plain(message, self._build_personality_usage())
            return
        key = parts[2]
        if self.ai_client.set_personality(key):
            await self._reply_plain(message, f"已切换人设: {key}")
        else:
            await self._reply_plain(message, f"人设不存在: {key}")
        return
    if action in {"add", "create"}:
        if len(parts) < 4:
            await self._reply_plain(message, self._build_personality_usage())
            return
        key = parts[2]
        payload = parts[3]
        try:
            # Payload may be JSON, pipe-separated fields, or a plain intro.
            profile = self._parse_personality_payload(key, payload)
            ok = self.ai_client.personality.add_personality(key, profile)
            if not ok:
                await self._reply_plain(message, f"新增人设失败: {key}")
                return
            # A newly added personality becomes active immediately.
            self.ai_client.set_personality(key)
            await self._reply_plain(message, f"已新增并切换人设: {key}")
        except Exception as exc:
            await self._reply_plain(message, f"新增人设失败: {exc}")
        return
    if action in {"remove", "delete"}:
        if len(parts) < 3:
            await self._reply_plain(message, self._build_personality_usage())
            return
        key = parts[2]
        removed = self.ai_client.personality.remove_personality(key)
        if removed:
            await self._reply_plain(message, f"已删除人设: {key}")
        else:
            await self._reply_plain(message, f"删除失败(可能是默认人设或不存在): {key}")
        return
    # Legacy form: "/personality <name>" switches directly.
    if self.ai_client.set_personality(parts[1]):
        await self._reply_plain(message, f"已切换人设: {parts[1]}")
        return
    await self._reply_plain(message, self._build_personality_usage())
async def _handle_memory_command(self, message: Message, command: str):
    """Handle /memory subcommands: list, get, add, update, delete, search.

    Long-term memories are scoped per user id. maxsplit=3 keeps JSON or
    free-text payloads intact in the final token.
    """
    if not self.ai_client:
        await self._reply_plain(message, "AI 客户端未初始化")
        return
    user_id = self._get_user_id(message)
    parts = command.split(maxsplit=3)
    action = parts[1].lower() if len(parts) > 1 else "list"
    if action in {"list", "ls"}:
        # Default page size; an explicit value is clamped to 1..100.
        limit = 10
        if len(parts) >= 3:
            try:
                limit = max(1, min(100, int(parts[2])))
            except ValueError:
                await self._reply_plain(message, self._build_memory_usage())
                return
        memories = await self.ai_client.list_long_term_memories(user_id, limit=limit)
        if not memories:
            await self._reply_plain(message, "暂无长期记忆")
            return
        lines = ["长期记忆列表:"]
        for memory in memories:
            # One-line preview: flatten newlines and cap at 60 chars.
            content = self._plain_text(memory.content).replace("\n", " ").strip()
            if len(content) > 60:
                content = content[:57] + "..."
            lines.append(
                f"- {memory.id} | 重要性={memory.importance:.2f} | {memory.timestamp.isoformat()} | {content}"
            )
        await self._reply_plain(message, "\n".join(lines))
        return
    if action == "get":
        if len(parts) < 3:
            await self._reply_plain(message, self._build_memory_usage())
            return
        memory_id = parts[2].strip()
        memory = await self.ai_client.get_long_term_memory(user_id, memory_id)
        if not memory:
            await self._reply_plain(message, f"记忆不存在: {memory_id}")
            return
        meta_text = json.dumps(memory.metadata or {}, ensure_ascii=False)
        await self._reply_plain(
            message,
            "记忆详情:\n"
            f"id: {memory.id}\n"
            f"重要性: {memory.importance:.2f}\n"
            f"时间: {memory.timestamp.isoformat()}\n"
            f"访问次数: {memory.access_count}\n"
            f"内容: {memory.content}\n"
            f"元数据: {meta_text}",
        )
        return
    if action == "add":
        if len(parts) < 3:
            await self._reply_plain(message, self._build_memory_usage())
            return
        payload = " ".join(parts[2:]).strip()
        content = payload
        # Default importance for manually added memories.
        importance = 0.8
        metadata = None
        # A JSON payload may override content/importance/metadata.
        if payload.startswith("{"):
            try:
                data = json.loads(payload)
                if not isinstance(data, dict):
                    raise ValueError("payload 必须是对象")
                content = str(data.get("content") or "").strip()
                importance = float(data.get("importance", 0.8))
                raw_meta = data.get("metadata")
                metadata = raw_meta if isinstance(raw_meta, dict) else None
            except Exception as exc:
                await self._reply_plain(message, f"JSON 解析失败: {exc}")
                return
        if not content:
            await self._reply_plain(message, "内容不能为空")
            return
        memory = await self.ai_client.add_long_term_memory(
            user_id=user_id,
            content=content,
            importance=importance,
            metadata=metadata,
        )
        if not memory:
            await self._reply_plain(message, "新增长期记忆失败")
            return
        await self._reply_plain(
            message, f"已新增长期记忆: {memory.id} (重要性={memory.importance:.2f})"
        )
        return
    if action in {"update", "set"}:
        if len(parts) < 4:
            await self._reply_plain(message, self._build_memory_usage())
            return
        memory_id = parts[2].strip()
        payload = parts[3].strip()
        content = payload
        importance = None
        metadata = None
        if payload.startswith("{"):
            try:
                data = json.loads(payload)
                if not isinstance(data, dict):
                    raise ValueError("payload 必须是对象")
                # Only fields present in the JSON are updated; a missing
                # "content" key means "leave content unchanged" (None).
                if "content" in data:
                    content = str(data.get("content") or "")
                else:
                    content = None
                if "importance" in data:
                    importance = float(data.get("importance"))
                raw_meta = data.get("metadata")
                if raw_meta is not None:
                    if not isinstance(raw_meta, dict):
                        raise ValueError("metadata 必须是对象")
                    metadata = raw_meta
            except Exception as exc:
                await self._reply_plain(message, f"JSON 解析失败: {exc}")
                return
        updated = await self.ai_client.update_long_term_memory(
            user_id=user_id,
            memory_id=memory_id,
            content=content,
            importance=importance,
            metadata=metadata,
        )
        if not updated:
            await self._reply_plain(message, f"更新失败或记忆不存在: {memory_id}")
            return
        await self._reply_plain(
            message,
            f"已更新长期记忆: {memory_id} (重要性={updated.importance:.2f})",
        )
        return
    if action in {"delete", "remove", "rm"}:
        if len(parts) < 3:
            await self._reply_plain(message, self._build_memory_usage())
            return
        memory_id = parts[2].strip()
        deleted = await self.ai_client.delete_long_term_memory(user_id, memory_id)
        if not deleted:
            await self._reply_plain(message, f"删除失败或记忆不存在: {memory_id}")
            return
        await self._reply_plain(message, f"已删除长期记忆: {memory_id}")
        return
    if action in {"search", "find"}:
        if len(parts) < 3:
            await self._reply_plain(message, self._build_memory_usage())
            return
        query_payload = " ".join(parts[2:]).strip()
        limit = 10
        query = query_payload
        # A trailing integer is interpreted as the result limit.
        if " " in query_payload:
            possible_query, possible_limit = query_payload.rsplit(" ", 1)
            if possible_limit.isdigit():
                query = possible_query
                limit = max(1, min(100, int(possible_limit)))
        if not query:
            await self._reply_plain(message, "搜索词不能为空")
            return
        memories = await self.ai_client.search_long_term_memories(
            user_id=user_id,
            query=query,
            limit=limit,
        )
        if not memories:
            await self._reply_plain(message, f"没有匹配到相关记忆: {query}")
            return
        lines = [f"搜索结果({len(memories)} 条):"]
        for memory in memories:
            content = self._plain_text(memory.content).replace("\n", " ").strip()
            if len(content) > 60:
                content = content[:57] + "..."
            lines.append(f"- {memory.id} | 重要性={memory.importance:.2f} | {content}")
        await self._reply_plain(message, "\n".join(lines))
        return
    await self._reply_plain(message, self._build_memory_usage())
async def _handle_models_command(self, message: Message, command: str):
    """Handle /models subcommands: list, current, switch, add, remove.

    All mutations are persisted via _save_model_profiles; switch/remove
    accept either a key or a 1-based index into the sorted listing.
    """
    if not self.ai_client:
        await self._reply_plain(message, "AI 客户端未初始化")
        return
    # Re-sync in-memory profiles with disk before acting on them.
    self._ensure_model_profiles_ready()
    parts = command.split(maxsplit=3)
    action = parts[1].lower() if len(parts) > 1 else "list"
    if action in {"list", "ls"} and len(parts) <= 2:
        lines = [f"当前模型配置: {self.active_model_key}"]
        ordered_keys = self._ordered_model_keys()
        for idx, key in enumerate(ordered_keys, start=1):
            profile = self.model_profiles.get(key, {})
            # "*" marks the active profile, "-" the inactive ones.
            marker = "*" if key == self.active_model_key else "-"
            provider = str(profile.get("provider") or "?")
            model_name = str(profile.get("model_name") or "?")
            lines.append(f"{marker} {idx}. {key}: {provider}/{model_name}")
        if ordered_keys:
            lines.append(f"提示: 可用 /models switch <序号>,例如 /models switch 2")
        lines.append(self._build_models_usage("/models"))
        await self._reply_plain(message, "\n".join(lines))
        return
    if action in {"current", "show"}:
        config = self.ai_client.config
        await self._reply_plain(
            message,
            "当前模型:\n"
            f"配置名: {self.active_model_key}\n"
            f"供应商: {config.provider.value}\n"
            f"模型: {config.model_name}\n"
            f"API 地址: {config.api_base or '-'}",
        )
        return
    if action in {"switch", "set", "use"}:
        if len(parts) < 3:
            await self._reply_plain(message, self._build_models_usage())
            return
        try:
            key = self._resolve_model_selector(parts[2])
        except ValueError as exc:
            await self._reply_plain(message, str(exc))
            return
        try:
            config = self._model_config_from_dict(
                self.model_profiles[key], self.ai_client.config
            )
            self.ai_client.switch_model(config)
        except Exception as exc:
            await self._reply_plain(message, f"切换模型失败: {exc}")
            return
        # Only record the new active key after a successful switch.
        self.active_model_key = key
        self._save_model_profiles()
        await self._reply_plain(
            message, f"已切换模型: {key} ({config.provider.value}/{config.model_name})"
        )
        return
    if action in {"add", "create"}:
        # Shortcut: "/models add <model_name>" only replaces model_name
        # and keeps provider/api_base/api_key of the current config.
        if len(parts) == 3:
            model_name = parts[2].strip()
            if not model_name:
                await self._reply_plain(message, self._build_models_usage())
                return
            key = self._normalize_model_key(model_name)
            config = ModelConfig(
                provider=self.ai_client.config.provider,
                model_name=model_name,
                api_key=self.ai_client.config.api_key,
                api_base=self.ai_client.config.api_base,
                temperature=self.ai_client.config.temperature,
                max_tokens=self.ai_client.config.max_tokens,
                top_p=self.ai_client.config.top_p,
                frequency_penalty=self.ai_client.config.frequency_penalty,
                presence_penalty=self.ai_client.config.presence_penalty,
                timeout=self.ai_client.config.timeout,
                stream=self.ai_client.config.stream,
            )
            self.model_profiles[key] = self._model_config_to_dict(
                config, include_api_key=False
            )
            self.active_model_key = key
            self._save_model_profiles()
            self.ai_client.switch_model(config)
            await self._reply_plain(
                message,
                f"已保存并切换模型: {key} ({config.provider.value}/{config.model_name})",
            )
            return
        if len(parts) < 4:
            await self._reply_plain(message, self._build_models_usage())
            return
        try:
            key = self._normalize_model_key(parts[2])
        except ValueError as exc:
            await self._reply_plain(message, str(exc))
            return
        payload = parts[3].strip()
        include_api_key = False
        try:
            # Payload is either a JSON object or positional
            # "<provider> <model_name> [api_base]".
            if payload.startswith("{"):
                raw_profile = json.loads(payload)
                if not isinstance(raw_profile, dict):
                    raise ValueError("模型参数必须是 JSON 对象")
            else:
                payload_parts = payload.split()
                if len(payload_parts) < 2:
                    raise ValueError(
                        "用法: /models add <key> <provider> <model_name> [api_base]"
                    )
                raw_profile = {
                    "provider": payload_parts[0],
                    "model_name": payload_parts[1],
                }
                if len(payload_parts) >= 3:
                    raw_profile["api_base"] = payload_parts[2]
            # Persist the api_key only when the user explicitly gave one.
            include_api_key = bool(raw_profile.get("api_key"))
            config = self._model_config_from_dict(raw_profile, self.ai_client.config)
        except Exception as exc:
            await self._reply_plain(message, f"新增模型失败: {exc}")
            return
        self.model_profiles[key] = self._model_config_to_dict(
            config, include_api_key=include_api_key
        )
        self.active_model_key = key
        self._save_model_profiles()
        self.ai_client.switch_model(config)
        await self._reply_plain(
            message,
            f"已保存并切换模型: {key} ({config.provider.value}/{config.model_name})",
        )
        return
    if action in {"remove", "delete", "rm"}:
        if len(parts) < 3:
            await self._reply_plain(message, self._build_models_usage())
            return
        try:
            key = self._resolve_model_selector(parts[2])
        except ValueError as exc:
            await self._reply_plain(message, str(exc))
            return
        if key == "default":
            await self._reply_plain(message, "默认模型配置不能删除")
            return
        del self.model_profiles[key]
        switched_to = None
        # Deleting the active profile forces a switch to a fallback.
        if self.active_model_key == key:
            fallback_key = (
                "default"
                if "default" in self.model_profiles
                else (sorted(self.model_profiles.keys())[0] if self.model_profiles else None)
            )
            if not fallback_key:
                # No profiles left: recreate "default" from the live config.
                self.model_profiles["default"] = self._model_config_to_dict(
                    self.ai_client.config, include_api_key=False
                )
                fallback_key = "default"
            fallback_config = self._model_config_from_dict(
                self.model_profiles[fallback_key], self.ai_client.config
            )
            self.ai_client.switch_model(fallback_config)
            self.active_model_key = fallback_key
            switched_to = fallback_key
        self._save_model_profiles()
        if switched_to:
            await self._reply_plain(
                message, f"已删除模型: {key},已切换到: {switched_to}"
            )
        else:
            await self._reply_plain(message, f"已删除模型: {key}")
        return
    await self._reply_plain(message, self._build_models_usage())
async def _handle_command(self, message: Message, command: str):
    """Dispatch a "/" command to its handler.

    Only reached from handle_at_message after self.ai_client has been
    verified, so handlers may use it directly. Unknown commands get a
    /help hint.
    """
    user_id = self._get_user_id(message)
    if command == "/help":
        await self._reply_plain(
            message,
            "命令帮助\n"
            "====================\n"
            "基础命令\n"
            "/help\n"
            "/clear (默认等价 /clear short)\n"
            "/clear short\n"
            "/clear long\n"
            "/clear all\n"
            "\n"
            "人设命令\n"
            "/personality\n"
            "/personality list\n"
            "/personality set <key>\n"
            "/personality add <name> <Introduction>\n"
            "/personality remove <key>\n"
            "\n"
            "技能命令\n"
            "/skills\n"
            "/skills install <source> [skill_name]\n"
            "/skills uninstall <skill_name>\n"
            "/skills reload <skill_name>\n"
            "\n"
            "模型命令\n"
            "/models\n"
            "/models current\n"
            "/models add <model_name>\n"
            "/models add <key> <provider> <model_name> [api_base]\n"
            "/models switch <key|index>\n"
            "/models remove <key|index>\n"
            "\n"
            "记忆命令\n"
            "/memory\n"
            "/memory get <id>\n"
            "/memory add <content|json>\n"
            "/memory update <id> <content|json>\n"
            "/memory delete <id>\n"
            "/memory search <query> [limit]\n"
            "\n"
            "任务命令\n"
            "/task <task_id>",
        )
        return
    if command.startswith("/clear"):
        clear_parts = command.split(maxsplit=1)
        # Bare /clear defaults to clearing short-term memory only.
        clear_scope = clear_parts[1].strip().lower() if len(clear_parts) > 1 else "short"
        if clear_scope in {"all", ""}:
            cleared_all = await self.ai_client.clear_all_memory(user_id)
            if cleared_all:
                await self._reply_plain(message, "已清除短期记忆和长期记忆")
            else:
                await self._reply_plain(
                    message,
                    "短期记忆已清除,但长期记忆清除失败",
                )
            return
        if clear_scope in {"short", "short_term"}:
            self.ai_client.clear_memory(user_id)
            await self._reply_plain(message, "已清除短期记忆")
            return
        if clear_scope in {"long", "long_term"}:
            cleared_long = await self.ai_client.clear_long_term_memory(user_id)
            if cleared_long:
                await self._reply_plain(message, "已清除长期记忆")
            else:
                await self._reply_plain(message, "清除长期记忆失败")
            return
        await self._reply_plain(
            message,
            "用法:\n/clear\n/clear short\n/clear long\n/clear all",
        )
        return
    if command.startswith("/personality"):
        await self._handle_personality_command(message, command)
        return
    # Prefix matching also covers the singular aliases /skill, /model, /mem.
    if command.startswith("/skills") or command.startswith("/skill"):
        await self._handle_skills_command(message, command)
        return
    if command.startswith("/models") or command.startswith("/model"):
        await self._handle_models_command(message, command)
        return
    if command.startswith("/memory") or command.startswith("/mem"):
        await self._handle_memory_command(message, command)
        return
    if command.startswith("/task"):
        parts = command.split(maxsplit=1)
        if len(parts) == 2:
            task_id = parts[1]
            status = self.ai_client.get_task_status(task_id)
            if status:
                await self._reply_plain(
                    message,
                    "任务状态:\n"
                    f"标题: {status['title']}\n"
                    f"状态: {status['status']}\n"
                    f"进度: {status['progress'] * 100:.1f}%\n"
                    f"步骤: {status['completed_steps']}/{status['total_steps']}",
                )
            else:
                await self._reply_plain(message, "任务不存在")
        else:
            await self._reply_plain(message, "用法: /task <task_id>")
        return
    await self._reply_plain(message, "未知命令,请输入 /help 查看帮助")