Enhance logging in AIClient and OpenAIModel to include detailed information about tool usage and model responses. Add logging for tool names and counts during requests and responses, improving traceability and debugging capabilities.
This commit is contained in:
@@ -187,14 +187,44 @@ class AIClient:
|
||||
|
||||
# 准备工具
|
||||
tools = None
|
||||
tool_names: List[str] = []
|
||||
if use_tools and self.tools.list():
|
||||
tools = self.tools.to_openai_format()
|
||||
tool_names = [tool.name for tool in self.tools.list()]
|
||||
|
||||
logger.info(
|
||||
"LLM请求: "
|
||||
f"user_id={user_id}, use_memory={use_memory}, use_tools={use_tools}, "
|
||||
f"registered_tools={len(tool_names)}, sent_tools={len(tools or [])}, "
|
||||
f"tool_names={self._preview_log_payload(tool_names)}"
|
||||
)
|
||||
logger.info(
|
||||
"LLM输入: "
|
||||
f"user_message={self._preview_log_payload(user_message)}"
|
||||
)
|
||||
|
||||
# 调用模型
|
||||
if stream:
|
||||
return self._chat_stream(messages, tools, **kwargs)
|
||||
else:
|
||||
response = await self.model.chat(messages, tools, **kwargs)
|
||||
response_tool_count = len(response.tool_calls or [])
|
||||
response_tool_names = []
|
||||
for tool_call in response.tool_calls or []:
|
||||
if isinstance(tool_call, dict):
|
||||
function_info = tool_call.get("function") or {}
|
||||
response_tool_names.append(function_info.get("name"))
|
||||
else:
|
||||
function_info = getattr(tool_call, "function", None)
|
||||
response_tool_names.append(
|
||||
getattr(function_info, "name", None) if function_info else None
|
||||
)
|
||||
logger.info(
|
||||
"LLM首轮输出: "
|
||||
f"tool_calls={response_tool_count}, "
|
||||
f"tool_names={self._preview_log_payload(response_tool_names)}, "
|
||||
f"content={self._preview_log_payload(response.content)}"
|
||||
)
|
||||
|
||||
# 处理工具调用
|
||||
if response.tool_calls:
|
||||
@@ -312,7 +342,12 @@ class AIClient:
|
||||
))
|
||||
|
||||
# 再次调用模型获取最终响应
|
||||
return await self.model.chat(messages, tools, **kwargs)
|
||||
final_response = await self.model.chat(messages, tools, **kwargs)
|
||||
logger.info(
|
||||
"LLM最终输出: "
|
||||
f"content={self._preview_log_payload(final_response.content)}"
|
||||
)
|
||||
return final_response
|
||||
|
||||
def _parse_tool_call(self, tool_call: Any) -> Tuple[Optional[str], Dict[str, Any], Optional[str]]:
|
||||
"""兼容不同 SDK 返回的工具调用结构。"""
|
||||
|
||||
Reference in New Issue
Block a user