Please enter the commit message for your changes. Lines starting
with '#' will be ignored, and an empty message aborts the commit.

On branch main
Your branch is up to date with 'origin/main'.

Changes to be committed:
	new file:   .claude/skills/algorithmic-art/.openskills.json
	new file:   .claude/skills/algorithmic-art/LICENSE.txt
	new file:   .claude/skills/algorithmic-art/SKILL.md
	new file:   .claude/skills/algorithmic-art/templates/generator_template.js
	new file:   .claude/skills/algorithmic-art/templates/viewer.html
	new file:   .claude/skills/brand-guidelines/.openskills.json
	new file:   .claude/skills/brand-guidelines/LICENSE.txt
	new file:   .claude/skills/brand-guidelines/SKILL.md
	new file:   .claude/skills/canvas-design/.openskills.json
	new file:   .claude/skills/canvas-design/LICENSE.txt
	new file:   .claude/skills/canvas-design/SKILL.md
	new file:   .claude/skills/canvas-design/canvas-fonts/ArsenalSC-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/ArsenalSC-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/BigShoulders-Bold.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/BigShoulders-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/BigShoulders-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/Boldonse-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/Boldonse-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/BricolageGrotesque-Bold.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/BricolageGrotesque-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/BricolageGrotesque-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/CrimsonPro-Bold.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/CrimsonPro-Italic.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/CrimsonPro-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/CrimsonPro-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/DMMono-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/DMMono-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/EricaOne-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/EricaOne-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/GeistMono-Bold.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/GeistMono-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/GeistMono-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/Gloock-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/Gloock-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/IBMPlexMono-Bold.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/IBMPlexMono-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/IBMPlexMono-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-Bold.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-BoldItalic.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-Italic.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/InstrumentSans-Bold.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/InstrumentSans-BoldItalic.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/InstrumentSans-Italic.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/InstrumentSans-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/InstrumentSans-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/InstrumentSerif-Italic.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/InstrumentSerif-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/Italiana-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/Italiana-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/JetBrainsMono-Bold.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/JetBrainsMono-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/JetBrainsMono-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/Jura-Light.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/Jura-Medium.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/Jura-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/LibreBaskerville-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/LibreBaskerville-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/Lora-Bold.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/Lora-BoldItalic.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/Lora-Italic.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/Lora-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/Lora-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/NationalPark-Bold.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/NationalPark-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/NationalPark-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/NothingYouCouldDo-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/NothingYouCouldDo-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/Outfit-Bold.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/Outfit-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/Outfit-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/PixelifySans-Medium.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/PixelifySans-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/PoiretOne-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/PoiretOne-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/RedHatMono-Bold.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/RedHatMono-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/RedHatMono-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/Silkscreen-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/Silkscreen-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/SmoochSans-Medium.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/SmoochSans-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/Tektur-Medium.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/Tektur-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/Tektur-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/WorkSans-Bold.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/WorkSans-BoldItalic.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/WorkSans-Italic.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/WorkSans-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/WorkSans-Regular.ttf
	new file:   .claude/skills/canvas-design/canvas-fonts/YoungSerif-OFL.txt
	new file:   .claude/skills/canvas-design/canvas-fonts/YoungSerif-Regular.ttf
	new file:   .claude/skills/doc-coauthoring/.openskills.json
	new file:   .claude/skills/doc-coauthoring/SKILL.md
	new file:   .claude/skills/docx/.openskills.json
	new file:   .claude/skills/docx/LICENSE.txt
	new file:   .claude/skills/docx/SKILL.md
	new file:   .claude/skills/docx/scripts/__init__.py
	new file:   .claude/skills/docx/scripts/accept_changes.py
	new file:   .claude/skills/docx/scripts/comment.py
	new file:   .claude/skills/docx/scripts/office/helpers/__init__.py
	new file:   .claude/skills/docx/scripts/office/helpers/merge_runs.py
	new file:   .claude/skills/docx/scripts/office/helpers/simplify_redlines.py
	new file:   .claude/skills/docx/scripts/office/pack.py
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chart.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-main.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-picture.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/pml.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-math.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/sml.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-main.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/wml.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/xml.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ecma/fouth-edition/opc-contentTypes.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ecma/fouth-edition/opc-coreProperties.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ecma/fouth-edition/opc-digSig.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/ecma/fouth-edition/opc-relationships.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/mce/mc.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/microsoft/wml-2010.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/microsoft/wml-2012.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/microsoft/wml-2018.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/microsoft/wml-cex-2018.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/microsoft/wml-cid-2016.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/microsoft/wml-sdtdatahash-2020.xsd
	new file:   .claude/skills/docx/scripts/office/schemas/microsoft/wml-symex-2015.xsd
	new file:   .claude/skills/docx/scripts/office/soffice.py
	new file:   .claude/skills/docx/scripts/office/unpack.py
	new file:   .claude/skills/docx/scripts/office/validate.py
	new file:   .claude/skills/docx/scripts/office/validators/__init__.py
	new file:   .claude/skills/docx/scripts/office/validators/base.py
	new file:   .claude/skills/docx/scripts/office/validators/docx.py
	new file:   .claude/skills/docx/scripts/office/validators/pptx.py
	new file:   .claude/skills/docx/scripts/office/validators/redlining.py
	new file:   .claude/skills/docx/scripts/templates/comments.xml
	new file:   .claude/skills/docx/scripts/templates/commentsExtended.xml
	new file:   .claude/skills/docx/scripts/templates/commentsExtensible.xml
	new file:   .claude/skills/docx/scripts/templates/commentsIds.xml
	new file:   .claude/skills/docx/scripts/templates/people.xml
	new file:   .claude/skills/frontend-design/.openskills.json
	new file:   .claude/skills/frontend-design/LICENSE.txt
	new file:   .claude/skills/frontend-design/SKILL.md
	new file:   .claude/skills/internal-comms/.openskills.json
	new file:   .claude/skills/internal-comms/LICENSE.txt
	new file:   .claude/skills/internal-comms/SKILL.md
	new file:   .claude/skills/internal-comms/examples/3p-updates.md
	new file:   .claude/skills/internal-comms/examples/company-newsletter.md
	new file:   .claude/skills/internal-comms/examples/faq-answers.md
	new file:   .claude/skills/internal-comms/examples/general-comms.md
	new file:   .claude/skills/mcp-builder/.openskills.json
	new file:   .claude/skills/mcp-builder/LICENSE.txt
	new file:   .claude/skills/mcp-builder/SKILL.md
	new file:   .claude/skills/mcp-builder/reference/evaluation.md
	new file:   .claude/skills/mcp-builder/reference/mcp_best_practices.md
	new file:   .claude/skills/mcp-builder/reference/node_mcp_server.md
	new file:   .claude/skills/mcp-builder/reference/python_mcp_server.md
	new file:   .claude/skills/mcp-builder/scripts/connections.py
	new file:   .claude/skills/mcp-builder/scripts/evaluation.py
	new file:   .claude/skills/mcp-builder/scripts/example_evaluation.xml
	new file:   .claude/skills/mcp-builder/scripts/requirements.txt
	new file:   .claude/skills/pdf/.openskills.json
	new file:   .claude/skills/pdf/LICENSE.txt
	new file:   .claude/skills/pdf/SKILL.md
	new file:   .claude/skills/pdf/forms.md
	new file:   .claude/skills/pdf/reference.md
	new file:   .claude/skills/pdf/scripts/check_bounding_boxes.py
	new file:   .claude/skills/pdf/scripts/check_fillable_fields.py
	new file:   .claude/skills/pdf/scripts/convert_pdf_to_images.py
	new file:   .claude/skills/pdf/scripts/create_validation_image.py
	new file:   .claude/skills/pdf/scripts/extract_form_field_info.py
	new file:   .claude/skills/pdf/scripts/extract_form_structure.py
	new file:   .claude/skills/pdf/scripts/fill_fillable_fields.py
	new file:   .claude/skills/pdf/scripts/fill_pdf_form_with_annotations.py
	new file:   .claude/skills/pptx/.openskills.json
	new file:   .claude/skills/pptx/LICENSE.txt
	new file:   .claude/skills/pptx/SKILL.md
	new file:   .claude/skills/pptx/editing.md
	new file:   .claude/skills/pptx/pptxgenjs.md
	new file:   .claude/skills/pptx/scripts/__init__.py
	new file:   .claude/skills/pptx/scripts/add_slide.py
	new file:   .claude/skills/pptx/scripts/clean.py
	new file:   .claude/skills/pptx/scripts/office/helpers/__init__.py
	new file:   .claude/skills/pptx/scripts/office/helpers/merge_runs.py
	new file:   .claude/skills/pptx/scripts/office/helpers/simplify_redlines.py
	new file:   .claude/skills/pptx/scripts/office/pack.py
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chart.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-main.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-picture.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/pml.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-math.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/sml.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-main.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/wml.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/xml.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ecma/fouth-edition/opc-contentTypes.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ecma/fouth-edition/opc-coreProperties.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ecma/fouth-edition/opc-digSig.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/ecma/fouth-edition/opc-relationships.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/mce/mc.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/microsoft/wml-2010.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/microsoft/wml-2012.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/microsoft/wml-2018.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/microsoft/wml-cex-2018.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/microsoft/wml-cid-2016.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/microsoft/wml-sdtdatahash-2020.xsd
	new file:   .claude/skills/pptx/scripts/office/schemas/microsoft/wml-symex-2015.xsd
	new file:   .claude/skills/pptx/scripts/office/soffice.py
	new file:   .claude/skills/pptx/scripts/office/unpack.py
	new file:   .claude/skills/pptx/scripts/office/validate.py
	new file:   .claude/skills/pptx/scripts/office/validators/__init__.py
	new file:   .claude/skills/pptx/scripts/office/validators/base.py
	new file:   .claude/skills/pptx/scripts/office/validators/docx.py
	new file:   .claude/skills/pptx/scripts/office/validators/pptx.py
	new file:   .claude/skills/pptx/scripts/office/validators/redlining.py
	new file:   .claude/skills/pptx/scripts/thumbnail.py
	new file:   .claude/skills/skill-creator/.openskills.json
	new file:   .claude/skills/skill-creator/LICENSE.txt
	new file:   .claude/skills/skill-creator/SKILL.md
	new file:   .claude/skills/skill-creator/agents/analyzer.md
	new file:   .claude/skills/skill-creator/agents/comparator.md
	new file:   .claude/skills/skill-creator/agents/grader.md
	new file:   .claude/skills/skill-creator/assets/eval_review.html
	new file:   .claude/skills/skill-creator/eval-viewer/generate_review.py
	new file:   .claude/skills/skill-creator/eval-viewer/viewer.html
	new file:   .claude/skills/skill-creator/references/schemas.md
	new file:   .claude/skills/skill-creator/scripts/__init__.py
	new file:   .claude/skills/skill-creator/scripts/aggregate_benchmark.py
	new file:   .claude/skills/skill-creator/scripts/generate_report.py
	new file:   .claude/skills/skill-creator/scripts/improve_description.py
	new file:   .claude/skills/skill-creator/scripts/package_skill.py
	new file:   .claude/skills/skill-creator/scripts/quick_validate.py
	new file:   .claude/skills/skill-creator/scripts/run_eval.py
	new file:   .claude/skills/skill-creator/scripts/run_loop.py
	new file:   .claude/skills/skill-creator/scripts/utils.py
	new file:   .claude/skills/slack-gif-creator/.openskills.json
	new file:   .claude/skills/slack-gif-creator/LICENSE.txt
	new file:   .claude/skills/slack-gif-creator/SKILL.md
	new file:   .claude/skills/slack-gif-creator/core/easing.py
	new file:   .claude/skills/slack-gif-creator/core/frame_composer.py
	new file:   .claude/skills/slack-gif-creator/core/gif_builder.py
	new file:   .claude/skills/slack-gif-creator/core/validators.py
	new file:   .claude/skills/slack-gif-creator/requirements.txt
	new file:   .claude/skills/template/.openskills.json
	new file:   .claude/skills/template/SKILL.md
	new file:   .claude/skills/theme-factory/.openskills.json
	new file:   .claude/skills/theme-factory/LICENSE.txt
	new file:   .claude/skills/theme-factory/SKILL.md
	new file:   .claude/skills/theme-factory/theme-showcase.pdf
	new file:   .claude/skills/theme-factory/themes/arctic-frost.md
	new file:   .claude/skills/theme-factory/themes/botanical-garden.md
	new file:   .claude/skills/theme-factory/themes/desert-rose.md
	new file:   .claude/skills/theme-factory/themes/forest-canopy.md
	new file:   .claude/skills/theme-factory/themes/golden-hour.md
	new file:   .claude/skills/theme-factory/themes/midnight-galaxy.md
	new file:   .claude/skills/theme-factory/themes/modern-minimalist.md
	new file:   .claude/skills/theme-factory/themes/ocean-depths.md
	new file:   .claude/skills/theme-factory/themes/sunset-boulevard.md
	new file:   .claude/skills/theme-factory/themes/tech-innovation.md
	new file:   .claude/skills/web-artifacts-builder/.openskills.json
	new file:   .claude/skills/web-artifacts-builder/LICENSE.txt
	new file:   .claude/skills/web-artifacts-builder/SKILL.md
	new file:   .claude/skills/web-artifacts-builder/scripts/bundle-artifact.sh
	new file:   .claude/skills/web-artifacts-builder/scripts/init-artifact.sh
	new file:   .claude/skills/web-artifacts-builder/scripts/shadcn-components.tar.gz
	new file:   .claude/skills/webapp-testing/.openskills.json
	new file:   .claude/skills/webapp-testing/LICENSE.txt
	new file:   .claude/skills/webapp-testing/SKILL.md
	new file:   .claude/skills/webapp-testing/examples/console_logging.py
	new file:   .claude/skills/webapp-testing/examples/element_discovery.py
	new file:   .claude/skills/webapp-testing/examples/static_html_automation.py
	new file:   .claude/skills/webapp-testing/scripts/with_server.py
	new file:   .claude/skills/xlsx/.openskills.json
	new file:   .claude/skills/xlsx/LICENSE.txt
	new file:   .claude/skills/xlsx/SKILL.md
	new file:   .claude/skills/xlsx/scripts/office/helpers/__init__.py
	new file:   .claude/skills/xlsx/scripts/office/helpers/merge_runs.py
	new file:   .claude/skills/xlsx/scripts/office/helpers/simplify_redlines.py
	new file:   .claude/skills/xlsx/scripts/office/pack.py
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chart.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-main.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-picture.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/pml.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-math.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/sml.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-main.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/wml.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/xml.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ecma/fouth-edition/opc-contentTypes.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ecma/fouth-edition/opc-coreProperties.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ecma/fouth-edition/opc-digSig.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/ecma/fouth-edition/opc-relationships.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/mce/mc.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/microsoft/wml-2010.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/microsoft/wml-2012.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/microsoft/wml-2018.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/microsoft/wml-cex-2018.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/microsoft/wml-cid-2016.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/microsoft/wml-sdtdatahash-2020.xsd
	new file:   .claude/skills/xlsx/scripts/office/schemas/microsoft/wml-symex-2015.xsd
	new file:   .claude/skills/xlsx/scripts/office/soffice.py
	new file:   .claude/skills/xlsx/scripts/office/unpack.py
	new file:   .claude/skills/xlsx/scripts/office/validate.py
	new file:   .claude/skills/xlsx/scripts/office/validators/__init__.py
	new file:   .claude/skills/xlsx/scripts/office/validators/base.py
	new file:   .claude/skills/xlsx/scripts/office/validators/docx.py
	new file:   .claude/skills/xlsx/scripts/office/validators/pptx.py
	new file:   .claude/skills/xlsx/scripts/office/validators/redlining.py
	new file:   .claude/skills/xlsx/scripts/recalc.py
	new file:   .env.example
	new file:   .gitignore
	new file:   config/mcp.json
	new file:   config/models.json
	new file:   config/personalities.json
	new file:   docs/AGENTS.md
	new file:   docs/AI_IMPLEMENTATION.md
	new file:   docs/AI_INTEGRATION_COMPLETE.md
	new file:   docs/AI_QUICKSTART.md
	new file:   docs/AI_SUMMARY.md
	new file:   docs/CHANGELOG.md
	new file:   docs/CONFIG_GUIDE.md
	new file:   docs/FIXES.md
	new file:   docs/PROJECT_REFACTOR.md
	new file:   docs/README.md
	new file:   docs/README_INDEX.md
	new file:   examples/ai_example.py
	new file:   main.py
	new file:   pytest.ini
	new file:   requirements.txt
	new file:   scripts/migrate_to_vector_db.py
	new file:   skills/cmd_zip_skill/README.md
	new file:   skills/cmd_zip_skill/__init__.py
	new file:   skills/cmd_zip_skill/main.py
	new file:   skills/cmd_zip_skill/skill.json
	new file:   skills/cmd_zip_skill_1772465404375/README.md
	new file:   skills/cmd_zip_skill_1772465404375/__init__.py
	new file:   skills/cmd_zip_skill_1772465404375/main.py
	new file:   skills/cmd_zip_skill_1772465404375/skill.json
	new file:   skills/cmd_zip_skill_1772465434774/README.md
	new file:   skills/cmd_zip_skill_1772465434774/__init__.py
	new file:   skills/cmd_zip_skill_1772465434774/main.py
	new file:   skills/cmd_zip_skill_1772465434774/skill.json
	new file:   skills/cmd_zip_skill_1772465467809/README.md
	new file:   skills/cmd_zip_skill_1772465467809/__init__.py
	new file:   skills/cmd_zip_skill_1772465467809/main.py
	new file:   skills/cmd_zip_skill_1772465467809/skill.json
	new file:   skills/cmd_zip_skill_1772465652075/README.md
	new file:   skills/cmd_zip_skill_1772465652075/__init__.py
	new file:   skills/cmd_zip_skill_1772465652075/main.py
	new file:   skills/cmd_zip_skill_1772465652075/skill.json
	new file:   skills/cmd_zip_skill_1772465685352/README.md
	new file:   skills/cmd_zip_skill_1772465685352/__init__.py
	new file:   skills/cmd_zip_skill_1772465685352/main.py
	new file:   skills/cmd_zip_skill_1772465685352/skill.json
	new file:   skills/cmd_zip_skill_1772465936294/README.md
	new file:   skills/cmd_zip_skill_1772465936294/__init__.py
	new file:   skills/cmd_zip_skill_1772465936294/main.py
	new file:   skills/cmd_zip_skill_1772465936294/skill.json
	new file:   skills/cmd_zip_skill_1772465966322/README.md
	new file:   skills/cmd_zip_skill_1772465966322/__init__.py
	new file:   skills/cmd_zip_skill_1772465966322/main.py
	new file:   skills/cmd_zip_skill_1772465966322/skill.json
	new file:   skills/cmd_zip_skill_1772466071278/README.md
	new file:   skills/cmd_zip_skill_1772466071278/__init__.py
	new file:   skills/cmd_zip_skill_1772466071278/main.py
	new file:   skills/cmd_zip_skill_1772466071278/skill.json
	new file:   skills/skills_creator/README.md
	new file:   skills/skills_creator/__init__.py
	new file:   skills/skills_creator/main.py
	new file:   skills/skills_creator/skill.json
	new file:   src/__init__.py
	new file:   src/ai/__init__.py
	new file:   src/ai/base.py
	new file:   src/ai/client.py
	new file:   src/ai/docs/README.md
	new file:   src/ai/mcp/__init__.py
	new file:   src/ai/mcp/base.py
	new file:   src/ai/mcp/servers/__init__.py
	new file:   src/ai/mcp/servers/filesystem.py
	new file:   src/ai/memory.py
	new file:   src/ai/models/__init__.py
	new file:   src/ai/models/anthropic_model.py
	new file:   src/ai/models/openai_model.py
	new file:   src/ai/personality.py
	new file:   src/ai/skills/__init__.py
	new file:   src/ai/skills/base.py
	new file:   src/ai/task_manager.py
	new file:   src/ai/vector_store/__init__.py
	new file:   src/ai/vector_store/base.py
	new file:   src/ai/vector_store/chroma_store.py
	new file:   src/ai/vector_store/json_store.py
	new file:   src/core/__init__.py
	new file:   src/core/bot.py
	new file:   src/core/config.py
	new file:   src/handlers/__init__.py
	new file:   src/handlers/message_handler.py
	new file:   src/handlers/message_handler_ai.py
	new file:   src/utils/__init__.py
	new file:   src/utils/logger.py
	new file:   start.bat
	new file:   tests/test_ai.py
This commit is contained in:
Mimikko-zeus
2026-03-03 01:23:23 +08:00
parent b7940f2ff6
commit ae208af6a9
453 changed files with 99883 additions and 0 deletions

4
src/__init__.py Normal file
View File

@@ -0,0 +1,4 @@
"""
QQ机器人主包
"""
__version__ = "1.0.0"

14
src/ai/__init__.py Normal file
View File

@@ -0,0 +1,14 @@
"""
AI模块 - 提供AI模型接入、人格系统、记忆系统和长任务处理能力
"""
from .client import AIClient
from .personality import PersonalitySystem
from .memory import MemorySystem
from .task_manager import LongTaskManager
__all__ = [
'AIClient',
'PersonalitySystem',
'MemorySystem',
'LongTaskManager'
]

130
src/ai/base.py Normal file
View File

@@ -0,0 +1,130 @@
"""
AI模型基类和接口定义
"""
from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional, AsyncIterator
from dataclasses import dataclass
from enum import Enum
class ModelProvider(Enum):
    """Supported AI model providers."""
    OPENAI = "openai"
    ANTHROPIC = "anthropic"
    DEEPSEEK = "deepseek"
    QWEN = "qwen"
    LOCAL = "local"
@dataclass
class Message:
    """A single chat message exchanged with a model."""
    role: str  # system, user, assistant, tool
    content: str
    name: Optional[str] = None
    # Set on assistant messages that request tool invocations
    tool_calls: Optional[List[Dict]] = None
    # Set on role="tool" messages, linking back to the originating call
    tool_call_id: Optional[str] = None
@dataclass
class ModelConfig:
    """Configuration for one AI model endpoint (provider, credentials, sampling)."""
    provider: ModelProvider
    model_name: str
    api_key: Optional[str] = None
    api_base: Optional[str] = None  # optional custom API base URL
    temperature: float = 0.7
    max_tokens: int = 4096
    top_p: float = 1.0
    frequency_penalty: float = 0.0
    presence_penalty: float = 0.0
    timeout: int = 60
    stream: bool = False
class BaseAIModel(ABC):
    """Abstract interface that all concrete AI models implement."""

    def __init__(self, config: ModelConfig):
        self.config = config

    @abstractmethod
    async def chat(
        self,
        messages: List[Message],
        tools: Optional[List[Dict]] = None,
        **kwargs
    ) -> Message:
        """Non-streaming chat completion; returns the assistant Message."""
        pass

    @abstractmethod
    async def chat_stream(
        self,
        messages: List[Message],
        tools: Optional[List[Dict]] = None,
        **kwargs
    ) -> AsyncIterator[str]:
        """Streaming chat completion, yielding text chunks."""
        pass

    @abstractmethod
    async def embed(self, text: str) -> List[float]:
        """Embed *text* into a vector."""
        pass
@dataclass
class ToolDefinition:
    """Definition of an AI-invocable tool."""
    name: str
    description: str
    # JSON-schema-style parameter spec (OpenAI function-calling format)
    parameters: Dict[str, Any]
    # Sync or async callable implementing the tool
    function: callable
class ToolRegistry:
    """Registry mapping tool names to their ToolDefinition objects."""

    def __init__(self):
        self._tools: Dict[str, ToolDefinition] = {}

    def register(self, tool: ToolDefinition):
        """Register a tool under its own name (overwrites any existing entry)."""
        self._tools[tool.name] = tool

    def unregister(self, name: str) -> bool:
        """Remove a tool by name; return True if it was present."""
        return self._tools.pop(name, None) is not None

    def unregister_by_prefix(self, prefix: str) -> int:
        """Remove every tool whose name starts with *prefix*; return the count."""
        doomed = [tool_name for tool_name in self._tools if tool_name.startswith(prefix)]
        for tool_name in doomed:
            self._tools.pop(tool_name)
        return len(doomed)

    def get(self, name: str) -> Optional[ToolDefinition]:
        """Look up a tool definition, or None if unknown."""
        return self._tools.get(name)

    def list(self) -> List[ToolDefinition]:
        """Return all registered tool definitions."""
        return [*self._tools.values()]

    def to_openai_format(self) -> List[Dict]:
        """Render registered tools as OpenAI function-calling specs."""
        specs = []
        for tool in self._tools.values():
            specs.append({
                "type": "function",
                "function": {
                    "name": tool.name,
                    "description": tool.description,
                    "parameters": tool.parameters
                }
            })
        return specs

475
src/ai/client.py Normal file
View File

@@ -0,0 +1,475 @@
"""
AI瀹㈡埛绔?- 鏁村悎鎵€鏈堿I鍔熻兘
"""
import inspect
import json
import re
from typing import List, Optional, Dict, Any, AsyncIterator, Tuple
from pathlib import Path
from .base import ModelConfig, ModelProvider, Message, ToolRegistry
from .models import OpenAIModel, AnthropicModel
from .personality import PersonalitySystem
from .memory import MemorySystem
from .task_manager import LongTaskManager
from src.utils.logger import setup_logger
logger = setup_logger('AIClient')
class AIClient:
    """AI client - unified interface over model, tools, personality, memory and tasks."""

    def __init__(
        self,
        model_config: ModelConfig,
        embed_config: Optional[ModelConfig] = None,
        data_dir: Path = Path("data/ai"),
        use_vector_db: bool = True
    ):
        """Build the client and its subsystems under *data_dir*.

        Args:
            model_config: configuration of the primary chat model.
            embed_config: optional dedicated embedding-model configuration.
            data_dir: directory for persisted state (created if missing).
            use_vector_db: whether the memory system should try a vector DB.
        """
        self.config = model_config
        self.data_dir = data_dir
        self.data_dir.mkdir(parents=True, exist_ok=True)
        # Primary chat model
        self.model = self._create_model(model_config)
        # Dedicated embedding model (optional)
        self.embed_model = None
        if embed_config:
            self.embed_model = self._create_model(embed_config)
            logger.info(
                f"嵌入模型初始化完成: {embed_config.provider.value}/{embed_config.model_name}"
            )
        # Tool registry
        self.tools = ToolRegistry()
        # Personality system
        self.personality = PersonalitySystem(
            config_path=data_dir / "personalities.json"
        )
        # Memory system (short-term window + long-term store)
        self.memory = MemorySystem(
            storage_path=data_dir / "long_term_memory.json",
            embed_func=self._embed_wrapper,
            importance_evaluator=self._evaluate_memory_importance,
            use_vector_db=use_vector_db
        )
        # Long-running task manager
        self.task_manager = LongTaskManager(
            storage_path=data_dir / "tasks.json"
        )
        logger.info(
            f"AI 客户端初始化完成: {model_config.provider.value}/{model_config.model_name}"
        )

    def _create_model(self, config: ModelConfig):
        """Instantiate the concrete model class for *config*'s provider."""
        if config.provider == ModelProvider.OPENAI:
            return OpenAIModel(config)
        elif config.provider == ModelProvider.ANTHROPIC:
            return AnthropicModel(config)
        elif config.provider in [ModelProvider.DEEPSEEK, ModelProvider.QWEN]:
            # DeepSeek and Qwen expose OpenAI-compatible APIs
            return OpenAIModel(config)
        else:
            raise ValueError(f"不支持的模型提供商: {config.provider}")

    async def _embed_wrapper(self, text: str) -> List[float]:
        """Produce an embedding for *text*, or None when unsupported or failed."""
        try:
            # Prefer the dedicated embedding model when one is configured
            if self.embed_model:
                return await self.embed_model.embed(text)
            # Otherwise fall back to the primary model
            return await self.model.embed(text)
        except NotImplementedError:
            # Neither model supports embeddings; the memory system degrades gracefully
            logger.warning("Current model does not support embeddings; vector retrieval disabled")
            return None
        except Exception as e:
            logger.error(f"生成嵌入向量失败: {e}")
            return None

    @staticmethod
    def _parse_importance_score(raw: str) -> float:
        """Extract a numeric importance score from a raw model response.

        Accepts a bare number, a JSON object with an importance/score/value
        key, or free text containing a number; raises ValueError otherwise.
        """
        text = (raw or "").strip()
        if not text:
            raise ValueError("empty importance response")
        try:
            parsed = json.loads(text)
            if isinstance(parsed, (int, float)):
                return float(parsed)
            if isinstance(parsed, dict):
                for key in ["importance", "score", "value"]:
                    if key in parsed:
                        return float(parsed[key])
        except Exception:
            pass
        # Last resort: first number anywhere in the text
        match = re.search(r"-?\d+(?:\.\d+)?", text)
        if not match:
            raise ValueError(f"cannot parse importance score: {text}")
        return float(match.group(0))

    async def _evaluate_memory_importance(
        self, content: str, metadata: Optional[Dict] = None
    ) -> float:
        """
        Ask the primary model to score memory importance; returns a value in [0, 1].

        Falls back to a neutral 0.5 when the call or parsing fails.
        """
        system_prompt = (
            "你是记忆重要性评估器。请根据输入内容判断该信息是否值得长期记忆。"
            "输出一个 0 到 1 的数字,数字越大表示越重要。"
            "只输出数字,不要输出任何解释、单位或多余文本。"
        )
        payload = json.dumps(
            {"content": content, "metadata": metadata or {}},
            ensure_ascii=False,
        )
        messages = [
            Message(role="system", content=system_prompt),
            Message(role="user", content=payload),
        ]
        try:
            response = await self.model.chat(
                messages=messages,
                tools=None,
                temperature=0.0,
                max_tokens=16,
            )
            score = self._parse_importance_score(response.content)
            # Clamp into [0, 1]
            return max(0.0, min(1.0, score))
        except Exception as e:
            logger.warning(f"memory importance evaluation failed, fallback to neutral score: {e}")
            return 0.5

    async def chat(
        self,
        user_id: str,
        user_message: str,
        system_prompt: Optional[str] = None,
        use_memory: bool = True,
        use_tools: bool = True,
        stream: bool = False,
        **kwargs
    ) -> str:
        """Main conversation entry point.

        Returns the assistant's reply text. When stream=True it instead
        returns an async iterator of text chunks (callers must await the
        coroutine, then iterate the result).
        """
        try:
            # Assemble the message list
            messages = []
            # System prompt (defaults to the active personality's prompt)
            if system_prompt is None:
                system_prompt = self.personality.get_system_prompt()
            # Inject memory context into the system prompt
            if use_memory:
                short_term, long_term = await self.memory.get_context(
                    user_id=user_id,
                    query=user_message
                )
                if short_term or long_term:
                    memory_context = self.memory.format_context(short_term, long_term)
                    system_prompt += f"\n\n{memory_context}"
            messages.append(Message(role="system", content=system_prompt))
            # Append the user message
            messages.append(Message(role="user", content=user_message))
            # Prepare tool specs
            tools = None
            if use_tools and self.tools.list():
                tools = self.tools.to_openai_format()
            # Invoke the model
            if stream:
                return self._chat_stream(messages, tools, **kwargs)
            else:
                response = await self.model.chat(messages, tools, **kwargs)
                # Execute any tool calls the model requested
                if response.tool_calls:
                    response = await self._handle_tool_calls(
                        messages, response, tools, **kwargs
                    )
                # Persist the exchange to memory
                if use_memory:
                    stored_memory = await self.memory.add_qa_pair(
                        user_id=user_id,
                        question=user_message,
                        answer=response.content,
                        metadata={"source": "chat"},
                    )
                    if stored_memory:
                        logger.info(
                            "已写入长期记忆问答对:\n"
                            f"{stored_memory.content}\n"
                            f"memory_id={stored_memory.id}, "
                            f"importance={stored_memory.importance:.2f}"
                        )
                return response.content
        except Exception as e:
            logger.error(f"对话失败: {e}")
            raise

    async def _chat_stream(
        self,
        messages: List[Message],
        tools: Optional[List[Dict]],
        **kwargs
    ) -> AsyncIterator[str]:
        """Yield response chunks from the model's streaming API."""
        async for chunk in self.model.chat_stream(messages, tools, **kwargs):
            yield chunk

    async def _handle_tool_calls(
        self,
        messages: List[Message],
        response: Message,
        tools: Optional[List[Dict]],
        **kwargs
    ) -> Message:
        """Run the tool calls in *response*, then ask the model for a final reply."""
        messages.append(response)
        # Execute each requested tool call
        for tool_call in response.tool_calls or []:
            try:
                tool_name, tool_args, tool_call_id = self._parse_tool_call(tool_call)
            except Exception as e:
                logger.warning(f"解析工具调用失败: {e}")
                fallback_id = tool_call.get('id') if isinstance(tool_call, dict) else getattr(tool_call, 'id', None)
                if fallback_id:
                    messages.append(Message(
                        role="tool",
                        content=f"工具参数解析失败: {str(e)}",
                        tool_call_id=fallback_id
                    ))
                continue
            if not tool_name:
                logger.warning(f"跳过无效工具调用: {tool_call}")
                continue
            tool_def = self.tools.get(tool_name)
            if not tool_def:
                error_msg = f"未找到工具: {tool_name}"
                logger.warning(error_msg)
                if tool_call_id:
                    messages.append(Message(
                        role="tool",
                        content=error_msg,
                        tool_call_id=tool_call_id
                    ))
                continue
            try:
                result = tool_def.function(**tool_args)
                # Support both sync and async tool functions
                if inspect.isawaitable(result):
                    result = await result
                if tool_call_id:
                    messages.append(Message(
                        role="tool",
                        content=str(result),
                        tool_call_id=tool_call_id
                    ))
            except Exception as e:
                if tool_call_id:
                    messages.append(Message(
                        role="tool",
                        content=f"工具执行失败: {str(e)}",
                        tool_call_id=tool_call_id
                    ))
        # Call the model again to produce the final response
        return await self.model.chat(messages, tools, **kwargs)

    def _parse_tool_call(self, tool_call: Any) -> Tuple[Optional[str], Dict[str, Any], Optional[str]]:
        """Normalize tool-call structures returned by different SDKs (dicts or objects)."""
        if isinstance(tool_call, dict):
            tool_call_id = tool_call.get('id')
            function = tool_call.get('function') or {}
            tool_name = function.get('name')
            raw_args = function.get('arguments')
        else:
            tool_call_id = getattr(tool_call, 'id', None)
            function = getattr(tool_call, 'function', None)
            tool_name = getattr(function, 'name', None) if function else None
            raw_args = getattr(function, 'arguments', None) if function else None
        tool_args = self._normalize_tool_args(raw_args)
        return tool_name, tool_args, tool_call_id

    def _normalize_tool_args(self, raw_args: Any) -> Dict[str, Any]:
        """Coerce tool arguments (None / dict / JSON string / model object) into a dict.

        Raises ValueError when the input does not normalize to a JSON object.
        """
        if raw_args is None:
            return {}
        if isinstance(raw_args, dict):
            return raw_args
        if isinstance(raw_args, str):
            raw_args = raw_args.strip()
            if not raw_args:
                return {}
            parsed = json.loads(raw_args)
            if not isinstance(parsed, dict):
                raise ValueError(f"工具参数必须是 JSON 对象,实际类型: {type(parsed)}")
            return parsed
        if hasattr(raw_args, 'model_dump'):
            # Objects exposing model_dump() (e.g. pydantic models)
            parsed = raw_args.model_dump()
            if isinstance(parsed, dict):
                return parsed
        raise ValueError(f"不支持的工具参数类型: {type(raw_args)}")

    def set_personality(self, personality_name: str) -> bool:
        """Switch the active personality; returns True on success."""
        return self.personality.set_personality(personality_name)

    def list_personalities(self) -> List[str]:
        """Names of all available personalities."""
        return self.personality.list_personalities()

    def switch_model(self, model_config: ModelConfig) -> bool:
        """Runtime switch for primary chat model."""
        new_model = self._create_model(model_config)
        self.model = new_model
        self.config = model_config
        logger.info(
            f"已切换主模型: {model_config.provider.value}/{model_config.model_name}"
        )
        return True

    async def create_long_task(
        self,
        user_id: str,
        title: str,
        description: str,
        steps: List[Dict],
        metadata: Optional[Dict] = None
    ) -> str:
        """Create a multi-step long-running task; returns the new task id."""
        return self.task_manager.create_task(
            user_id=user_id,
            title=title,
            description=description,
            steps=steps,
            metadata=metadata
        )

    async def start_task(
        self,
        task_id: str,
        progress_callback: Optional[callable] = None
    ):
        """Start executing a previously created task."""
        await self.task_manager.start_task(task_id, progress_callback)

    def get_task_status(self, task_id: str) -> Optional[Dict]:
        """Return the task's status, or None if unknown."""
        return self.task_manager.get_task_status(task_id)

    def register_tool(self, name: str, description: str, parameters: Dict, function: callable):
        """Register a callable as an AI-invocable tool."""
        from .base import ToolDefinition
        tool = ToolDefinition(
            name=name,
            description=description,
            parameters=parameters,
            function=function
        )
        self.tools.register(tool)
        logger.info(f"已注册工具: {name}")

    def unregister_tool(self, name: str) -> bool:
        """Remove a tool by name; returns True if it existed."""
        removed = self.tools.unregister(name)
        if removed:
            logger.info(f"已卸载工具: {name}")
        return removed

    def unregister_tools_by_prefix(self, prefix: str) -> int:
        """Remove all tools whose names start with *prefix*; returns the count."""
        removed_count = self.tools.unregister_by_prefix(prefix)
        if removed_count:
            logger.info(f"Unregistered tools by prefix {prefix}: {removed_count}")
        return removed_count

    def clear_memory(self, user_id: str):
        """Clear the user's short-term memory only."""
        self.memory.clear_short_term(user_id)
        logger.info(f"Cleared short-term memory for user {user_id}")

    async def clear_long_term_memory(self, user_id: str) -> bool:
        """Clear the user's long-term memory; returns False on failure."""
        try:
            await self.memory.clear_long_term(user_id)
            logger.info(f"Cleared long-term memory for user {user_id}")
            return True
        except Exception as e:
            logger.warning(f"Failed to clear long-term memory for user {user_id}: {e}")
            return False

    async def list_long_term_memories(self, user_id: str, limit: int = 20):
        """List up to *limit* long-term memories for the user."""
        return await self.memory.list_long_term(user_id, limit=limit)

    async def get_long_term_memory(self, user_id: str, memory_id: str):
        """Fetch a single long-term memory by id."""
        return await self.memory.get_long_term(user_id, memory_id)

    async def add_long_term_memory(
        self,
        user_id: str,
        content: str,
        importance: float = 0.8,
        metadata: Optional[Dict] = None,
    ):
        """Manually store a long-term memory entry."""
        return await self.memory.add_long_term(
            user_id=user_id,
            content=content,
            importance=importance,
            metadata=metadata,
        )

    async def search_long_term_memories(
        self, user_id: str, query: str, limit: int = 10
    ):
        """Search the user's long-term memories for *query*."""
        return await self.memory.search_long_term(user_id, query=query, limit=limit)

    async def update_long_term_memory(
        self,
        user_id: str,
        memory_id: str,
        content: Optional[str] = None,
        importance: Optional[float] = None,
        metadata: Optional[Dict] = None,
    ):
        """Update fields of an existing long-term memory."""
        return await self.memory.update_long_term(
            user_id=user_id,
            memory_id=memory_id,
            content=content,
            importance=importance,
            metadata=metadata,
        )

    async def delete_long_term_memory(self, user_id: str, memory_id: str) -> bool:
        """Delete a long-term memory by id."""
        return await self.memory.delete_long_term(user_id, memory_id)

    async def clear_all_memory(self, user_id: str) -> bool:
        """Clear both short-term and long-term memory for the user."""
        self.clear_memory(user_id)
        try:
            return await self.clear_long_term_memory(user_id)
        except Exception:
            return False

545
src/ai/docs/README.md Normal file
View File

@@ -0,0 +1,545 @@
# AI模型接入完整指南
## 概述
本系统提供了完整的AI模型接入能力,包括:
- **多模型支持**:OpenAI、Anthropic Claude、DeepSeek、Qwen等
- **人格系统**:可自定义AI性格特征和说话风格
- **记忆系统**:短期记忆(滑动窗口)+ 长期记忆(RAG检索)
- **长任务处理**:支持多步骤复杂任务的执行和管理
- **Skills插件**:可扩展的技能插件系统
- **MCP支持**:Model Context Protocol标准支持
## 快速开始
### 1. 安装依赖
```bash
pip install openai anthropic numpy
```
### 2. 配置环境变量
`.env` 文件中添加:
```env
# AI模型配置
AI_PROVIDER=openai # openai, anthropic, deepseek, qwen
AI_MODEL=gpt-4
AI_API_KEY=your_api_key_here
AI_API_BASE=https://api.openai.com/v1 # 可选自定义API地址
```
### 3. 基础使用
```python
from pathlib import Path
from src.ai import AIClient
from src.ai.base import ModelConfig, ModelProvider
# 创建模型配置
config = ModelConfig(
provider=ModelProvider.OPENAI,
model_name="gpt-4",
api_key="your_api_key",
temperature=0.7
)
# 初始化AI客户端
client = AIClient(config, data_dir=Path("data/ai"))
# 对话
response = await client.chat(
user_id="user123",
user_message="你好,介绍一下你自己",
use_memory=True,
use_tools=True
)
print(response)
```
## 核心功能详解
### 1. 人格系统
人格系统允许你为AI定义独特的性格特征和说话风格。
#### 使用预设人格
```python
# 列出所有人格
personalities = client.list_personalities()
print(personalities) # ['default', 'tech_expert', 'creative']
# 切换人格
client.set_personality('tech_expert')
```
#### 自定义人格
```python
from src.ai.personality import PersonalityProfile, PersonalityTrait
# 创建自定义人格
custom_personality = PersonalityProfile(
name="游戏助手",
description="专业的游戏顾问和陪玩伙伴",
traits=[
PersonalityTrait.FRIENDLY,
PersonalityTrait.HUMOROUS,
PersonalityTrait.CREATIVE
],
speaking_style="活泼、热情、善用游戏术语和梗",
example_responses=[
"哇!这个操作太秀了!",
"让我们一起冲冲冲!",
"别慌,稳住我们能赢!"
],
custom_instructions="始终保持积极乐观的态度,鼓励玩家"
)
# 添加到系统
client.personality.add_personality('gamer', custom_personality)
client.set_personality('gamer')
```
### 2. 记忆系统
记忆系统分为短期记忆和长期记忆两部分。
#### 短期记忆
- 自动保存最近的对话(默认20条)
- 使用滑动窗口,自动清理过期记忆(默认30分钟)
- 无需手动管理
#### 长期记忆
- 自动评估重要性,重要对话会被保存
- 支持向量检索RAG
- 可手动查询历史记忆
```python
# 获取用户的记忆上下文
short_term, long_term = await client.memory.get_context(
user_id="user123",
query="我们之前讨论过什么?",
max_short_term=10,
max_long_term=5
)
# 格式化为文本
context = client.memory.format_context(short_term, long_term)
print(context)
# 清除短期记忆
client.clear_memory("user123")
```
#### 记忆重要性评估
系统会自动评估对话的重要性:
- 内容长度(>100字符 +0.1分)
- 关键词("重要"、"记住"、"别忘了" +0.2分)
- 元数据(问题 +0.1分,代码 +0.15分)
- 阈值≥0.6分会被保存到长期记忆
### 3. 长任务处理
支持将复杂任务分解为多个步骤执行。
```python
# 创建长任务
task_id = await client.create_long_task(
user_id="user123",
title="数据分析任务",
description="分析用户行为数据并生成报告",
steps=[
{
"description": "加载数据",
"action": "load_data",
"params": {"file_path": "data.csv"}
},
{
"description": "数据清洗",
"action": "clean_data",
"params": {}
},
{
"description": "统计分析",
"action": "analyze",
"params": {"metrics": ["avg", "max", "min"]}
},
{
"description": "生成报告",
"action": "generate_report",
"params": {"format": "pdf"}
}
]
)
# 定义进度回调
async def on_progress(task_id, progress, message):
print(f"任务 {task_id}: {progress*100:.1f}% - {message}")
# 启动任务
await client.start_task(task_id, progress_callback=on_progress)
# 查询任务状态
status = client.get_task_status(task_id)
print(status)
```
#### 注册任务动作处理器
```python
# 注册动作处理器
async def load_data_handler(file_path: str):
# 实现数据加载逻辑
return {"rows": 1000, "columns": 10}
client.task_manager.register_action("load_data", load_data_handler)
```
### 4. Skills插件系统
Skills系统允许你扩展AI的能力。
#### 加载技能
```python
from pathlib import Path
from src.ai.skills import SkillsManager
# 创建技能管理器
skills_manager = SkillsManager(skills_dir=Path("skills"))
# 加载单个技能
await skills_manager.load_skill("weather")
# 加载所有技能
await skills_manager.load_all_skills()
# 获取所有工具
tools = skills_manager.get_all_tools()
```
#### 创建自定义技能
```python
from src.ai.skills import create_skill_template
# 创建技能模板
create_skill_template("my_skill", Path("skills"))
```
技能目录结构:
```
skills/
└── my_skill/
├── skill.json # 元数据
├── main.py # 主要实现
├── __init__.py
└── README.md
```
`main.py` 示例:
```python
from src.ai.skills.base import Skill
class MySkill(Skill):
async def initialize(self):
# 注册工具
self.register_tool("my_tool", self.my_tool)
async def my_tool(self, param: str) -> str:
return f"处理: {param}"
async def cleanup(self):
pass
```
### 5. MCP支持
MCP (Model Context Protocol) 提供标准化的上下文访问接口。
#### 使用内置MCP服务器
```python
from pathlib import Path
from src.ai.mcp import MCPManager
from src.ai.mcp.servers import FileSystemMCPServer
# 创建MCP管理器
mcp_manager = MCPManager(config_path=Path("config/mcp.json"))
# 注册文件系统服务器
fs_server = FileSystemMCPServer(root_path=Path("data"))
await mcp_manager.register_server(fs_server)
# 获取所有工具
tools = await mcp_manager.get_all_tools_for_ai()
# 执行工具
result = await mcp_manager.execute_tool(
"filesystem.read_file",
{"path": "test.txt"}
)
```
#### 创建自定义MCP服务器
```python
from src.ai.mcp.base import MCPServer
class MyMCPServer(MCPServer):
def __init__(self):
super().__init__(name="my_server", version="1.0.0")
async def initialize(self):
self.register_tool(
name="my_tool",
description="我的工具",
input_schema={
"type": "object",
"properties": {
"param": {"type": "string"}
},
"required": ["param"]
},
handler=self.my_tool_handler
)
async def my_tool_handler(self, param: str):
return f"处理: {param}"
```
## 集成到QQ机器人
### 修改消息处理器
```python
# src/handlers/message_handler.py
from src.ai import AIClient
from src.ai.base import ModelConfig, ModelProvider
from pathlib import Path
class MessageHandler:
def __init__(self, bot):
self.bot = bot
# 初始化AI客户端
config = ModelConfig(
provider=ModelProvider.OPENAI,
model_name="gpt-4",
api_key=Config.AI_API_KEY,
api_base=Config.AI_API_BASE
)
self.ai_client = AIClient(config, data_dir=Path("data/ai"))
# 加载技能
from src.ai.skills import SkillsManager
self.skills = SkillsManager(Path("skills"))
asyncio.create_task(self.skills.load_all_skills())
async def handle_at_message(self, message: Message):
try:
# 提取用户消息
user_message = message.content.strip()
user_id = message.author.id
# 调用AI
response = await self.ai_client.chat(
user_id=user_id,
user_message=user_message,
use_memory=True,
use_tools=True
)
# 发送回复
await message.reply(content=response)
except Exception as e:
logger.error(f"处理消息失败: {e}")
await message.reply(content="抱歉,处理消息时出错了")
```
## 配置文件
### config/personalities.json
```json
{
"default": {
"name": "小助手",
"description": "友好的AI助手",
"traits": ["FRIENDLY", "PROFESSIONAL", "EMPATHETIC"],
"speaking_style": "温和、清晰、有条理",
"example_responses": [
"我明白你的意思了,让我来帮你解决这个问题。",
"这是个很好的问题!我来详细解释一下。"
],
"custom_instructions": ""
}
}
```
### config/mcp.json
```json
{
"filesystem": {
"enabled": true,
"root_path": "data"
}
}
```
## 高级用法
### 1. 流式响应
```python
async for chunk in await client.chat(
user_id="user123",
user_message="讲个故事",
stream=True
):
print(chunk, end='', flush=True)
```
### 2. 自定义工具
```python
# 定义工具函数
async def search_web(query: str) -> str:
# 实现搜索逻辑
return f"搜索结果: {query}"
# 注册工具
client.register_tool(
name="search_web",
description="搜索网络信息",
parameters={
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "搜索关键词"
}
},
"required": ["query"]
},
function=search_web
)
```
### 3. 多模型切换
```python
# 切换到Claude
claude_config = ModelConfig(
provider=ModelProvider.ANTHROPIC,
model_name="claude-3-5-sonnet-20241022",
api_key="your_anthropic_key"
)
client.model = client._create_model(claude_config)
# 切换到DeepSeek
deepseek_config = ModelConfig(
provider=ModelProvider.DEEPSEEK,
model_name="deepseek-chat",
api_key="your_deepseek_key",
api_base="https://api.deepseek.com/v1"
)
client.model = client._create_model(deepseek_config)
```
## 性能优化
### 1. 记忆系统优化
```python
# 调整短期记忆大小
client.memory.short_term.max_size = 30 # 默认20
# 调整记忆过期时间
from datetime import timedelta
client.memory.short_term.max_age = timedelta(hours=1) # 默认30分钟
# 调整重要性阈值
client.memory.importance_threshold = 0.7 # 默认0.6
```
### 2. 并发任务处理
```python
# 同时启动多个任务
tasks = []
for i in range(5):
task_id = await client.create_long_task(...)
tasks.append(client.start_task(task_id))
# 等待所有任务完成
await asyncio.gather(*tasks)
```
## 故障排查
### 常见问题
1. **API调用失败**
- 检查API密钥是否正确
- 检查网络连接
- 检查API地址是否正确
2. **记忆系统不工作**
- 确保 `use_memory=True`
- 检查数据目录权限
- 查看日志文件
3. **技能加载失败**
- 检查 `skill.json` 格式
- 确保 `main.py` 中有Skill子类
- 查看错误日志
## 最佳实践
1. **合理使用记忆系统**
- 不要在每次对话都查询长期记忆
- 定期清理不重要的记忆
- 为重要对话添加元数据标记
2. **技能开发**
- 保持技能功能单一
- 提供清晰的工具描述
- 处理异常情况
3. **长任务设计**
- 将任务分解为合理的步骤
- 提供进度反馈
- 支持任务暂停和恢复
## 更新日志
### v1.0.0 (2024-03-02)
- ✅ 初始版本发布
- ✅ 支持OpenAI、Anthropic、DeepSeek、Qwen
- ✅ 人格系统
- ✅ 记忆系统(短期+长期+RAG)
- ✅ 长任务处理
- ✅ Skills插件系统
- ✅ MCP支持
## 贡献
欢迎提交Issue和Pull Request
## 许可证
MIT License

13
src/ai/mcp/__init__.py Normal file
View File

@@ -0,0 +1,13 @@
"""
MCP模块
"""
from .base import MCPServer, MCPClient, MCPManager, MCPResource, MCPTool, MCPPrompt
__all__ = [
'MCPServer',
'MCPClient',
'MCPManager',
'MCPResource',
'MCPTool',
'MCPPrompt'
]

219
src/ai/mcp/base.py Normal file
View File

@@ -0,0 +1,219 @@
"""
MCP (Model Context Protocol) 支持
"""
import asyncio
import json
from typing import Dict, List, Optional, Any, Callable
from dataclasses import dataclass, asdict
from pathlib import Path
from src.utils.logger import setup_logger
logger = setup_logger('MCPSystem')
@dataclass
class MCPResource:
    """An MCP resource, addressable by URI."""
    uri: str
    name: str
    description: str
    mime_type: str
@dataclass
class MCPTool:
    """Metadata describing an MCP tool (name, description, JSON input schema)."""
    name: str
    description: str
    input_schema: Dict[str, Any]
@dataclass
class MCPPrompt:
    """An MCP prompt template and its argument descriptors."""
    name: str
    description: str
    arguments: List[Dict[str, Any]]
class MCPServer:
    """Base class for MCP servers.

    Tracks registered resources, tool handlers (with their metadata) and
    prompts. Subclasses override initialize/shutdown and the accessors that
    raise NotImplementedError here.
    """

    def __init__(self, name: str, version: str):
        self.name = name
        self.version = version
        self.resources: Dict[str, MCPResource] = {}
        # tool name -> handler coroutine; kept as before so any caller that
        # inspects `tools` directly keeps working.
        self.tools: Dict[str, Callable] = {}
        # tool name -> MCPTool metadata (description + input schema).
        self.tool_defs: Dict[str, MCPTool] = {}
        self.prompts: Dict[str, MCPPrompt] = {}

    async def initialize(self):
        """Hook: set up the server (register tools/resources)."""
        pass

    async def shutdown(self):
        """Hook: release any server resources."""
        pass

    def register_resource(self, resource: MCPResource):
        """Register a resource under its URI."""
        self.resources[resource.uri] = resource

    def register_tool(self, name: str, description: str, input_schema: Dict, handler: Callable):
        """Register a tool handler together with its metadata.

        Fix: previously the MCPTool metadata was built and immediately
        discarded, so list_tools() reported empty descriptions/schemas and
        the AI never saw the real tool documentation.
        """
        self.tool_defs[name] = MCPTool(name=name, description=description, input_schema=input_schema)
        self.tools[name] = handler
        logger.info(f"✅ MCP工具注册: {self.name}.{name}")

    def register_prompt(self, prompt: MCPPrompt):
        """Register a prompt template under its name."""
        self.prompts[prompt.name] = prompt

    async def list_resources(self) -> List[MCPResource]:
        """List all registered resources."""
        return list(self.resources.values())

    async def read_resource(self, uri: str) -> Optional[str]:
        """Read a resource's content; subclasses must implement."""
        raise NotImplementedError

    async def list_tools(self) -> List[MCPTool]:
        """List registered tools with their real metadata."""
        return [
            self.tool_defs.get(name, MCPTool(name=name, description="", input_schema={}))
            for name in self.tools
        ]

    async def call_tool(self, name: str, arguments: Dict[str, Any]) -> Any:
        """Invoke the named tool handler with keyword arguments."""
        if name not in self.tools:
            raise ValueError(f"工具不存在: {name}")
        handler = self.tools[name]
        return await handler(**arguments)

    async def list_prompts(self) -> List[MCPPrompt]:
        """List all registered prompts."""
        return list(self.prompts.values())

    async def get_prompt(self, name: str, arguments: Dict[str, Any]) -> Optional[str]:
        """Render a prompt; subclasses must implement."""
        raise NotImplementedError
class MCPClient:
    """Tracks connected MCP servers and proxies calls to them."""

    def __init__(self):
        self.servers: Dict[str, MCPServer] = {}

    async def connect_server(self, server: MCPServer):
        """Initialize *server* and keep it under its own name."""
        await server.initialize()
        self.servers[server.name] = server
        logger.info(f"✅ 连接MCP服务器: {server.name} v{server.version}")

    async def disconnect_server(self, server_name: str):
        """Shut down and forget the named server, if connected."""
        if server_name in self.servers:
            await self.servers[server_name].shutdown()
            del self.servers[server_name]
            logger.info(f"✅ 断开MCP服务器: {server_name}")

    def get_server(self, name: str) -> Optional[MCPServer]:
        """Return the named server, or None if not connected."""
        return self.servers.get(name)

    def list_servers(self) -> List[str]:
        """Names of all connected servers."""
        return [*self.servers]

    async def list_all_resources(self) -> Dict[str, List[MCPResource]]:
        """Map each server name to its resources."""
        return {name: await srv.list_resources() for name, srv in self.servers.items()}

    async def list_all_tools(self) -> Dict[str, List[MCPTool]]:
        """Map each server name to its tools."""
        return {name: await srv.list_tools() for name, srv in self.servers.items()}

    async def call_tool(self, server_name: str, tool_name: str, arguments: Dict[str, Any]) -> Any:
        """Invoke a tool on a specific server."""
        server = self.get_server(server_name)
        if not server:
            raise ValueError(f"服务器不存在: {server_name}")
        return await server.call_tool(tool_name, arguments)
class MCPManager:
    """Owns an MCPClient plus a JSON file of per-server configurations."""

    def __init__(self, config_path: Path):
        self.config_path = config_path
        self.client = MCPClient()
        self.server_configs: Dict[str, Dict] = {}
        self._load_config()

    def _load_config(self):
        """Populate server_configs from disk when the file exists."""
        if not self.config_path.exists():
            return
        with open(self.config_path, 'r', encoding='utf-8') as fh:
            self.server_configs = json.load(fh)

    def _save_config(self):
        """Write server_configs back to disk, creating parent directories."""
        self.config_path.parent.mkdir(parents=True, exist_ok=True)
        with open(self.config_path, 'w', encoding='utf-8') as fh:
            json.dump(self.server_configs, fh, ensure_ascii=False, indent=2)

    async def register_server(self, server: MCPServer, config: Optional[Dict] = None):
        """Connect a server; persist its config if one is given."""
        await self.client.connect_server(server)
        if config:
            self.server_configs[server.name] = config
            self._save_config()

    async def unregister_server(self, server_name: str):
        """Disconnect a server and drop any persisted config for it."""
        await self.client.disconnect_server(server_name)
        if server_name in self.server_configs:
            self.server_configs.pop(server_name)
            self._save_config()

    def get_client(self) -> MCPClient:
        """Expose the underlying client."""
        return self.client

    async def get_all_tools_for_ai(self) -> List[Dict]:
        """Flatten every server's tools into namespaced OpenAI function specs."""
        specs: List[Dict] = []
        tools_by_server = await self.client.list_all_tools()
        for server_name, tools in tools_by_server.items():
            for tool in tools:
                specs.append({
                    "type": "function",
                    "function": {
                        "name": f"{server_name}.{tool.name}",
                        "description": tool.description,
                        "parameters": tool.input_schema
                    }
                })
        return specs

    async def execute_tool(self, full_tool_name: str, arguments: Dict) -> Any:
        """Run a tool given its dotted `server.tool` name."""
        server_name, sep, tool_name = full_tool_name.partition('.')
        if not sep:
            raise ValueError(f"工具名格式错误: {full_tool_name}")
        return await self.client.call_tool(server_name, tool_name, arguments)

View File

@@ -0,0 +1,6 @@
"""
MCP服务器实现
"""
from .filesystem import FileSystemMCPServer
__all__ = ['FileSystemMCPServer']

View File

@@ -0,0 +1,123 @@
"""
MCP示例服务器 - 文件系统访问
"""
from pathlib import Path
from typing import Optional
from ..base import MCPServer, MCPResource
class FileSystemMCPServer(MCPServer):
    """MCP server exposing sandboxed filesystem access under a root directory."""

    def __init__(self, root_path: Path):
        super().__init__(name="filesystem", version="1.0.0")
        self.root_path = root_path

    async def initialize(self):
        """Register the read/write/list tools."""
        self.register_tool(
            name="read_file",
            description="读取文件内容",
            input_schema={
                "type": "object",
                "properties": {
                    "path": {
                        "type": "string",
                        "description": "文件路径"
                    }
                },
                "required": ["path"]
            },
            handler=self.read_file
        )
        self.register_tool(
            name="write_file",
            description="写入文件内容",
            input_schema={
                "type": "object",
                "properties": {
                    "path": {
                        "type": "string",
                        "description": "文件路径"
                    },
                    "content": {
                        "type": "string",
                        "description": "文件内容"
                    }
                },
                "required": ["path", "content"]
            },
            handler=self.write_file
        )
        self.register_tool(
            name="list_directory",
            description="列出目录内容",
            input_schema={
                "type": "object",
                "properties": {
                    "path": {
                        "type": "string",
                        "description": "目录路径"
                    }
                },
                "required": ["path"]
            },
            handler=self.list_directory
        )

    def _resolve_path(self, path: str) -> Path:
        """Resolve *path* inside the sandbox root, rejecting escapes.

        Fix: the old check `str(full_path).startswith(str(self.root_path))`
        was a path-traversal hole — it admitted sibling paths that merely
        share the root's name as a string prefix (e.g. root `data` vs
        `data2`), and never resolved the root itself. Compare fully resolved
        paths structurally with Path.relative_to instead.
        """
        root = self.root_path.resolve()
        full_path = (root / path).resolve()
        try:
            full_path.relative_to(root)
        except ValueError:
            raise ValueError("路径超出允许范围") from None
        return full_path

    async def read_file(self, path: str) -> str:
        """Return the UTF-8 text content of a file inside the sandbox."""
        file_path = self._resolve_path(path)
        if not file_path.exists():
            raise FileNotFoundError(f"文件不存在: {path}")
        if not file_path.is_file():
            raise ValueError(f"不是文件: {path}")
        with open(file_path, 'r', encoding='utf-8') as f:
            return f.read()

    async def write_file(self, path: str, content: str) -> str:
        """Write UTF-8 text to a file inside the sandbox, creating parent dirs."""
        file_path = self._resolve_path(path)
        file_path.parent.mkdir(parents=True, exist_ok=True)
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write(content)
        return f"文件已写入: {path}"

    async def list_directory(self, path: str) -> list:
        """List a directory's entries as {name, type, size} dicts."""
        dir_path = self._resolve_path(path)
        if not dir_path.exists():
            raise FileNotFoundError(f"目录不存在: {path}")
        if not dir_path.is_dir():
            raise ValueError(f"不是目录: {path}")
        items = []
        for item in dir_path.iterdir():
            items.append({
                "name": item.name,
                "type": "directory" if item.is_dir() else "file",
                "size": item.stat().st_size if item.is_file() else None
            })
        return items

466
src/ai/memory.py Normal file
View File

@@ -0,0 +1,466 @@
"""
记忆系统:短期记忆、长期记忆与 RAG 检索(向量数据库)。
"""
import asyncio
import hashlib
import uuid
from typing import List, Dict, Optional, Tuple, Callable, Awaitable
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from pathlib import Path
from collections import deque
from .vector_store import VectorStore, VectorMemory, ChromaVectorStore, JSONVectorStore
from src.utils.logger import setup_logger
logger = setup_logger('MemorySystem')
@dataclass
class MemoryItem:
    """A single memory entry (used by short-term memory)."""
    content: str
    timestamp: datetime
    user_id: str
    importance: float = 0.5
    metadata: Dict = field(default_factory=dict)

    def to_dict(self) -> Dict:
        """Serialize to a plain dict; the timestamp becomes an ISO-8601 string."""
        payload = dict(
            content=self.content,
            timestamp=self.timestamp.isoformat(),
            user_id=self.user_id,
            importance=self.importance,
            metadata=self.metadata,
        )
        return payload
class ShortTermMemory:
    """Sliding-window short-term memory, kept per user."""

    def __init__(self, max_size: int = 20, max_age_minutes: int = 30):
        self.max_size = max_size
        self.max_age = timedelta(minutes=max_age_minutes)
        # user_id -> deque of MemoryItem, newest at the right
        self.memories: Dict[str, deque] = {}

    def add(self, user_id: str, content: str, metadata: Optional[Dict] = None):
        """Append a memory for *user_id*; the oldest falls off the window."""
        bucket = self.memories.get(user_id)
        if bucket is None:
            bucket = deque(maxlen=self.max_size)
            self.memories[user_id] = bucket
        bucket.append(
            MemoryItem(
                content=content,
                timestamp=datetime.now(),
                user_id=user_id,
                metadata=metadata or {},
            )
        )

    def get(self, user_id: str, limit: Optional[int] = None) -> List[MemoryItem]:
        """Return the user's non-expired memories, optionally only the last *limit*."""
        bucket = self.memories.get(user_id)
        if bucket is None:
            return []
        cutoff = datetime.now() - self.max_age
        fresh = [item for item in bucket if item.timestamp >= cutoff]
        return fresh[-limit:] if limit else fresh

    def clear(self, user_id: str):
        """Drop all short-term memories for *user_id*."""
        self.memories.pop(user_id, None)
class MemorySystem:
    """Combined memory system: short-term window plus long-term vector store.

    Long-term entries live in a :class:`VectorStore` (Chroma when available,
    JSON file as fallback) and are retrieved by embedding similarity, with
    substring match and importance ranking as fallbacks.
    """

    def __init__(
        self,
        storage_path: Path,
        embed_func: Optional[Callable[[str], Awaitable[List[float]]]] = None,
        importance_evaluator: Optional[Callable[[str, Optional[Dict]], Awaitable[float]]] = None,
        importance_threshold: float = 0.6,
        use_vector_db: bool = True
    ):
        self.short_term = ShortTermMemory()
        self.embed_func = embed_func
        self.importance_evaluator = importance_evaluator
        self.importance_threshold = importance_threshold
        # Initialize the long-term vector store backend.
        if use_vector_db:
            try:
                # Prefer the Chroma vector database; its files live in a
                # sibling directory of the JSON storage path.
                chroma_path = storage_path.parent / "chroma_db"
                self.vector_store: VectorStore = ChromaVectorStore(chroma_path)
                logger.info("Using Chroma vector store")
            except Exception as e:
                logger.warning(f"Chroma 初始化失败,降级为 JSON 存储: {e}")
                self.vector_store = JSONVectorStore(storage_path)
        else:
            # JSON file storage (backwards-compatible mode).
            self.vector_store = JSONVectorStore(storage_path)
            logger.info("使用 JSON 存储")

    @staticmethod
    def _normalize_embedding(values: List[float], dim: int = 1024) -> List[float]:
        """Pad or truncate *values* to exactly *dim* floats (zeros if empty)."""
        if not values:
            return [0.0] * dim
        normalized = [float(v) for v in values[:dim]]
        if len(normalized) < dim:
            normalized.extend([0.0] * (dim - len(normalized)))
        return normalized

    @staticmethod
    def _local_embedding(text: str, dim: int = 1024) -> List[float]:
        """Cheap deterministic fallback embedding built from raw bytes.

        Buckets the UTF-8 bytes of *text* into *dim* slots, then perturbs the
        buckets with the SHA-256 digest so texts with similar byte statistics
        still differ.  Not semantically meaningful — used only when the real
        embedding function is unavailable or fails.
        """
        if not text:
            return [0.0] * dim
        vec = [0.0] * dim
        encoded = text.encode("utf-8", errors="ignore")
        if not encoded:
            return vec
        for idx, byte in enumerate(encoded):
            bucket = idx % dim
            vec[bucket] += (byte / 255.0)
        digest = hashlib.sha256(encoded).digest()
        for idx, byte in enumerate(digest):
            bucket = idx % dim
            vec[bucket] += ((byte / 255.0) - 0.5) * 0.1
        return vec

    async def _build_embedding(self, text: str) -> List[float]:
        """Embed *text* via the configured embed_func, falling back to the
        local byte-bucket embedding on error or empty result."""
        if self.embed_func:
            try:
                embedding = await self.embed_func(text)
                if embedding:
                    return [float(v) for v in list(embedding)]
            except Exception as e:
                logger.warning(f"embedding generation failed: {e}")
        return self._local_embedding(text)

    async def _add_vector_memory(
        self,
        memory_id: str,
        user_id: str,
        content: str,
        embedding: List[float],
        importance: float,
        metadata: Optional[Dict] = None,
    ) -> bool:
        """Insert one memory into the vector store, retrying with re-sized
        embeddings when the store rejects the original dimension.

        Returns True on success, False when every candidate dimension fails.
        """
        if await self.vector_store.add(
            id=memory_id,
            user_id=user_id,
            content=content,
            embedding=embedding,
            importance=importance,
            metadata=metadata,
        ):
            return True
        # Chroma collection may have a fixed historical embedding dimension.
        candidate_dims = []
        base_len = len(embedding or [])
        for dim in [base_len, 1024, 1536, 768, 384, 3072]:
            if dim and dim > 0 and dim not in candidate_dims:
                candidate_dims.append(dim)
        for dim in candidate_dims:
            retry_embedding = self._normalize_embedding(list(embedding or []), dim=dim)
            ok = await self.vector_store.add(
                id=memory_id,
                user_id=user_id,
                content=content,
                embedding=retry_embedding,
                importance=importance,
                metadata=metadata,
            )
            if ok:
                return True
        return False

    @staticmethod
    def _normalize_importance(importance: float) -> float:
        """Coerce *importance* to a float clamped to [0, 1]; bad input -> 0.5."""
        try:
            value = float(importance)
        except (TypeError, ValueError):
            value = 0.5
        return max(0.0, min(1.0, value))

    async def add_message(
        self,
        user_id: str,
        role: str,
        content: str,
        metadata: Optional[Dict] = None
    ):
        """Append one message to short-term memory (no long-term scoring).

        NOTE(review): *role* is accepted but not stored anywhere — confirm
        whether callers expect it to land in metadata.
        """
        self.short_term.add(user_id, content, metadata)

    async def add_qa_pair(
        self,
        user_id: str,
        question: str,
        answer: str,
        metadata: Optional[Dict] = None,
    ) -> Optional[VectorMemory]:
        """Record the latest Q/A pair and score only that pair for long-term
        storage.

        Both halves always go into short-term memory; the combined pair is
        persisted long-term only when its importance reaches the threshold.
        Returns the stored VectorMemory, or None if skipped/failed.
        """
        user_meta = {"role": "user"}
        assistant_meta = {"role": "assistant"}
        if isinstance(metadata, dict):
            user_meta.update(metadata)
            assistant_meta.update(metadata)
        self.short_term.add(user_id, question, user_meta)
        self.short_term.add(user_id, answer, assistant_meta)
        qa_content = f"用户: {question}\n助手: {answer}"
        qa_metadata = dict(metadata or {})
        qa_metadata.update({"type": "qa_pair", "question": question, "answer": answer})
        importance = await self._evaluate_importance(qa_content, qa_metadata)
        if importance < self.importance_threshold:
            return None
        embedding = await self._build_embedding(qa_content)
        memory_id = str(uuid.uuid4())
        ok = await self._add_vector_memory(
            memory_id=memory_id,
            user_id=user_id,
            content=qa_content,
            embedding=embedding,
            importance=importance,
            metadata=qa_metadata,
        )
        if not ok:
            return None
        return await self.get_long_term(user_id, memory_id)

    async def _evaluate_importance(self, content: str, metadata: Optional[Dict]) -> float:
        """Score *content*'s importance via the configured evaluator.

        Empty content scores 0.0; evaluator errors (or no evaluator) fall
        back to a neutral 0.5.
        """
        if not content or not content.strip():
            return 0.0
        if self.importance_evaluator:
            try:
                score = await self.importance_evaluator(content, metadata)
                return self._normalize_importance(score)
            except Exception as e:
                logger.warning(f"importance evaluation failed, fallback to neutral score: {e}")
        # Neutral score when model-based evaluation is unavailable.
        return 0.5

    async def get_context(
        self,
        user_id: str,
        query: Optional[str] = None,
        max_short_term: int = 10,
        max_long_term: int = 5
    ) -> Tuple[List[MemoryItem], List[VectorMemory]]:
        """Collect conversational context: recent short-term items plus
        relevant long-term memories.

        Long-term retrieval cascades: vector similarity -> case-insensitive
        substring match -> importance ranking.  Accessed long-term entries
        have their access stats updated.
        """
        # Recent short-term window.
        short_term_memories = self.short_term.get(user_id, limit=max_short_term)
        # Relevant long-term memories.
        long_term_memories = []
        if query:
            try:
                # Primary path: embedding similarity search.
                query_embedding = await self._build_embedding(query)
                if query_embedding:
                    long_term_memories = await self.vector_store.search(
                        user_id=user_id,
                        query_embedding=query_embedding,
                        limit=max_long_term
                    )
            except Exception as e:
                logger.warning(f"向量检索失败,改用重要性检索: {e}")
        if query and not long_term_memories:
            # Secondary path: plain substring match, best-effort.
            query_lower = query.lower()
            try:
                candidates = await self.vector_store.get_all(user_id)
                matches = [m for m in candidates if query_lower in m.content.lower()]
                matches.sort(key=lambda m: (m.importance, m.timestamp), reverse=True)
                long_term_memories = matches[:max_long_term]
            except Exception:
                pass
        # If vector search failed or returned nothing, fall back to
        # importance-ranked retrieval.
        if not long_term_memories:
            long_term_memories = await self.vector_store.get_by_importance(
                user_id=user_id,
                limit=max_long_term
            )
        # Record access to the long-term memories we are about to use.
        for memory in long_term_memories:
            await self.vector_store.update_access(memory.id)
        return short_term_memories, long_term_memories

    def format_context(
        self,
        short_term: List[MemoryItem],
        long_term: List[VectorMemory]
    ) -> str:
        """Render the two memory lists as a markdown-ish prompt section."""
        context = ""
        if long_term:
            context += "## 相关历史记忆\n"
            for i, memory in enumerate(long_term, 1):
                context += f"{i}. {memory.content}\n"
            context += "\n"
        if short_term:
            context += "## 最近对话\n"
            for memory in short_term:
                context += f"- {memory.content}\n"
        return context

    async def list_long_term(
        self, user_id: str, limit: int = 20
    ) -> List[VectorMemory]:
        """List long-term memories, newest first; limit <= 0 returns all."""
        memories = await self.vector_store.get_all(user_id)
        memories.sort(key=lambda m: m.timestamp, reverse=True)
        if limit > 0:
            return memories[:limit]
        return memories

    async def get_long_term(self, user_id: str, memory_id: str) -> Optional[VectorMemory]:
        """Find one long-term memory by id within *user_id*'s memories.

        Linear scan over get_all(); acceptable for small per-user stores.
        """
        memories = await self.vector_store.get_all(user_id)
        for memory in memories:
            if memory.id == memory_id:
                return memory
        return None

    async def add_long_term(
        self,
        user_id: str,
        content: str,
        importance: float = 0.8,
        metadata: Optional[Dict] = None,
    ) -> Optional[VectorMemory]:
        """Directly persist *content* as a long-term memory (no threshold
        check).  Returns the stored memory or None on store failure."""
        memory_id = str(uuid.uuid4())
        importance = self._normalize_importance(importance)
        embedding = await self._build_embedding(content)
        ok = await self._add_vector_memory(
            memory_id=memory_id,
            user_id=user_id,
            content=content,
            embedding=embedding,
            importance=importance,
            metadata=metadata or {},
        )
        if not ok:
            return None
        return await self.get_long_term(user_id, memory_id)

    async def search_long_term(
        self, user_id: str, query: str, limit: int = 10
    ) -> List[VectorMemory]:
        """Search long-term memories: vector similarity first, then a
        substring-match fallback ranked by (importance, recency)."""
        if not query:
            return []
        query_embedding = await self._build_embedding(query)
        results = await self.vector_store.search(
            user_id=user_id,
            query_embedding=query_embedding,
            limit=limit,
            min_importance=0.0,
        )
        if results:
            return results
        all_memories = await self.vector_store.get_all(user_id)
        query_lower = query.lower()
        matched = [m for m in all_memories if query_lower in m.content.lower()]
        matched.sort(key=lambda m: (m.importance, m.timestamp), reverse=True)
        return matched[:limit]

    async def update_long_term(
        self,
        user_id: str,
        memory_id: str,
        content: Optional[str] = None,
        importance: Optional[float] = None,
        metadata: Optional[Dict] = None,
    ) -> Optional[VectorMemory]:
        """Update a long-term memory in place (delete + re-add under the
        same id).

        Any of content/importance/metadata may be changed; passing metadata
        replaces it wholesale.  The embedding is rebuilt only when content
        changed.  NOTE(review): if the re-add fails after the delete, the
        original entry is lost — confirm whether the store offers an atomic
        update instead.
        """
        original = await self.get_long_term(user_id, memory_id)
        if not original:
            return None
        new_content = content if content is not None else original.content
        new_importance = (
            self._normalize_importance(importance)
            if importance is not None
            else original.importance
        )
        new_metadata = dict(original.metadata or {})
        if metadata is not None:
            new_metadata = metadata
        if content is not None:
            new_embedding = await self._build_embedding(new_content)
        else:
            new_embedding = self._normalize_embedding(list(original.embedding or []))
        deleted = await self.vector_store.delete(memory_id)
        if not deleted:
            return None
        added = await self._add_vector_memory(
            memory_id=memory_id,
            user_id=user_id,
            content=new_content,
            embedding=new_embedding,
            importance=new_importance,
            metadata=new_metadata,
        )
        if not added:
            return None
        return await self.get_long_term(user_id, memory_id)

    async def delete_long_term(self, user_id: str, memory_id: str) -> bool:
        """Delete one long-term memory; ownership is checked via lookup first."""
        memory = await self.get_long_term(user_id, memory_id)
        if not memory:
            return False
        return await self.vector_store.delete(memory_id)

    def clear_short_term(self, user_id: str):
        """Clear the user's short-term memory window."""
        self.short_term.clear(user_id)

    async def clear_long_term(self, user_id: str):
        """Remove all of the user's long-term memories from the store."""
        await self.vector_store.clear_user(user_id)

    async def close(self):
        """Shut down the underlying vector store."""
        await self.vector_store.close()

View File

@@ -0,0 +1,7 @@
"""
AI模型实现
"""
from .openai_model import OpenAIModel
from .anthropic_model import AnthropicModel
__all__ = ['OpenAIModel', 'AnthropicModel']

View File

@@ -0,0 +1,120 @@
"""
Anthropic Claude模型实现
"""
from typing import List, Optional, AsyncIterator
from anthropic import AsyncAnthropic
from ..base import BaseAIModel, Message, ModelConfig
class AnthropicModel(BaseAIModel):
    """Anthropic Claude model backed by the ``AsyncAnthropic`` client."""

    def __init__(self, config: ModelConfig):
        super().__init__(config)
        self.client = AsyncAnthropic(
            api_key=config.api_key,
            base_url=config.api_base,
            timeout=config.timeout
        )

    async def chat(
        self,
        messages: List[Message],
        tools: Optional[List[dict]] = None,
        **kwargs
    ) -> Message:
        """Non-streaming chat; returns the assistant reply as one Message.

        The Anthropic API takes the system prompt as a separate ``system``
        parameter, so system-role messages are split out of the list first
        (if several are present, the last one wins).  ``tool_use`` content
        blocks are converted to OpenAI-style tool_calls dicts so callers see
        a uniform Message shape.
        """
        # Separate the system message from the conversational turns.
        system_message = None
        formatted_messages = []
        for msg in messages:
            if msg.role == "system":
                system_message = msg.content
            else:
                formatted_messages.append({
                    "role": msg.role,
                    "content": msg.content
                })
        params = {
            "model": self.config.model_name,
            "messages": formatted_messages,
            "max_tokens": self.config.max_tokens,
            "temperature": self.config.temperature,
        }
        if system_message:
            params["system"] = system_message
        if tools:
            params["tools"] = tools
        params.update(kwargs)
        response = await self.client.messages.create(**params)
        content = ""
        tool_calls = []
        for block in response.content:
            if block.type == "text":
                content += block.text
            elif block.type == "tool_use":
                # Mirror the OpenAI tool_call dict layout.  NOTE(review):
                # ``arguments`` is left as the raw input object here, not a
                # JSON string as OpenAI uses — confirm downstream consumers.
                tool_calls.append({
                    "id": block.id,
                    "type": "function",
                    "function": {
                        "name": block.name,
                        "arguments": block.input
                    }
                })
        return Message(
            role="assistant",
            content=content,
            tool_calls=tool_calls if tool_calls else None
        )

    async def chat_stream(
        self,
        messages: List[Message],
        tools: Optional[List[dict]] = None,
        **kwargs
    ) -> AsyncIterator[str]:
        """Streaming chat; yields assistant text chunks as they arrive."""
        # Same system-message split as chat().
        system_message = None
        formatted_messages = []
        for msg in messages:
            if msg.role == "system":
                system_message = msg.content
            else:
                formatted_messages.append({
                    "role": msg.role,
                    "content": msg.content
                })
        params = {
            "model": self.config.model_name,
            "messages": formatted_messages,
            "max_tokens": self.config.max_tokens,
            "temperature": self.config.temperature,
            "stream": True,
        }
        if system_message:
            params["system"] = system_message
        if tools:
            params["tools"] = tools
        params.update(kwargs)
        async with self.client.messages.stream(**params) as stream:
            async for text in stream.text_stream:
                yield text

    async def embed(self, text: str) -> List[float]:
        """Not supported: Anthropic exposes no embeddings endpoint."""
        raise NotImplementedError("Anthropic不提供嵌入API请使用OpenAI或其他服务")

View File

@@ -0,0 +1,235 @@
"""
OpenAI模型实现兼容OpenAI API的模型
"""
import json
import httpx
from typing import List, Optional, AsyncIterator, Dict, Any
from openai import AsyncOpenAI
from ..base import BaseAIModel, Message, ModelConfig
from src.utils.logger import setup_logger
logger = setup_logger('OpenAIModel')
class OpenAIModel(BaseAIModel):
    """Chat + embedding model for OpenAI and OpenAI-compatible APIs."""

    def __init__(self, config: ModelConfig):
        super().__init__(config)
        self.logger = logger
        # Dedicated httpx client so connection counts are bounded explicitly.
        http_client = httpx.AsyncClient(
            timeout=config.timeout,
            limits=httpx.Limits(max_keepalive_connections=5, max_connections=10)
        )
        self.client = AsyncOpenAI(
            api_key=config.api_key,
            base_url=config.api_base,
            timeout=config.timeout,
            http_client=http_client
        )

    async def chat(
        self,
        messages: List[Message],
        tools: Optional[List[dict]] = None,
        **kwargs
    ) -> Message:
        """Non-streaming chat completion.

        Returns the first choice as an assistant Message, with any tool
        calls normalized to plain dicts (JSON-string arguments).
        """
        formatted_messages = [self._format_message(msg) for msg in messages]
        params = {
            "model": self.config.model_name,
            "messages": formatted_messages,
            "temperature": self.config.temperature,
            "max_tokens": self.config.max_tokens,
            "top_p": self.config.top_p,
            "frequency_penalty": self.config.frequency_penalty,
            "presence_penalty": self.config.presence_penalty,
        }
        if tools:
            params["tools"] = tools
        params.update(kwargs)
        response = await self.client.chat.completions.create(**params)
        choice = response.choices[0]
        raw_tool_calls = (
            choice.message.tool_calls
            if hasattr(choice.message, 'tool_calls') and choice.message.tool_calls
            else None
        )
        tool_calls = (
            [self._normalize_tool_call(tool_call) for tool_call in raw_tool_calls]
            if raw_tool_calls else None
        )
        return Message(
            role="assistant",
            content=choice.message.content or "",
            tool_calls=tool_calls
        )

    async def chat_stream(
        self,
        messages: List[Message],
        tools: Optional[List[dict]] = None,
        **kwargs
    ) -> AsyncIterator[str]:
        """Streaming chat; yields content deltas only (tool-call deltas are
        not surfaced by this method)."""
        formatted_messages = [self._format_message(msg) for msg in messages]
        params = {
            "model": self.config.model_name,
            "messages": formatted_messages,
            "temperature": self.config.temperature,
            "max_tokens": self.config.max_tokens,
            "stream": True,
        }
        if tools:
            params["tools"] = tools
        params.update(kwargs)
        stream = await self.client.chat.completions.create(**params)
        async for chunk in stream:
            if chunk.choices[0].delta.content:
                yield chunk.choices[0].delta.content

    def _format_message(self, msg: Message) -> Dict[str, Any]:
        """Convert the internal Message into an OpenAI API message dict.

        Assistant messages carry normalized tool_calls; tool messages carry
        the tool_call_id; other roles optionally carry a ``name`` field.
        """
        formatted: Dict[str, Any] = {"role": msg.role}
        if msg.role == "assistant":
            # OpenAI expects null (not "") content for tool-call-only turns.
            formatted["content"] = msg.content if msg.content else None
            if msg.tool_calls:
                formatted["tool_calls"] = [
                    self._normalize_tool_call(tool_call)
                    for tool_call in msg.tool_calls
                ]
        elif msg.role == "tool":
            formatted["content"] = msg.content
            if msg.tool_call_id:
                formatted["tool_call_id"] = msg.tool_call_id
        else:
            formatted["content"] = msg.content
            if msg.name:
                formatted["name"] = msg.name
        return formatted

    def _normalize_tool_call(self, tool_call: Any) -> Dict[str, Any]:
        """Coerce any tool-call representation (plain dict, pydantic model,
        or attribute object) into a uniform dict.

        Guarantees: ``function.arguments`` is always a JSON/plain string
        (dicts are JSON-encoded, None becomes "{}"), and ``type`` defaults
        to "function".
        """
        if isinstance(tool_call, dict):
            normalized = dict(tool_call)
        elif hasattr(tool_call, "model_dump"):
            # pydantic v2 objects from the OpenAI SDK.
            normalized = tool_call.model_dump()
        else:
            # Generic attribute object fallback.
            function = getattr(tool_call, "function", None)
            if isinstance(function, dict):
                function_name = function.get("name")
                raw_arguments = function.get("arguments")
            else:
                function_name = getattr(function, "name", None)
                raw_arguments = getattr(function, "arguments", None)
            normalized = {
                "id": getattr(tool_call, "id", None),
                "type": getattr(tool_call, "type", "function"),
                "function": {
                    "name": function_name,
                    "arguments": raw_arguments
                }
            }
        function_data = normalized.get("function") or {}
        if not isinstance(function_data, dict):
            function_data = {
                "name": getattr(function_data, "name", None),
                "arguments": getattr(function_data, "arguments", None)
            }
        raw_arguments = function_data.get("arguments")
        if isinstance(raw_arguments, dict):
            arguments = json.dumps(raw_arguments, ensure_ascii=False)
        elif raw_arguments is None:
            arguments = "{}"
        else:
            arguments = str(raw_arguments)
        function_data["arguments"] = arguments
        normalized["function"] = function_data
        normalized["type"] = normalized.get("type") or "function"
        return normalized

    @staticmethod
    def _is_embedding_too_long_error(error: Exception) -> bool:
        """Heuristically detect provider errors meaning "input too long"."""
        status_code = getattr(error, "status_code", None)
        if status_code == 413:
            return True
        message = str(error).lower()
        return (
            "less than 512 tokens" in message
            or "input too long" in message
            or "maximum context length" in message
        )

    @staticmethod
    def _shrink_text_for_embedding(text: str) -> str:
        """Shrink *text* for an embedding retry: collapse whitespace, then
        keep the head and tail (~60% of the original, min 64 chars)."""
        compact = " ".join((text or "").split())
        if len(compact) <= 64:
            return compact
        target = max(64, int(len(compact) * 0.6))
        if target >= len(compact):
            target = max(64, len(compact) // 2)
        head = target // 2
        tail = target - head
        return f"{compact[:head]} {compact[-tail:]}"

    async def embed(self, text: str) -> List[float]:
        """Create an embedding for *text*.

        If the provider rejects the input as too long, the text is shrunk
        and retried up to 5 times; any other failure is logged with full
        context and re-raised.  NOTE(review): this uses
        ``self.config.model_name`` as the embedding model — presumably this
        instance is configured with an embedding model; confirm.
        """
        if isinstance(text, bytes):
            text = text.decode("utf-8", errors="ignore")
        raw_text = str(text or "")
        # Never send an empty string; fall back to a single space.
        candidate_text = raw_text.strip() or raw_text or " "
        retry_count = 0
        while True:
            try:
                response = await self.client.embeddings.create(
                    model=self.config.model_name,
                    input=candidate_text,
                    encoding_format="float"
                )
                return response.data[0].embedding
            except Exception as e:
                if self._is_embedding_too_long_error(e):
                    next_text = self._shrink_text_for_embedding(candidate_text)
                    if next_text and len(next_text) < len(candidate_text) and retry_count < 5:
                        retry_count += 1
                        self.logger.warning(
                            "embedding input too long, retry with truncated text: "
                            f"{len(candidate_text)} -> {len(next_text)}"
                        )
                        candidate_text = next_text
                        continue
                import traceback
                self.logger.error(f"embedding generation failed: {e}")
                self.logger.error(f"model: {self.config.model_name}")
                self.logger.error(f"text length: {len(candidate_text)}")
                self.logger.error(f"text preview: {repr(candidate_text[:100])}")
                self.logger.error(f"full traceback:\n{traceback.format_exc()}")
                raise

232
src/ai/personality.py Normal file
View File

@@ -0,0 +1,232 @@
"""Personality system for role-play profiles."""
from dataclasses import dataclass, field
from enum import Enum
import json
from pathlib import Path
from typing import Dict, List, Optional
class PersonalityTrait(Enum):
    """Closed set of traits a personality profile may carry."""

    FRIENDLY = "Friendly"
    PROFESSIONAL = "Professional"
    HUMOROUS = "Humorous"
    SERIOUS = "Serious"
    CREATIVE = "Creative"
    ANALYTICAL = "Analytical"
    EMPATHETIC = "Empathetic"
    DIRECT = "Direct"


@dataclass
class PersonalityProfile:
    """One role-play persona: identity, traits, and speaking style."""

    name: str
    description: str
    traits: List[PersonalityTrait]
    speaking_style: str
    example_responses: List[str] = field(default_factory=list)
    custom_instructions: str = ""

    def to_system_prompt(self) -> str:
        """Render this profile as a plain-text system prompt."""
        if self.traits:
            traits_text = ", ".join(t.value for t in self.traits)
        else:
            traits_text = "Friendly"
        parts: List[str] = [
            "Role Setting",
            f"You are {self.name}. {self.description}",
            f"Traits: {traits_text}",
            f"Speaking style: {self.speaking_style}",
            "Output rule: plain text only. Do not use Markdown syntax.",
        ]
        if self.example_responses:
            parts.append("Reference responses:")
            parts.extend(
                f"{idx}. {example}"
                for idx, example in enumerate(self.example_responses, 1)
            )
        if self.custom_instructions:
            parts.append(f"Additional instructions: {self.custom_instructions}")
        return "\n".join(parts)
class PersonalitySystem:
    """Personality management and persistence.

    Profiles are loaded from (and written back to) a JSON config file; one
    profile is always the "current" active personality when any exist.
    """

    def __init__(self, config_path: Optional[Path] = None):
        self.config_path = config_path or Path("config/personalities.json")
        self.personalities: Dict[str, PersonalityProfile] = {}
        self.current_personality: Optional[PersonalityProfile] = None
        self._load_personalities()

    def _dict_to_profile(self, config: Dict) -> PersonalityProfile:
        """Build a profile from one JSON config entry.

        Unknown trait names are silently dropped; an empty trait list falls
        back to [FRIENDLY].  Missing fields get safe string defaults.
        """
        trait_names = config.get("traits", [])
        traits: List[PersonalityTrait] = []
        for trait_name in trait_names:
            if trait_name in PersonalityTrait.__members__:
                traits.append(PersonalityTrait[trait_name])
        if not traits:
            traits = [PersonalityTrait.FRIENDLY]
        return PersonalityProfile(
            name=str(config.get("name", "Assistant")),
            description=str(config.get("description", "")),
            traits=traits,
            speaking_style=str(config.get("speaking_style", "Natural and concise")),
            example_responses=list(config.get("example_responses", [])),
            custom_instructions=str(config.get("custom_instructions", "")),
        )

    def _load_personalities(self):
        """Load personality config from disk or create defaults.

        Prefers the "default" key as the active profile, otherwise the first
        loaded one.  When the config file is missing, built-in defaults are
        created and persisted instead.
        """
        if self.config_path.exists():
            with open(self.config_path, "r", encoding="utf-8") as f:
                data = json.load(f)
            for key, config in data.items():
                self.personalities[key] = self._dict_to_profile(config)
            if "default" in self.personalities:
                self.current_personality = self.personalities["default"]
            elif self.personalities:
                first_key = next(iter(self.personalities.keys()))
                self.current_personality = self.personalities[first_key]
            return
        self._create_default_personalities()

    def _create_default_personalities(self):
        """Create and persist built-in default profiles."""
        default = PersonalityProfile(
            name="Assistant",
            description="A friendly and practical AI assistant.",
            traits=[
                PersonalityTrait.FRIENDLY,
                PersonalityTrait.PROFESSIONAL,
                PersonalityTrait.EMPATHETIC,
            ],
            speaking_style="Warm, clear, and actionable.",
            example_responses=[
                "I understand. Let's solve this step by step.",
                "Here is the result first, then the key details.",
            ],
        )
        tech_expert = PersonalityProfile(
            name="Tech Expert",
            description="A senior engineer focused on correctness and maintainability.",
            traits=[
                PersonalityTrait.PROFESSIONAL,
                PersonalityTrait.ANALYTICAL,
                PersonalityTrait.DIRECT,
            ],
            speaking_style="Direct, structured, and implementation-oriented.",
        )
        creative = PersonalityProfile(
            name="Creative Partner",
            description="A collaborative creative role-play partner.",
            traits=[
                PersonalityTrait.CREATIVE,
                PersonalityTrait.HUMOROUS,
                PersonalityTrait.FRIENDLY,
            ],
            speaking_style="Lively, imaginative, and expressive.",
        )
        self.personalities = {
            "default": default,
            "tech_expert": tech_expert,
            "creative": creative,
        }
        self.current_personality = default
        self._save_personalities()

    def _save_personalities(self):
        """Persist all profiles to the JSON config file (creates parents)."""
        self.config_path.parent.mkdir(parents=True, exist_ok=True)
        data = {}
        for key, profile in self.personalities.items():
            data[key] = {
                "name": profile.name,
                "description": profile.description,
                # Stored by enum member NAME (e.g. "FRIENDLY"), matching the
                # __members__ lookup in _dict_to_profile.
                "traits": [trait.name for trait in profile.traits],
                "speaking_style": profile.speaking_style,
                "example_responses": profile.example_responses,
                "custom_instructions": profile.custom_instructions,
            }
        with open(self.config_path, "w", encoding="utf-8") as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

    def set_personality(self, key: str) -> bool:
        """Switch the active personality by key; False if key is unknown."""
        if key not in self.personalities:
            return False
        self.current_personality = self.personalities[key]
        return True

    def get_system_prompt(self) -> str:
        """Return the active personality's system prompt ("" when none)."""
        if self.current_personality:
            return self.current_personality.to_system_prompt()
        return ""

    def add_personality(self, key: str, profile: PersonalityProfile) -> bool:
        """Add (or overwrite) a profile under *key* and persist.

        An empty/whitespace key is rejected.  If no personality was active,
        the new one becomes active.
        """
        key = key.strip()
        if not key:
            return False
        self.personalities[key] = profile
        if not self.current_personality:
            self.current_personality = profile
        self._save_personalities()
        return True

    def remove_personality(self, key: str) -> bool:
        """Remove a profile (never "default") and persist.

        If the removed profile was active, fall back to "default", else the
        first remaining profile, else None.
        """
        if key == "default":
            return False
        if key not in self.personalities:
            return False
        removed_profile = self.personalities[key]
        del self.personalities[key]
        # NOTE(review): PersonalityProfile is a dataclass, so == is a field
        # comparison — a distinct-but-equal profile stored under another key
        # would also trigger this fallback; confirm identity (`is`) was not
        # intended.
        if self.current_personality == removed_profile:
            if "default" in self.personalities:
                self.current_personality = self.personalities["default"]
            elif self.personalities:
                first_key = next(iter(self.personalities.keys()))
                self.current_personality = self.personalities[first_key]
            else:
                self.current_personality = None
        self._save_personalities()
        return True

    def list_personalities(self) -> List[str]:
        """List all personality keys, sorted."""
        return sorted(self.personalities.keys())

    def get_personality(self, key: str) -> Optional[PersonalityProfile]:
        """Get a profile by key, or None."""
        return self.personalities.get(key)

View File

@@ -0,0 +1,6 @@
"""
Skills系统初始化
"""
from .base import Skill, SkillsManager, SkillMetadata, create_skill_template
__all__ = ['Skill', 'SkillsManager', 'SkillMetadata', 'create_skill_template']

552
src/ai/skills/base.py Normal file
View File

@@ -0,0 +1,552 @@
"""
Skills 系统 - 可扩展技能插件框架。
"""
from dataclasses import dataclass
import importlib
import inspect
import json
from pathlib import Path
import re
import shutil
import sys
import tempfile
import time
from typing import Any, Callable, Dict, List, Optional, Tuple
import urllib.request
import zipfile
import os
import stat
from src.utils.logger import setup_logger
logger = setup_logger("SkillsSystem")
@dataclass
class SkillMetadata:
    """Skill metadata, loaded from a skill's ``skill.json`` file."""

    name: str                # skill display/identifier name
    version: str             # semantic version string
    description: str         # short human-readable summary
    author: str              # skill author
    dependencies: List[str]  # declared dependencies — TODO confirm how/where these are enforced
    enabled: bool = True     # disabled skills are skipped by the loader
class Skill:
    """Base class that every skill plugin derives from.

    Subclasses override :meth:`initialize` (registering their tools) and
    optionally :meth:`cleanup`.  The owning manager is injected by the
    loader via the ``manager`` attribute.
    """

    def __init__(self):
        self.metadata: Optional[SkillMetadata] = None  # filled in by the loader
        self.tools: Dict[str, Callable] = {}           # tool name -> callable
        self.manager = None                            # owning SkillsManager

    async def initialize(self):
        """Hook called after loading; subclasses register tools here."""

    async def cleanup(self):
        """Hook called before unloading; subclasses release resources here."""

    def register_tool(self, name: str, func: Callable):
        """Expose *func* under *name* in this skill's tool registry."""
        self.tools[name] = func

    def get_tools(self) -> Dict[str, Callable]:
        """Return the live mapping of tool name -> callable."""
        return self.tools
class SkillsManager:
"""技能管理器。"""
_SKILL_KEY_PATTERN = re.compile(r"[^a-zA-Z0-9_]")
_GITHUB_SHORTCUT_PATTERN = re.compile(
r"^[A-Za-z0-9_.-]+/[A-Za-z0-9_.-]+(?:#[A-Za-z0-9_.-]+)?$"
)
def __init__(self, skills_dir: Path):
self.skills_dir = skills_dir
self.skills: Dict[str, Skill] = {}
self.skills_dir.mkdir(parents=True, exist_ok=True)
logger.info(f"✅ Skills 目录: {skills_dir}")
@classmethod
def normalize_skill_key(cls, raw_name: str) -> str:
"""将任意输入规范化为可导入的 Python 包名。"""
key = raw_name.strip().lower().replace("-", "_").replace(" ", "_")
key = cls._SKILL_KEY_PATTERN.sub("_", key)
key = re.sub(r"_+", "_", key).strip("_")
if not key:
raise ValueError("技能名不能为空")
if key[0].isdigit():
key = f"skill_{key}"
return key
def _get_skill_path(self, skill_name: str) -> Path:
return self.skills_dir / self.normalize_skill_key(skill_name)
@staticmethod
def _on_rmtree_error(func, path, exc_info):
"""Handle Windows readonly/locked file deletion errors."""
try:
os.chmod(path, stat.S_IWRITE)
func(path)
except Exception:
# Keep original failure path for upper retry logic.
pass
def _read_metadata(self, skill_path: Path, fallback_name: str) -> Dict[str, Any]:
metadata_file = skill_path / "skill.json"
if metadata_file.exists():
with open(metadata_file, "r", encoding="utf-8") as f:
metadata = json.load(f)
else:
metadata = {}
metadata.setdefault("name", fallback_name)
metadata.setdefault("version", "1.0.0")
metadata.setdefault("description", f"{fallback_name} skill")
metadata.setdefault("author", "unknown")
metadata.setdefault("dependencies", [])
metadata.setdefault("enabled", True)
with open(metadata_file, "w", encoding="utf-8") as f:
json.dump(metadata, f, ensure_ascii=False, indent=2)
return metadata
def _ensure_skill_package_layout(self, skill_path: Path, skill_key: str):
"""确保技能目录满足运行最小结构。"""
skill_path.mkdir(parents=True, exist_ok=True)
init_file = skill_path / "__init__.py"
if not init_file.exists():
init_file.write_text("", encoding="utf-8")
main_file = skill_path / "main.py"
if not main_file.exists():
template = f'''"""{skill_key} skill"""
from src.ai.skills.base import Skill
class {"".join(p.capitalize() for p in skill_key.split("_"))}Skill(Skill):
async def initialize(self):
self.register_tool("ping", self.ping)
async def ping(self, text: str = "ok") -> str:
return text
async def cleanup(self):
pass
'''
main_file.write_text(template, encoding="utf-8")
self._read_metadata(skill_path, skill_key)
async def load_skill(self, skill_name: str) -> bool:
"""加载技能。"""
try:
skill_name = self.normalize_skill_key(skill_name)
if skill_name in self.skills:
logger.info(f"✅ 技能已加载: {skill_name}")
return True
skill_path = self._get_skill_path(skill_name)
if not skill_path.exists():
logger.error(f"❌ 技能不存在: {skill_name}")
return False
metadata_file = skill_path / "skill.json"
if not metadata_file.exists():
logger.error(f"❌ 技能元数据不存在: {skill_name}")
return False
with open(metadata_file, "r", encoding="utf-8") as f:
metadata_dict = json.load(f)
metadata = SkillMetadata(**metadata_dict)
if not metadata.enabled:
logger.info(f"⏸️ 技能已禁用: {skill_name}")
return False
module_path = f"skills.{skill_name}.main"
importlib.invalidate_caches()
try:
old_dont_write = sys.dont_write_bytecode
sys.dont_write_bytecode = True
try:
if module_path in sys.modules:
module = importlib.reload(sys.modules[module_path])
else:
module = importlib.import_module(module_path)
finally:
sys.dont_write_bytecode = old_dont_write
except Exception as exc:
logger.error(f"❌ 无法导入技能模块 {module_path}: {exc}")
return False
skill_class = None
for _, obj in inspect.getmembers(module):
if inspect.isclass(obj) and issubclass(obj, Skill) and obj != Skill:
skill_class = obj
break
if not skill_class:
logger.error(f"❌ 技能中未找到 Skill 子类: {skill_name}")
return False
skill = skill_class()
skill.metadata = metadata
skill.manager = self
await skill.initialize()
self.skills[skill_name] = skill
logger.info(f"✅ 加载技能: {skill_name} v{metadata.version}")
return True
except Exception as exc:
logger.error(f"❌ 加载技能失败 {skill_name}: {exc}")
return False
async def load_all_skills(self):
"""加载所有可用技能。"""
for skill_name in self.list_available_skills():
await self.load_skill(skill_name)
async def unload_skill(self, skill_name: str) -> bool:
"""仅卸载内存中的技能。"""
skill_name = self.normalize_skill_key(skill_name)
if skill_name not in self.skills:
return False
skill = self.skills[skill_name]
await skill.cleanup()
del self.skills[skill_name]
sys.modules.pop(f"skills.{skill_name}.main", None)
sys.modules.pop(f"skills.{skill_name}", None)
importlib.invalidate_caches()
logger.info(f"✅ 卸载技能: {skill_name}")
return True
async def uninstall_skill(self, skill_name: str, delete_files: bool = True) -> bool:
"""卸载技能并可选删除文件。"""
skill_name = self.normalize_skill_key(skill_name)
if skill_name in self.skills:
await self.unload_skill(skill_name)
if not delete_files:
return True
skill_path = self._get_skill_path(skill_name)
if not skill_path.exists():
return False
removed = False
for _ in range(3):
try:
shutil.rmtree(skill_path, ignore_errors=False, onerror=self._on_rmtree_error)
except PermissionError:
pass
if not skill_path.exists():
removed = True
break
time.sleep(0.2)
if not removed:
try:
metadata_file = skill_path / "skill.json"
metadata = {}
if metadata_file.exists():
with open(metadata_file, "r", encoding="utf-8") as f:
metadata = json.load(f)
metadata["enabled"] = False
with open(metadata_file, "w", encoding="utf-8") as f:
json.dump(metadata, f, ensure_ascii=False, indent=2)
logger.warning(f"⚠️ 删除目录失败,已软卸载技能: {skill_name}")
return True
except Exception:
return False
importlib.invalidate_caches()
logger.info(f"✅ 删除技能目录: {skill_name}")
return True
def get_skill(self, skill_name: str) -> Optional[Skill]:
"""获取已加载技能实例。"""
skill_name = self.normalize_skill_key(skill_name)
return self.skills.get(skill_name)
def list_skills(self) -> List[str]:
"""列出已加载技能。"""
return sorted(self.skills.keys())
def list_available_skills(self) -> List[str]:
"""列出可加载技能目录。"""
if not self.skills_dir.exists():
return []
available: List[str] = []
for skill_dir in self.skills_dir.iterdir():
if not skill_dir.is_dir() or skill_dir.name.startswith("_"):
continue
if (skill_dir / "skill.json").exists() and (skill_dir / "main.py").exists():
try:
with open(skill_dir / "skill.json", "r", encoding="utf-8") as f:
metadata = json.load(f)
if not metadata.get("enabled", True):
continue
available.append(self.normalize_skill_key(skill_dir.name))
except ValueError:
continue
except Exception:
continue
return sorted(set(available))
def get_all_tools(self) -> Dict[str, Callable]:
"""获取全部技能工具。"""
all_tools: Dict[str, Callable] = {}
for skill_name, skill in self.skills.items():
for tool_name, tool_func in skill.get_tools().items():
all_tools[f"{skill_name}.{tool_name}"] = tool_func
return all_tools
async def reload_skill(self, skill_name: str) -> bool:
"""重载技能。"""
skill_name = self.normalize_skill_key(skill_name)
if skill_name in self.skills:
await self.unload_skill(skill_name)
return await self.load_skill(skill_name)
def _resolve_network_url(self, source: str) -> str:
"""支持 URL 与 GitHub 简写。"""
source = source.strip()
if source.startswith(("http://", "https://")):
return source
if self._GITHUB_SHORTCUT_PATTERN.match(source):
repo, _, branch = source.partition("#")
branch = branch or "main"
return f"https://codeload.github.com/{repo}/zip/refs/heads/{branch}"
raise ValueError("source 必须是 URL 或 owner/repo[#branch]")
def _download_zip(self, url: str, output_zip: Path):
    """Download *url* and write the response body to *output_zip*."""
    request = urllib.request.Request(url, headers={"User-Agent": "QQBot-Skills/1.0"})
    with urllib.request.urlopen(request, timeout=30) as response:
        payload = response.read()
    output_zip.write_bytes(payload)
def _find_skill_candidates(self, root_dir: Path) -> List[Tuple[str, Path]]:
"""在目录中扫描技能候选项。"""
candidates: List[Tuple[str, Path]] = []
for metadata_file in root_dir.rglob("skill.json"):
candidate_dir = metadata_file.parent
if not (candidate_dir / "main.py").exists():
continue
try:
with open(metadata_file, "r", encoding="utf-8") as f:
metadata = json.load(f)
raw_name = str(metadata.get("name") or candidate_dir.name)
except Exception:
raw_name = candidate_dir.name
try:
skill_key = self.normalize_skill_key(raw_name)
except ValueError:
continue
candidates.append((skill_key, candidate_dir))
uniq: Dict[str, Path] = {}
for key, path in candidates:
uniq[key] = path
return sorted(uniq.items(), key=lambda x: x[0])
def install_skill_from_source(
    self,
    source: str,
    skill_name: Optional[str] = None,
    overwrite: bool = False,
) -> Tuple[bool, str]:
    """Install a skill directory from a local path, zip file, or the network.

    The skill is only written to disk; it is NOT loaded automatically.

    Args:
        source: Local directory / zip file path, an http(s) URL, or a
            GitHub ``owner/repo[#branch]`` shorthand.
        skill_name: Which skill to select when the source contains several.
        overwrite: Replace an already-installed skill of the same name.

    Returns:
        ``(True, skill_key)`` on success, ``(False, reason)`` on failure.
    """
    desired_key = self.normalize_skill_key(skill_name) if skill_name else None
    with tempfile.TemporaryDirectory(prefix="qqbot_skill_") as tmp:
        tmp_dir = Path(tmp)
        extract_root: Optional[Path] = None
        source_path = Path(source)
        if source_path.exists():
            if source_path.is_dir():
                extract_root = source_path
            elif source_path.is_file() and source_path.suffix.lower() == ".zip":
                with zipfile.ZipFile(source_path, "r") as zf:
                    zf.extractall(tmp_dir / "extract")
                extract_root = tmp_dir / "extract"
            else:
                return False, "本地 source 仅支持目录或 zip 文件"
        else:
            try:
                url = self._resolve_network_url(source)
            except ValueError as exc:
                return False, str(exc)
            download_zip = tmp_dir / "download.zip"
            try:
                self._download_zip(url, download_zip)
            except Exception as exc:
                # The GitHub shorthand defaults to branch "main"; retry
                # "master" for older repositories before giving up.
                if "codeload.github.com" in url and url.endswith("/main"):
                    fallback = url[:-4] + "master"
                    try:
                        self._download_zip(fallback, download_zip)
                    except Exception:
                        return False, f"下载技能失败: {exc}"
                else:
                    return False, f"下载技能失败: {exc}"
            try:
                with zipfile.ZipFile(download_zip, "r") as zf:
                    zf.extractall(tmp_dir / "extract")
            except Exception as exc:
                return False, f"解压技能失败: {exc}"
            extract_root = tmp_dir / "extract"
        candidates = self._find_skill_candidates(extract_root)
        if not candidates:
            # Fix: the original message had an unbalanced "(".
            return False, "未找到可安装技能(需包含 skill.json 与 main.py)"
        selected_key: Optional[str] = None
        selected_path: Optional[Path] = None
        if desired_key:
            for key, path in candidates:
                if key == desired_key:
                    selected_key, selected_path = key, path
                    break
            if not selected_path:
                names = ", ".join(k for k, _ in candidates)
                return False, f"源中未找到技能 {desired_key},可选: {names}"
        else:
            if len(candidates) > 1:
                names = ", ".join(k for k, _ in candidates)
                return False, f"检测到多个技能,请指定 skill_name。可选: {names}"
            selected_key, selected_path = candidates[0]
        assert selected_key is not None and selected_path is not None
        target_path = self._get_skill_path(selected_key)
        if target_path.exists():
            if not overwrite:
                return False, f"技能已存在: {selected_key}"
            shutil.rmtree(target_path)
        # Copy while the temporary extraction directory is still alive.
        shutil.copytree(
            selected_path,
            target_path,
            ignore=shutil.ignore_patterns("__pycache__", "*.pyc", ".git", ".github"),
        )
        self._ensure_skill_package_layout(target_path, selected_key)
        importlib.invalidate_caches()
        logger.info(f"✅ 安装技能成功: {selected_key} <- {source}")
        return True, selected_key
def create_skill_template(
    skill_name: str,
    output_dir: Path,
    description: str = "技能描述",
    author: str = "QQBot",
):
    """Create an on-disk skill skeleton (skill.json, main.py, __init__.py, README.md).

    Args:
        skill_name: Desired skill name; normalized to a skill key.
        output_dir: Parent directory in which the skill folder is created.
        description: Human-readable description written into the metadata and README.
        author: Author recorded in skill.json.
    """
    skill_key = SkillsManager.normalize_skill_key(skill_name)
    skill_dir = output_dir / skill_key
    skill_dir.mkdir(parents=True, exist_ok=True)
    metadata = {
        "name": skill_key,
        "version": "1.0.0",
        "description": description,
        "author": author,
        "dependencies": [],
        "enabled": True,
    }
    with open(skill_dir / "skill.json", "w", encoding="utf-8") as f:
        json.dump(metadata, f, ensure_ascii=False, indent=2)
    # e.g. "my_skill" -> "MySkillSkill"
    class_name = "".join(word.capitalize() for word in skill_key.split("_")) + "Skill"
    # NOTE(review): the template's internal indentation was reconstructed —
    # confirm the generated main.py parses as intended.
    main_code = f'''"""{skill_key} skill"""
from src.ai.skills.base import Skill
class {class_name}(Skill):
    async def initialize(self):
        self.register_tool("example_tool", self.example_tool)
    async def example_tool(self, text: str) -> str:
        return f"{skill_key} 收到: {{text}}"
    async def cleanup(self):
        pass
'''
    with open(skill_dir / "main.py", "w", encoding="utf-8") as f:
        f.write(main_code)
    # Empty __init__.py so the skill directory is importable as a package.
    with open(skill_dir / "__init__.py", "w", encoding="utf-8") as f:
        f.write("")
    readme = f"""# {skill_key}
## 描述
{description}
## 工具
- example_tool(text)
"""
    with open(skill_dir / "README.md", "w", encoding="utf-8") as f:
        f.write(readme)
    logger.info(f"✅ 创建技能模板: {skill_dir}")

327
src/ai/task_manager.py Normal file
View File

@@ -0,0 +1,327 @@
"""
长任务管理器 - 处理需要多步骤的复杂任务
"""
import asyncio
from typing import List, Dict, Optional, Callable, Any
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
import json
from pathlib import Path
import uuid
class TaskStatus(Enum):
    """Lifecycle states shared by long tasks and their individual steps."""
    PENDING = "pending"      # created, not yet started
    RUNNING = "running"      # currently executing
    PAUSED = "paused"        # suspended by the user
    COMPLETED = "completed"  # finished successfully
    FAILED = "failed"        # a step raised an error
    CANCELLED = "cancelled"  # aborted before completion
@dataclass
class TaskStep:
    """One executable unit of a long task: an action name plus parameters."""
    step_id: str
    description: str
    action: str
    params: Dict[str, Any]
    status: TaskStatus = TaskStatus.PENDING
    result: Optional[Any] = None
    error: Optional[str] = None
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None

    def to_dict(self) -> Dict:
        """Serialize the step to a JSON-compatible dict (datetimes as ISO strings)."""
        started = self.started_at.isoformat() if self.started_at else None
        completed = self.completed_at.isoformat() if self.completed_at else None
        return {
            'step_id': self.step_id,
            'description': self.description,
            'action': self.action,
            'params': self.params,
            'status': self.status.value,
            'result': self.result,
            'error': self.error,
            'started_at': started,
            'completed_at': completed
        }
@dataclass
class LongTask:
    """A persistable multi-step task owned by one user."""
    task_id: str
    user_id: str
    title: str
    description: str
    steps: List[TaskStep] = field(default_factory=list)
    status: TaskStatus = TaskStatus.PENDING
    created_at: datetime = field(default_factory=datetime.now)
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
    progress: float = 0.0
    metadata: Dict = field(default_factory=dict)

    def to_dict(self) -> Dict:
        """Serialize the task and all of its steps to a JSON-compatible dict."""
        def _iso(moment):
            # Optional datetimes serialize to ISO strings or None.
            return moment.isoformat() if moment else None
        return {
            'task_id': self.task_id,
            'user_id': self.user_id,
            'title': self.title,
            'description': self.description,
            'steps': [step.to_dict() for step in self.steps],
            'status': self.status.value,
            'created_at': self.created_at.isoformat(),
            'started_at': _iso(self.started_at),
            'completed_at': _iso(self.completed_at),
            'progress': self.progress,
            'metadata': self.metadata
        }

    @classmethod
    def from_dict(cls, data: Dict) -> 'LongTask':
        """Rebuild a LongTask from its to_dict() representation."""
        def _moment(value):
            return datetime.fromisoformat(value) if value else None
        steps = []
        for s in data['steps']:
            steps.append(
                TaskStep(
                    step_id=s['step_id'],
                    description=s['description'],
                    action=s['action'],
                    params=s['params'],
                    status=TaskStatus(s['status']),
                    result=s.get('result'),
                    error=s.get('error'),
                    started_at=_moment(s.get('started_at')),
                    completed_at=_moment(s.get('completed_at')),
                )
            )
        return cls(
            task_id=data['task_id'],
            user_id=data['user_id'],
            title=data['title'],
            description=data['description'],
            steps=steps,
            status=TaskStatus(data['status']),
            created_at=datetime.fromisoformat(data['created_at']),
            started_at=_moment(data.get('started_at')),
            completed_at=_moment(data.get('completed_at')),
            progress=data.get('progress', 0.0),
            metadata=data.get('metadata', {})
        )
class LongTaskManager:
    """Manages multi-step long-running tasks with JSON persistence.

    Every state change is flushed to ``storage_path`` so progress survives
    restarts. Step actions are async callables registered by name via
    :meth:`register_action`.
    """

    def __init__(self, storage_path: Path):
        """Load previously persisted tasks from *storage_path* (if any)."""
        self.storage_path = storage_path
        self.tasks: Dict[str, LongTask] = {}
        self.action_handlers: Dict[str, Callable] = {}
        # task_id -> asyncio.Task for tasks launched via start_task().
        self.running_tasks: Dict[str, asyncio.Task] = {}
        self._load()

    def _load(self):
        """Load the task list from disk; no-op when the file does not exist."""
        if self.storage_path.exists():
            with open(self.storage_path, 'r', encoding='utf-8') as f:
                data = json.load(f)
            for task_data in data:
                task = LongTask.from_dict(task_data)
                self.tasks[task.task_id] = task

    def _save(self):
        """Persist all tasks to disk as a JSON array."""
        self.storage_path.parent.mkdir(parents=True, exist_ok=True)
        data = [task.to_dict() for task in self.tasks.values()]
        with open(self.storage_path, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

    def register_action(self, action_name: str, handler: Callable):
        """Register an async handler invoked as ``handler(**step.params)``."""
        self.action_handlers[action_name] = handler

    def create_task(
        self,
        user_id: str,
        title: str,
        description: str,
        steps: List[Dict],
        metadata: Optional[Dict] = None
    ) -> str:
        """Create and persist a new task.

        Args:
            steps: Dicts with ``description``, ``action`` and optional ``params``.

        Returns:
            The generated task id.
        """
        task_id = str(uuid.uuid4())
        task_steps = [
            TaskStep(
                step_id=str(uuid.uuid4()),
                description=step['description'],
                action=step['action'],
                params=step.get('params', {})
            )
            for step in steps
        ]
        task = LongTask(
            task_id=task_id,
            user_id=user_id,
            title=title,
            description=description,
            steps=task_steps,
            metadata=metadata or {}
        )
        self.tasks[task_id] = task
        self._save()
        return task_id

    async def execute_task(
        self,
        task_id: str,
        progress_callback: Optional[Callable[[str, float, str], None]] = None
    ) -> bool:
        """Execute a task's steps sequentially.

        Args:
            progress_callback: Awaitable callback ``(task_id, progress, text)``
                invoked after each completed step.

        Returns:
            True only when every step completed; False when the task is
            unknown, already running, failed, or was cancelled/paused mid-run.
        """
        if task_id not in self.tasks:
            return False
        task = self.tasks[task_id]
        if task.status == TaskStatus.RUNNING:
            return False
        task.status = TaskStatus.RUNNING
        task.started_at = datetime.now()
        self._save()
        try:
            total_steps = len(task.steps)
            for i, step in enumerate(task.steps):
                # Honor external cancel/pause requests between steps.
                # Bug fixes vs. original: a cancelled task was previously
                # overwritten with COMPLETED below, and PAUSED was ignored.
                if task.status in (TaskStatus.CANCELLED, TaskStatus.PAUSED):
                    break
                step.status = TaskStatus.RUNNING
                step.started_at = datetime.now()
                try:
                    handler = self.action_handlers.get(step.action)
                    if not handler:
                        raise ValueError(f"未找到动作处理器: {step.action}")
                    result = await handler(**step.params)
                    step.result = result
                    step.status = TaskStatus.COMPLETED
                except Exception as e:
                    # A failing step fails the whole task.
                    step.error = str(e)
                    step.status = TaskStatus.FAILED
                    task.status = TaskStatus.FAILED
                    self._save()
                    return False
                finally:
                    step.completed_at = datetime.now()
                task.progress = (i + 1) / total_steps
                self._save()
                if progress_callback:
                    await progress_callback(
                        task_id,
                        task.progress,
                        f"完成步骤 {i+1}/{total_steps}: {step.description}"
                    )
            # Only a task that was still RUNNING (not cancelled/paused)
            # is marked complete.
            if task.status == TaskStatus.RUNNING:
                task.status = TaskStatus.COMPLETED
                task.completed_at = datetime.now()
                task.progress = 1.0
            self._save()
            return task.status == TaskStatus.COMPLETED
        except Exception:
            task.status = TaskStatus.FAILED
            self._save()
            # Bare re-raise preserves the original traceback.
            raise

    async def start_task(
        self,
        task_id: str,
        progress_callback: Optional[Callable[[str, float, str], None]] = None
    ):
        """Launch execute_task() in the background (no-op if already running)."""
        if task_id in self.running_tasks:
            return

        async def run():
            try:
                await self.execute_task(task_id, progress_callback)
            finally:
                # Always drop the bookkeeping entry, even on failure.
                if task_id in self.running_tasks:
                    del self.running_tasks[task_id]

        self.running_tasks[task_id] = asyncio.create_task(run())

    def pause_task(self, task_id: str) -> bool:
        """Request a pause; the execute loop stops before the next step."""
        if task_id not in self.tasks:
            return False
        task = self.tasks[task_id]
        if task.status == TaskStatus.RUNNING:
            task.status = TaskStatus.PAUSED
            self._save()
            return True
        return False

    def cancel_task(self, task_id: str) -> bool:
        """Cancel a task and its background asyncio.Task, if any."""
        if task_id not in self.tasks:
            return False
        task = self.tasks[task_id]
        task.status = TaskStatus.CANCELLED
        self._save()
        if task_id in self.running_tasks:
            self.running_tasks[task_id].cancel()
        return True

    def get_task(self, task_id: str) -> Optional[LongTask]:
        """Return the task object, or None if unknown."""
        return self.tasks.get(task_id)

    def get_user_tasks(self, user_id: str) -> List[LongTask]:
        """Return every task belonging to *user_id*."""
        return [
            task for task in self.tasks.values()
            if task.user_id == user_id
        ]

    def get_task_status(self, task_id: str) -> Optional[Dict]:
        """Return a status summary dict for a task, or None if unknown."""
        task = self.get_task(task_id)
        if not task:
            return None
        completed_steps = sum(1 for step in task.steps if step.status == TaskStatus.COMPLETED)
        total_steps = len(task.steps)
        return {
            'task_id': task.task_id,
            'title': task.title,
            'status': task.status.value,
            'progress': task.progress,
            'completed_steps': completed_steps,
            'total_steps': total_steps,
            'current_step': next(
                (step.description for step in task.steps if step.status == TaskStatus.RUNNING),
                None
            )
        }

View File

@@ -0,0 +1,13 @@
"""
向量存储模块
"""
from .base import VectorStore, VectorMemory
from .chroma_store import ChromaVectorStore
from .json_store import JSONVectorStore
__all__ = [
'VectorStore',
'VectorMemory',
'ChromaVectorStore',
'JSONVectorStore'
]

113
src/ai/vector_store/base.py Normal file
View File

@@ -0,0 +1,113 @@
"""
向量数据库抽象层
"""
from abc import ABC, abstractmethod
from typing import List, Dict, Optional, Tuple
from dataclasses import dataclass
from datetime import datetime
@dataclass
class VectorMemory:
    """A single long-term memory item together with its embedding vector."""
    id: str
    user_id: str
    content: str
    embedding: List[float]
    importance: float
    timestamp: datetime
    metadata: Dict
    access_count: int = 0
    last_access: Optional[datetime] = None

    def to_dict(self) -> Dict:
        """Serialize to a JSON-compatible dict (datetimes become ISO strings)."""
        payload = {
            'id': self.id,
            'user_id': self.user_id,
            'content': self.content,
            'embedding': self.embedding,
            'importance': self.importance,
            'timestamp': self.timestamp.isoformat(),
            'metadata': self.metadata,
            'access_count': self.access_count,
            'last_access': None,
        }
        if self.last_access:
            payload['last_access'] = self.last_access.isoformat()
        return payload

    @classmethod
    def from_dict(cls, data: Dict) -> 'VectorMemory':
        """Inverse of to_dict(); tolerates missing optional fields."""
        last_access_raw = data.get('last_access')
        return cls(
            id=data['id'],
            user_id=data['user_id'],
            content=data['content'],
            embedding=data['embedding'],
            importance=data['importance'],
            timestamp=datetime.fromisoformat(data['timestamp']),
            metadata=data.get('metadata', {}),
            access_count=data.get('access_count', 0),
            last_access=datetime.fromisoformat(last_access_raw) if last_access_raw else None,
        )
class VectorStore(ABC):
    """Abstract interface for long-term memory vector storage backends."""

    @abstractmethod
    async def add(
        self,
        id: str,
        user_id: str,
        content: str,
        embedding: List[float],
        importance: float,
        metadata: Optional[Dict] = None
    ) -> bool:
        """Store one memory; return True on success."""
        pass

    @abstractmethod
    async def search(
        self,
        user_id: str,
        query_embedding: List[float],
        limit: int = 5,
        min_importance: float = 0.3
    ) -> List[VectorMemory]:
        """Return up to *limit* memories most similar to *query_embedding*."""
        pass

    @abstractmethod
    async def get_by_importance(
        self,
        user_id: str,
        limit: int = 5,
        min_importance: float = 0.3
    ) -> List[VectorMemory]:
        """Return up to *limit* memories ranked by importance."""
        pass

    @abstractmethod
    async def update_access(self, memory_id: str) -> bool:
        """Record one access (count + timestamp) for a memory."""
        pass

    @abstractmethod
    async def delete(self, memory_id: str) -> bool:
        """Delete a single memory by id."""
        pass

    @abstractmethod
    async def get_all(self, user_id: str) -> List[VectorMemory]:
        """Return every memory stored for *user_id*."""
        pass

    @abstractmethod
    async def clear_user(self, user_id: str) -> bool:
        """Delete every memory stored for *user_id*."""
        pass

    @abstractmethod
    async def close(self):
        """Release backend resources / flush pending state."""
        pass

View File

@@ -0,0 +1,336 @@
"""
Chroma向量数据库实现
"""
import uuid
from typing import List, Dict, Optional
from pathlib import Path
from datetime import datetime
import chromadb
from chromadb.config import Settings
from .base import VectorStore, VectorMemory
from src.utils.logger import setup_logger
logger = setup_logger('ChromaStore')
class ChromaVectorStore(VectorStore):
    """VectorStore implementation backed by a persistent Chroma collection."""

    def __init__(self, persist_directory: Path):
        """Open (or create) the on-disk Chroma client and memory collection.

        Args:
            persist_directory: Directory Chroma persists to; created if missing.
        """
        self.persist_directory = persist_directory
        self.persist_directory.mkdir(parents=True, exist_ok=True)
        # Persistent client: Chroma flushes data to disk itself.
        self.client = chromadb.PersistentClient(
            path=str(persist_directory),
            settings=Settings(
                anonymized_telemetry=False,
                allow_reset=True
            )
        )
        # One collection shared by all users; per-user isolation is done
        # through the user_id metadata filter in every query below.
        self.collection = self.client.get_or_create_collection(
            name="long_term_memory",
            metadata={"description": "Long-term memory storage"}
        )
        logger.info(f"✅ Chroma向量数据库初始化: {persist_directory}")

    @staticmethod
    def _to_list(value):
        """Normalize sequences that may come back as numpy arrays to plain lists."""
        if value is None:
            return []
        if hasattr(value, "tolist"):
            return value.tolist()
        if isinstance(value, list):
            return value
        return list(value)

    async def add(
        self,
        id: str,
        user_id: str,
        content: str,
        embedding: List[float],
        importance: float,
        metadata: Optional[Dict] = None
    ) -> bool:
        """Store one memory; returns False (and logs) on any failure."""
        try:
            if not embedding:
                logger.warning(f"跳过写入长期记忆embedding为空: {id}")
                return False
            now = datetime.now()
            # Bookkeeping metadata stored alongside the vector. last_access
            # uses '' instead of None — presumably because Chroma metadata
            # values must be scalars; confirm against chromadb docs.
            meta = {
                'user_id': user_id,
                'importance': importance,
                'timestamp': now.isoformat(),
                'access_count': 0,
                'last_access': '',
            }
            if metadata:
                # Persist only scalar custom values, prefixed with "meta_"
                # so they can be recovered without clashing with the
                # bookkeeping keys above.
                for key, value in metadata.items():
                    if isinstance(value, (str, int, float, bool)):
                        meta[f'meta_{key}'] = value
            self.collection.add(
                ids=[id],
                embeddings=[embedding],
                documents=[content],
                metadatas=[meta]
            )
            logger.debug(f"添加记忆: {id} (用户: {user_id})")
            return True
        except Exception as e:
            logger.error(f"添加记忆失败: {e}")
            return False

    async def search(
        self,
        user_id: str,
        query_embedding: List[float],
        limit: int = 5,
        min_importance: float = 0.3
    ) -> List[VectorMemory]:
        """Return up to *limit* similar memories above *min_importance*."""
        try:
            # Over-fetch so that importance filtering below can still
            # yield *limit* results.
            results = self.collection.query(
                query_embeddings=[query_embedding],
                n_results=limit * 2,  # 多查询一些,用于过滤
                where={"user_id": user_id}
            )
            # Query results are row-per-query nested lists; we only issued
            # one query, so row 0 holds everything.
            ids = self._to_list(results.get('ids'))
            if len(ids) == 0:
                return []
            first_row_ids = self._to_list(ids[0])
            if len(first_row_ids) == 0:
                return []
            metadatas = self._to_list(results.get('metadatas'))
            documents = self._to_list(results.get('documents'))
            # Embeddings may be absent from query results; _to_list(None)
            # yields [] and the per-item guard below handles it.
            embeddings = self._to_list(results.get('embeddings'))
            first_row_metas = self._to_list(metadatas[0]) if len(metadatas) > 0 else []
            first_row_docs = self._to_list(documents[0]) if len(documents) > 0 else []
            first_row_embeddings = self._to_list(embeddings[0]) if len(embeddings) > 0 else []
            memories = []
            for i in range(len(first_row_ids)):
                if i >= len(first_row_metas) or i >= len(first_row_docs):
                    continue
                meta = first_row_metas[i]
                # Drop results below the importance threshold.
                if meta['importance'] < min_importance:
                    continue
                # Recover custom metadata written with the "meta_" prefix.
                custom_meta = {}
                for key, value in meta.items():
                    if key.startswith('meta_'):
                        custom_meta[key[5:]] = value
                memory = VectorMemory(
                    id=first_row_ids[i],
                    user_id=meta['user_id'],
                    content=first_row_docs[i],
                    embedding=self._to_list(first_row_embeddings[i]) if i < len(first_row_embeddings) else [],
                    importance=meta['importance'],
                    timestamp=datetime.fromisoformat(meta['timestamp']),
                    metadata=custom_meta,
                    access_count=meta.get('access_count', 0),
                    last_access=datetime.fromisoformat(meta['last_access']) if meta.get('last_access') else None
                )
                memories.append(memory)
                if len(memories) >= limit:
                    break
            return memories
        except Exception as e:
            logger.error(f"搜索记忆失败: {e}")
            return []

    async def get_by_importance(
        self,
        user_id: str,
        limit: int = 5,
        min_importance: float = 0.3
    ) -> List[VectorMemory]:
        """Return up to *limit* memories sorted by (importance, recency)."""
        try:
            # Fetch all of the user's memories; filtering and ranking is
            # done client-side.
            results = self.collection.get(
                where={"user_id": user_id},
                include=['embeddings', 'documents', 'metadatas']
            )
            ids = self._to_list(results.get('ids'))
            if len(ids) == 0:
                return []
            metadatas = self._to_list(results.get('metadatas'))
            documents = self._to_list(results.get('documents'))
            embeddings = self._to_list(results.get('embeddings'))
            memories = []
            for i in range(len(ids)):
                if i >= len(metadatas) or i >= len(documents):
                    continue
                meta = metadatas[i]
                if meta['importance'] < min_importance:
                    continue
                custom_meta = {}
                for key, value in meta.items():
                    if key.startswith('meta_'):
                        custom_meta[key[5:]] = value
                memory = VectorMemory(
                    id=ids[i],
                    user_id=meta['user_id'],
                    content=documents[i],
                    embedding=self._to_list(embeddings[i]) if i < len(embeddings) else [],
                    importance=meta['importance'],
                    timestamp=datetime.fromisoformat(meta['timestamp']),
                    metadata=custom_meta,
                    access_count=meta.get('access_count', 0),
                    last_access=datetime.fromisoformat(meta['last_access']) if meta.get('last_access') else None
                )
                memories.append(memory)
            # Most important first; timestamp breaks ties (newest first).
            memories.sort(key=lambda m: (m.importance, m.timestamp), reverse=True)
            return memories[:limit]
        except Exception as e:
            logger.error(f"获取记忆失败: {e}")
            return []

    async def update_access(self, memory_id: str) -> bool:
        """Increment the access counter and stamp last_access for one memory."""
        try:
            # Read the current metadata record first.
            result = self.collection.get(
                ids=[memory_id],
                include=['metadatas']
            )
            ids = self._to_list(result.get('ids'))
            if len(ids) == 0:
                return False
            metadatas = self._to_list(result.get('metadatas'))
            if len(metadatas) == 0:
                return False
            meta = metadatas[0]
            meta['access_count'] = meta.get('access_count', 0) + 1
            meta['last_access'] = datetime.now().isoformat()
            # Write the modified metadata back in place.
            self.collection.update(
                ids=[memory_id],
                metadatas=[meta]
            )
            return True
        except Exception as e:
            logger.error(f"更新访问记录失败: {e}")
            return False

    async def delete(self, memory_id: str) -> bool:
        """Delete one memory by id; returns False on backend errors."""
        try:
            self.collection.delete(ids=[memory_id])
            return True
        except Exception as e:
            logger.error(f"删除记忆失败: {e}")
            return False

    async def get_all(self, user_id: str) -> List[VectorMemory]:
        """Return every memory stored for *user_id* (unordered)."""
        try:
            results = self.collection.get(
                where={"user_id": user_id},
                include=['embeddings', 'documents', 'metadatas']
            )
            ids = self._to_list(results.get('ids'))
            if len(ids) == 0:
                return []
            metadatas = self._to_list(results.get('metadatas'))
            documents = self._to_list(results.get('documents'))
            embeddings = self._to_list(results.get('embeddings'))
            memories = []
            for i in range(len(ids)):
                if i >= len(metadatas) or i >= len(documents):
                    continue
                meta = metadatas[i]
                custom_meta = {}
                for key, value in meta.items():
                    if key.startswith('meta_'):
                        custom_meta[key[5:]] = value
                memory = VectorMemory(
                    id=ids[i],
                    user_id=meta['user_id'],
                    content=documents[i],
                    embedding=self._to_list(embeddings[i]) if i < len(embeddings) else [],
                    importance=meta['importance'],
                    timestamp=datetime.fromisoformat(meta['timestamp']),
                    metadata=custom_meta,
                    access_count=meta.get('access_count', 0),
                    last_access=datetime.fromisoformat(meta['last_access']) if meta.get('last_access') else None
                )
                memories.append(memory)
            return memories
        except Exception as e:
            logger.error(f"获取所有记忆失败: {e}")
            return []

    async def clear_user(self, user_id: str) -> bool:
        """Delete every memory belonging to *user_id*."""
        try:
            # Collect the ids first, then delete them in one call.
            results = self.collection.get(
                where={"user_id": user_id}
            )
            ids = self._to_list(results.get('ids'))
            if len(ids) > 0:
                self.collection.delete(ids=ids)
            logger.info(f"清除用户记忆: {user_id}")
            return True
        except Exception as e:
            logger.error(f"清除用户记忆失败: {e}")
            return False

    async def close(self):
        """No explicit shutdown needed; the persistent client auto-flushes."""
        logger.info("Chroma向量数据库已关闭")

View File

@@ -0,0 +1,198 @@
"""
JSON文件存储实现向后兼容
"""
import json
import uuid
from typing import List, Dict, Optional
from pathlib import Path
from datetime import datetime
import numpy as np
from .base import VectorStore, VectorMemory
from src.utils.logger import setup_logger
logger = setup_logger('JSONStore')
class JSONVectorStore(VectorStore):
    """JSON-file VectorStore implementation (backward compatible).

    Keeps every memory in RAM (``user_id -> list``) and rewrites the whole
    JSON file after each mutation; suitable for small data sets.
    """

    def __init__(self, storage_path: Path):
        """Load existing memories from *storage_path* (if present)."""
        self.storage_path = storage_path
        self.memories: Dict[str, List[VectorMemory]] = {}  # user_id -> List[VectorMemory]
        self._load()
        logger.info(f"✅ JSON存储初始化: {storage_path}")

    def _load(self):
        """Load memories from disk; a corrupt file resets the store to empty."""
        if not self.storage_path.exists():
            return
        try:
            with open(self.storage_path, 'r', encoding='utf-8') as f:
                data = json.load(f)
            for user_id, items in data.items():
                self.memories[user_id] = []
                for item in items:
                    # Legacy records may lack an id; generate one.
                    if 'id' not in item:
                        item['id'] = str(uuid.uuid4())
                    memory = VectorMemory.from_dict(item)
                    self.memories[user_id].append(memory)
            logger.info(f"加载了 {sum(len(v) for v in self.memories.values())} 条记忆")
        except Exception as e:
            logger.error(f"加载记忆失败: {e}")
            self.memories = {}

    def _save(self):
        """Persist the whole in-memory store to disk; errors are logged only."""
        try:
            self.storage_path.parent.mkdir(parents=True, exist_ok=True)
            data = {
                user_id: [memory.to_dict() for memory in memories]
                for user_id, memories in self.memories.items()
            }
            with open(self.storage_path, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=2)
        except Exception as e:
            logger.error(f"保存记忆失败: {e}")

    async def add(
        self,
        id: str,
        user_id: str,
        content: str,
        embedding: List[float],
        importance: float,
        metadata: Optional[Dict] = None
    ) -> bool:
        """Append a memory for *user_id* and persist; False on failure."""
        try:
            memory = VectorMemory(
                id=id,
                user_id=user_id,
                content=content,
                embedding=embedding,
                importance=importance,
                timestamp=datetime.now(),
                metadata=metadata or {},
                access_count=0,
                last_access=None
            )
            self.memories.setdefault(user_id, []).append(memory)
            self._save()
            logger.debug(f"添加记忆: {id} (用户: {user_id})")
            return True
        except Exception as e:
            logger.error(f"添加记忆失败: {e}")
            return False

    async def search(
        self,
        user_id: str,
        query_embedding: List[float],
        limit: int = 5,
        min_importance: float = 0.3
    ) -> List[VectorMemory]:
        """Return up to *limit* memories ranked by cosine similarity.

        Falls back to importance ranking when no embeddings are usable.
        """
        if user_id not in self.memories:
            return []
        candidates = [m for m in self.memories[user_id] if m.importance >= min_importance]
        if not candidates:
            return []
        # Fix: an empty query vector previously crashed np.dot; fall back
        # to importance ranking instead.
        if not query_embedding:
            return await self.get_by_importance(user_id, limit, min_importance)
        scored_memories = []
        for memory in candidates:
            if memory.embedding:
                similarity = self._cosine_similarity(query_embedding, memory.embedding)
                scored_memories.append((similarity, memory))
        if not scored_memories:
            # No stored embeddings at all — rank by importance instead.
            return await self.get_by_importance(user_id, limit, min_importance)
        scored_memories.sort(reverse=True, key=lambda x: x[0])
        return [m for _, m in scored_memories[:limit]]

    async def get_by_importance(
        self,
        user_id: str,
        limit: int = 5,
        min_importance: float = 0.3
    ) -> List[VectorMemory]:
        """Return up to *limit* memories sorted by (importance, recency)."""
        if user_id not in self.memories:
            return []
        memories = [m for m in self.memories[user_id] if m.importance >= min_importance]
        memories.sort(key=lambda m: (m.importance, m.timestamp), reverse=True)
        return memories[:limit]

    def _cosine_similarity(self, vec1: List[float], vec2: List[float]) -> float:
        """Cosine similarity of two vectors.

        Fix: the original divided by zero (yielding NaN) for zero-norm
        vectors; such pairs now score 0.0.
        """
        a = np.array(vec1)
        b = np.array(vec2)
        denom = np.linalg.norm(a) * np.linalg.norm(b)
        if denom == 0:
            return 0.0
        return float(np.dot(a, b) / denom)

    async def update_access(self, memory_id: str) -> bool:
        """Increment access count and stamp last_access for one memory."""
        try:
            for memories in self.memories.values():
                for memory in memories:
                    if memory.id == memory_id:
                        memory.access_count += 1
                        memory.last_access = datetime.now()
                        self._save()
                        return True
            return False
        except Exception as e:
            logger.error(f"更新访问记录失败: {e}")
            return False

    async def delete(self, memory_id: str) -> bool:
        """Delete a single memory by id; False when not found or on error."""
        try:
            for user_id, memories in self.memories.items():
                for i, memory in enumerate(memories):
                    if memory.id == memory_id:
                        del self.memories[user_id][i]
                        self._save()
                        return True
            return False
        except Exception as e:
            logger.error(f"删除记忆失败: {e}")
            return False

    async def get_all(self, user_id: str) -> List[VectorMemory]:
        """Return every memory stored for *user_id*."""
        return self.memories.get(user_id, [])

    async def clear_user(self, user_id: str) -> bool:
        """Delete all of a user's memories; True even when there were none."""
        try:
            if user_id in self.memories:
                del self.memories[user_id]
                self._save()
            logger.info(f"清除用户记忆: {user_id}")
            return True
        except Exception as e:
            logger.error(f"清除用户记忆失败: {e}")
            return False

    async def close(self):
        """Flush the store to disk one last time."""
        self._save()
        logger.info("JSON存储已关闭")

17
src/core/__init__.py Normal file
View File

@@ -0,0 +1,17 @@
"""
核心模块导出。
避免在包导入阶段触发 `bot -> handler -> core` 循环依赖。
"""
__all__ = ["MyClient", "Config"]
def __getattr__(name):
if name == "MyClient":
from .bot import MyClient
return MyClient
if name == "Config":
from .config import Config
return Config
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

105
src/core/bot.py Normal file
View File

@@ -0,0 +1,105 @@
"""
QQ机器人主程序
基于官方SDK: https://github.com/tencent-connect/botpy
官方文档: https://bot.q.qq.com/wiki/develop/api-v2/
"""
import botpy
from botpy.message import Message
from src.core.config import Config
from src.utils.logger import setup_logger
from src.handlers.message_handler_ai import MessageHandler
logger = setup_logger('QQBot')
def build_intents() -> botpy.Intents:
    """Build the minimal set of intents the bot needs.

    - public_guild_messages: guild @-mention messages (public domain)
    - public_messages: group @-mentions plus C2C (friend) direct messages
    """
    intents = botpy.Intents.none()
    intents.public_guild_messages = True
    # Newer botpy gates group-@ / C2C chats behind public_messages
    # (GROUP_AND_C2C_EVENT); older releases lack the attribute entirely.
    if not hasattr(intents, "public_messages"):
        logger.warning("⚠️ 当前 botpy 不支持 public_messages可能无法接收 C2C 私聊事件")
        return intents
    intents.public_messages = True
    logger.info("✅ 已启用 public_messages支持群聊@与 C2C 私聊)")
    return intents
class MyClient(botpy.Client):
    """QQ bot client: receives botpy events and delegates all messages."""

    def __init__(self, intents: botpy.Intents):
        super().__init__(intents=intents)
        # Every message event funnels through a single handler instance.
        self.message_handler = MessageHandler(self)

    async def on_ready(self):
        """Called once the bot has connected and is ready."""
        logger.info(f"🤖 机器人已启动: {self.robot.name} (ID: {self.robot.id})")

    async def on_at_message_create(self, message: Message):
        """Guild @-mention message (public domain)."""
        await self.message_handler.handle_at_message(message)

    async def on_message_create(self, message: Message):
        """Plain guild message (requires private-domain permission)."""
        await self.message_handler.handle_at_message(message)

    async def on_direct_message_create(self, message: Message):
        """Guild direct message."""
        await self.message_handler.handle_at_message(message)

    async def on_group_at_message_create(self, message: Message):
        """Group-chat @-mention message."""
        await self.message_handler.handle_at_message(message)

    async def on_c2c_message_create(self, message: Message):
        """C2C (one-to-one friend chat) message."""
        await self.message_handler.handle_at_message(message)

    async def on_guild_create(self, guild):
        """Bot joined a guild."""
        logger.info(f" 加入频道: {guild.name} (ID: {guild.id})")

    async def on_guild_delete(self, guild):
        """Bot left a guild."""
        logger.info(f" 离开频道: {guild.name} (ID: {guild.id})")

    async def on_error(self, error):
        """Generic error callback from botpy."""
        logger.error(f"❌ 发生错误: {error}")
def main():
    """Entry point: validate configuration, build the client, and run it."""
    try:
        Config.validate()
        logger.info("✅ 配置验证通过")
        # Minimal-permission intents avoid "disallowed intents" (4014) errors.
        intents = build_intents()
        logger.info("✅ Intents 配置完成(最小权限模式)")
        client = MyClient(intents=intents)
        logger.info("🚀 正在启动机器人...")
        client.run(appid=Config.BOT_APPID, secret=Config.BOT_SECRET)
    except ValueError as e:
        # Raised by Config.validate() on missing credentials.
        logger.error(f"❌ 配置错误: {e}")
        logger.error("请检查 .env 文件配置")
    except Exception as e:
        logger.error(f"❌ 启动失败: {e}")
        raise

72
src/core/config.py Normal file
View File

@@ -0,0 +1,72 @@
"""
QQ机器人配置管理模块
"""
import os
from typing import Optional
from dotenv import load_dotenv
# 加载环境变量
load_dotenv()
def _read_env(name: str, default: Optional[str] = None) -> Optional[str]:
"""
读取并清洗环境变量。
- 去除首尾空白
- 空字符串视为未设置
- 以 # 开头的值视为注释占位,视为未设置
"""
value = os.getenv(name)
if value is None:
return default
value = value.strip()
if not value or value.startswith('#'):
return default
return value
class Config:
    """Bot configuration, read from environment variables at import time."""
    # Bot credentials
    BOT_APPID = _read_env('BOT_APPID', '') or ''
    BOT_SECRET = _read_env('BOT_SECRET', '') or ''
    # Logging
    LOG_LEVEL = _read_env('LOG_LEVEL', 'INFO') or 'INFO'
    # Sandbox mode. Consistency fix: read through _read_env like every other
    # variable so whitespace and "#..." placeholder values are cleaned too.
    SANDBOX_MODE = (_read_env('SANDBOX_MODE', 'False') or 'False').lower() == 'true'
    # AI provider configuration
    AI_PROVIDER = _read_env('AI_PROVIDER', 'openai') or 'openai'
    AI_MODEL = _read_env('AI_MODEL', 'gpt-4') or 'gpt-4'
    AI_API_KEY = _read_env('AI_API_KEY', '') or ''
    AI_API_BASE = _read_env('AI_API_BASE', None)
    # Embedding model configuration (for RAG)
    AI_EMBED_PROVIDER = _read_env('AI_EMBED_PROVIDER', 'openai') or 'openai'
    AI_EMBED_MODEL = _read_env('AI_EMBED_MODEL', 'text-embedding-3-small') or 'text-embedding-3-small'
    AI_EMBED_API_KEY = _read_env('AI_EMBED_API_KEY', None)  # unset -> falls back to AI_API_KEY
    AI_EMBED_API_BASE = _read_env('AI_EMBED_API_BASE', None)  # unset -> falls back to AI_API_BASE
    # Vector database switch (same consistency fix as SANDBOX_MODE)
    AI_USE_VECTOR_DB = (_read_env('AI_USE_VECTOR_DB', 'true') or 'true').lower() == 'true'

    @classmethod
    def validate(cls):
        """Validate that mandatory settings are present.

        Raises:
            ValueError: When BOT_APPID or BOT_SECRET is missing.

        Returns:
            True when validation passes.
        """
        if not cls.BOT_APPID:
            raise ValueError("BOT_APPID 未配置")
        if not cls.BOT_SECRET:
            raise ValueError("BOT_SECRET 未配置")
        # The AI key is optional; only warn when it is absent.
        if cls.AI_API_KEY:
            print(f"✅ AI配置: {cls.AI_PROVIDER}/{cls.AI_MODEL}")
        else:
            print("⚠️ AI_API_KEY 未设置AI功能将不可用")
        return True

6
src/handlers/__init__.py Normal file
View File

@@ -0,0 +1,6 @@
"""
消息处理模块
"""
from .message_handler import MessageHandler
__all__ = ['MessageHandler']

View File

@@ -0,0 +1,147 @@
"""
基础消息处理器(不含 AI 能力)。
"""
import botpy
from botpy.message import Message
from src.utils.logger import setup_logger
logger = setup_logger("MessageHandler")
class MessageHandler:
"""消息处理器。"""
def __init__(self, client):
    # Keep a reference to the bot client for replies and robot metadata.
    self.client = client
async def handle_at_message(self, message: Message):
    """Dispatch an incoming @-message to the command or chat handler.

    Args:
        message: Incoming message object (guild / group / C2C).
    """
    author_name = self._get_author_name(message)
    # e.g. "GroupMessage" -> "Group" for compact log lines.
    msg_type = type(message).__name__.replace("Message", "")
    preview = message.content[:30]
    ellipsis = '...' if len(message.content) > 30 else ''
    logger.info(f"[{msg_type}] {author_name}: {preview}{ellipsis}")
    content = message.content.strip()
    # Strip the leading @-mention of the bot, when present.
    robot_name = self.client.robot.name
    if robot_name:
        content = content.replace(f"@{robot_name}", "").strip()
    if content.startswith("/"):
        await self._handle_command(message, content)
    else:
        await self._handle_chat(message, content)
@staticmethod
def _get_author_name(message: Message) -> str:
"""兼容频道消息、群消息、C2C 私聊消息的作者字段。"""
author = getattr(message, "author", None)
if not author:
return "Unknown"
return (
getattr(author, "username", None)
or getattr(author, "nick", None)
or getattr(author, "member_openid", None)
or getattr(author, "user_openid", None)
or "Unknown"
)
async def _handle_command(self, message: Message, content: str):
"""
处理命令消息。
Args:
message: 消息对象
content: 消息内容
"""
command = content.split()[0].lower()
if command == "/help" or command == "/帮助":
await self._send_help(message)
elif command == "/ping":
await self._send_ping(message)
elif command == "/info" or command == "/信息":
await self._send_info(message)
else:
await self._send_reply(message, f"未知命令: {command}\n发送 /help 查看可用命令")
async def _handle_chat(self, message: Message, content: str):
"""
处理普通聊天消息。
Args:
message: 消息对象
content: 消息内容
"""
if "你好" in content or "hello" in content.lower():
reply = "你好!我是 QQ 机器人,很高兴为你服务。"
elif "天气" in content:
reply = "天气查询功能还在开发中。"
elif "时间" in content:
from datetime import datetime
now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
reply = f"当前时间: {now}"
else:
reply = f"收到你的消息: {content}\n发送 /help 查看我能做什么"
await self._send_reply(message, reply)
async def _send_help(self, message: Message):
"""发送帮助信息。"""
help_text = """
QQ 机器人帮助
可用命令:
/help 或 /帮助 - 显示帮助信息
/ping - 测试机器人连通性
/info 或 /信息 - 显示机器人信息
其他功能:
- @我并发送消息,我会回复你
- 输入“你好”可打招呼
- 输入“时间”可查看当前时间
""".strip()
await self._send_reply(message, help_text)
async def _send_ping(self, message: Message):
"""发送 Ping 响应。"""
await self._send_reply(message, "Pong机器人运行正常。")
async def _send_info(self, message: Message):
"""发送机器人信息。"""
info_text = f"""
机器人信息
名称: {self.client.robot.name}
ID: {self.client.robot.id}
状态: 运行中
基于 QQ 官方 SDK (botpy)
""".strip()
await self._send_reply(message, info_text)
async def _send_reply(self, message: Message, content: str):
"""
发送回复消息。
Args:
message: 原始消息对象
content: 回复内容
"""
try:
await message.reply(content=content)
logger.info(f"回复: {content[:30]}{'...' if len(content) > 30 else ''}")
except Exception as e:
logger.error(f"发送失败: {e}")
import traceback
logger.debug(f"详细错误:\n{traceback.format_exc()}")

File diff suppressed because it is too large Load Diff

6
src/utils/__init__.py Normal file
View File

@@ -0,0 +1,6 @@
"""
工具模块
"""
from .logger import setup_logger
__all__ = ['setup_logger']

63
src/utils/logger.py Normal file
View File

@@ -0,0 +1,63 @@
"""
日志配置模块
"""
import logging
import os
from pathlib import Path
def setup_logger(name='QQBot', level=None):
    """
    Create and configure a logger with console and file handlers.

    Args:
        name: logger name.
        level: level name (e.g. 'DEBUG'); when None, read from the
            LOG_LEVEL environment variable, defaulting to 'INFO'.

    Returns:
        logging.Logger: the configured logger. Repeated calls with the
        same name return the already-configured logger (no duplicate
        handlers are added).
    """
    if level is None:
        level = os.getenv('LOG_LEVEL', 'INFO')
    logger = logging.getLogger(name)
    # Unknown level names fall back to INFO instead of raising
    # AttributeError (the previous behavior for a bad LOG_LEVEL value).
    logger.setLevel(getattr(logging, level.upper(), logging.INFO))
    # Do not propagate to the root logger — avoids duplicated output.
    logger.propagate = False
    # Already configured: return as-is to avoid duplicate handlers.
    if logger.handlers:
        return logger

    # Console handler shows INFO and above.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    ))
    logger.addHandler(console_handler)

    # File logging is best-effort: on a read-only filesystem (or when the
    # expected project layout is absent) fall back to console-only logging
    # instead of crashing at startup.
    try:
        log_dir = Path(__file__).parent.parent.parent / 'logs'
        log_dir.mkdir(exist_ok=True)
        file_handler = logging.FileHandler(
            log_dir / 'bot.log',
            encoding='utf-8'
        )
        # File captures everything down to DEBUG with source locations.
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S'
        ))
        logger.addHandler(file_handler)
    except OSError as e:
        logger.warning("无法创建日志文件,仅输出到控制台: %s", e)
    return logger