diff --git a/.claude/skills/algorithmic-art/.openskills.json b/.claude/skills/algorithmic-art/.openskills.json
new file mode 100644
index 0000000..8a7d8ac
--- /dev/null
+++ b/.claude/skills/algorithmic-art/.openskills.json
@@ -0,0 +1,7 @@
+{
+ "source": "anthropics/skills",
+ "sourceType": "git",
+ "repoUrl": "https://github.com/anthropics/skills",
+  "subpath": "skills/algorithmic-art",
+ "installedAt": "2026-03-02T09:19:50.038Z"
+}
\ No newline at end of file
diff --git a/.claude/skills/algorithmic-art/LICENSE.txt b/.claude/skills/algorithmic-art/LICENSE.txt
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/.claude/skills/algorithmic-art/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/.claude/skills/algorithmic-art/SKILL.md b/.claude/skills/algorithmic-art/SKILL.md
new file mode 100644
index 0000000..634f6fa
--- /dev/null
+++ b/.claude/skills/algorithmic-art/SKILL.md
@@ -0,0 +1,405 @@
+---
+name: algorithmic-art
+description: Creating algorithmic art using p5.js with seeded randomness and interactive parameter exploration. Use this when users request creating art using code, generative art, algorithmic art, flow fields, or particle systems. Create original algorithmic art rather than copying existing artists' work to avoid copyright violations.
+license: Complete terms in LICENSE.txt
+---
+
+Algorithmic philosophies are computational aesthetic movements that are then expressed through code. Output .md files (philosophy), .html files (interactive viewer), and .js files (generative algorithms).
+
+This happens in two steps:
+1. Algorithmic Philosophy Creation (.md file)
+2. Express by creating p5.js generative art (.html + .js files)
+
+First, undertake this task:
+
+## ALGORITHMIC PHILOSOPHY CREATION
+
+To begin, create an ALGORITHMIC PHILOSOPHY (not static images or templates) that will be interpreted through:
+- Computational processes, emergent behavior, mathematical beauty
+- Seeded randomness, noise fields, organic systems
+- Particles, flows, fields, forces
+- Parametric variation and controlled chaos
+
+### THE CRITICAL UNDERSTANDING
+- What is received: Some subtle input or instructions from the user to take into account; use it as a foundation, but do not let it constrain creative freedom.
+- What is created: An algorithmic philosophy/generative aesthetic movement.
+- What happens next: The implementation phase receives the philosophy and EXPRESSES IT IN CODE - creating p5.js sketches that are 90% algorithmic generation, 10% essential parameters.
+
+Consider this approach:
+- Write a manifesto for a generative art movement
+- The next phase involves writing the algorithm that brings it to life
+
+The philosophy must emphasize: Algorithmic expression. Emergent behavior. Computational beauty. Seeded variation.
+
+### HOW TO GENERATE AN ALGORITHMIC PHILOSOPHY
+
+**Name the movement** (1-2 words): "Organic Turbulence" / "Quantum Harmonics" / "Emergent Stillness"
+
+**Articulate the philosophy** (4-6 paragraphs - concise but complete):
+
+To capture the ALGORITHMIC essence, express how this philosophy manifests through:
+- Computational processes and mathematical relationships?
+- Noise functions and randomness patterns?
+- Particle behaviors and field dynamics?
+- Temporal evolution and system states?
+- Parametric variation and emergent complexity?
+
+**CRITICAL GUIDELINES:**
+- **Avoid redundancy**: Each algorithmic aspect should be mentioned once. Avoid repeating concepts about noise theory, particle dynamics, or mathematical principles unless adding new depth.
+- **Emphasize craftsmanship REPEATEDLY**: The philosophy MUST stress multiple times that the final algorithm should appear as though it took countless hours to develop, was refined with care, and comes from someone at the absolute top of their field. This framing is essential - repeat phrases like "meticulously crafted algorithm," "the product of deep computational expertise," "painstaking optimization," "master-level implementation."
+- **Leave creative space**: Be specific about the algorithmic direction, but concise enough that the next Claude has room to make interpretive implementation choices at an extremely high level of craftsmanship.
+
+The philosophy must guide the next version to express ideas ALGORITHMICALLY, not through static images. Beauty lives in the process, not the final frame.
+
+### PHILOSOPHY EXAMPLES
+
+**"Organic Turbulence"**
+Philosophy: Chaos constrained by natural law, order emerging from disorder.
+Algorithmic expression: Flow fields driven by layered Perlin noise. Thousands of particles following vector forces, their trails accumulating into organic density maps. Multiple noise octaves create turbulent regions and calm zones. Color emerges from velocity and density - fast particles burn bright, slow ones fade to shadow. The algorithm runs until equilibrium - a meticulously tuned balance where every parameter was refined through countless iterations by a master of computational aesthetics.
+
+**"Quantum Harmonics"**
+Philosophy: Discrete entities exhibiting wave-like interference patterns.
+Algorithmic expression: Particles initialized on a grid, each carrying a phase value that evolves through sine waves. When particles are near, their phases interfere - constructive interference creates bright nodes, destructive creates voids. Simple harmonic motion generates complex emergent mandalas. The result of painstaking frequency calibration where every ratio was carefully chosen to produce resonant beauty.
+
+**"Recursive Whispers"**
+Philosophy: Self-similarity across scales, infinite depth in finite space.
+Algorithmic expression: Branching structures that subdivide recursively. Each branch slightly randomized but constrained by golden ratios. L-systems or recursive subdivision generate tree-like forms that feel both mathematical and organic. Subtle noise perturbations break perfect symmetry. Line weights diminish with each recursion level. Every branching angle the product of deep mathematical exploration.
+
+**"Field Dynamics"**
+Philosophy: Invisible forces made visible through their effects on matter.
+Algorithmic expression: Vector fields constructed from mathematical functions or noise. Particles born at edges, flowing along field lines, dying when they reach equilibrium or boundaries. Multiple fields can attract, repel, or rotate particles. The visualization shows only the traces - ghost-like evidence of invisible forces. A computational dance meticulously choreographed through force balance.
+
+**"Stochastic Crystallization"**
+Philosophy: Random processes crystallizing into ordered structures.
+Algorithmic expression: Randomized circle packing or Voronoi tessellation. Start with random points, let them evolve through relaxation algorithms. Cells push apart until equilibrium. Color based on cell size, neighbor count, or distance from center. The organic tiling that emerges feels both random and inevitable. Every seed produces unique crystalline beauty - the mark of a master-level generative algorithm.
+
+*These are condensed examples. The actual algorithmic philosophy should be 4-6 substantial paragraphs.*
+
+### ESSENTIAL PRINCIPLES
+- **ALGORITHMIC PHILOSOPHY**: Creating a computational worldview to be expressed through code
+- **PROCESS OVER PRODUCT**: Always emphasize that beauty emerges from the algorithm's execution - each run is unique
+- **PARAMETRIC EXPRESSION**: Ideas communicate through mathematical relationships, forces, behaviors - not static composition
+- **ARTISTIC FREEDOM**: The next Claude interprets the philosophy algorithmically - provide creative implementation room
+- **PURE GENERATIVE ART**: This is about making LIVING ALGORITHMS, not static images with randomness
+- **EXPERT CRAFTSMANSHIP**: Repeatedly emphasize the final algorithm must feel meticulously crafted, refined through countless iterations, the product of deep expertise by someone at the absolute top of their field in computational aesthetics
+
+**The algorithmic philosophy should be 4-6 paragraphs long.** Fill it with poetic computational philosophy that brings together the intended vision. Avoid repeating the same points. Output this algorithmic philosophy as a .md file.
+
+---
+
+## DEDUCING THE CONCEPTUAL SEED
+
+**CRITICAL STEP**: Before implementing the algorithm, identify the subtle conceptual thread from the original request.
+
+**THE ESSENTIAL PRINCIPLE**:
+The concept is a **subtle, niche reference embedded within the algorithm itself** - not always literal, always sophisticated. Someone familiar with the subject should feel it intuitively, while others simply experience a masterful generative composition. The algorithmic philosophy provides the computational language. The deduced concept provides the soul - the quiet conceptual DNA woven invisibly into parameters, behaviors, and emergence patterns.
+
+This is **VERY IMPORTANT**: The reference must be so refined that it enhances the work's depth without announcing itself. Think like a jazz musician quoting another song through algorithmic harmony - only those who know will catch it, but everyone appreciates the generative beauty.
+
+---
+
+## P5.JS IMPLEMENTATION
+
+With the philosophy AND conceptual framework established, express it through code. Pause to gather thoughts before proceeding. Use only the algorithmic philosophy created and the instructions below.
+
+### ⚠️ STEP 0: READ THE TEMPLATE FIRST ⚠️
+
+**CRITICAL: BEFORE writing any HTML:**
+
+1. **Read** `templates/viewer.html` using the Read tool
+2. **Study** the exact structure, styling, and Anthropic branding
+3. **Use that file as the LITERAL STARTING POINT** - not just inspiration
+4. **Keep all FIXED sections exactly as shown** (header, sidebar structure, Anthropic colors/fonts, seed controls, action buttons)
+5. **Replace only the VARIABLE sections** marked in the file's comments (algorithm, parameters, UI controls for parameters)
+
+**Avoid:**
+- ❌ Creating HTML from scratch
+- ❌ Inventing custom styling or color schemes
+- ❌ Using system fonts or dark themes
+- ❌ Changing the sidebar structure
+
+**Follow these practices:**
+- ✅ Copy the template's exact HTML structure
+- ✅ Keep Anthropic branding (Poppins/Lora fonts, light colors, gradient backdrop)
+- ✅ Maintain the sidebar layout (Seed → Parameters → Colors? → Actions)
+- ✅ Replace only the p5.js algorithm and parameter controls
+
+The template is the foundation. Build on it, don't rebuild it.
+
+---
+
+To create gallery-quality computational art that lives and breathes, use the algorithmic philosophy as the foundation.
+
+### TECHNICAL REQUIREMENTS
+
+**Seeded Randomness (Art Blocks Pattern)**:
+```javascript
+// ALWAYS use a seed for reproducibility
+let seed = 12345; // or hash from user input
+randomSeed(seed);
+noiseSeed(seed);
+```
+
+**Parameter Structure - FOLLOW THE PHILOSOPHY**:
+
+To establish parameters that emerge naturally from the algorithmic philosophy, consider: "What qualities of this system can be adjusted?"
+
+```javascript
+let params = {
+ seed: 12345, // Always include seed for reproducibility
+ // colors
+ // Add parameters that control YOUR algorithm:
+ // - Quantities (how many?)
+ // - Scales (how big? how fast?)
+ // - Probabilities (how likely?)
+ // - Ratios (what proportions?)
+ // - Angles (what direction?)
+ // - Thresholds (when does behavior change?)
+};
+```
+
+**To design effective parameters, focus on which properties of the system need to be tunable rather than thinking in terms of "pattern types".**
+
+**Core Algorithm - EXPRESS THE PHILOSOPHY**:
+
+**CRITICAL**: The algorithmic philosophy should dictate what to build.
+
+To express the philosophy through code, avoid thinking "which pattern should I use?" and instead think "how to express this philosophy through code?"
+
+If the philosophy is about **organic emergence**, consider using:
+- Elements that accumulate or grow over time
+- Random processes constrained by natural rules
+- Feedback loops and interactions
+
+If the philosophy is about **mathematical beauty**, consider using:
+- Geometric relationships and ratios
+- Trigonometric functions and harmonics
+- Precise calculations creating unexpected patterns
+
+If the philosophy is about **controlled chaos**, consider using:
+- Random variation within strict boundaries
+- Bifurcation and phase transitions
+- Order emerging from disorder
+
+**The algorithm flows from the philosophy, not from a menu of options.**
+
+To guide the implementation, let the conceptual essence inform creative and original choices. Build something that expresses the vision for this particular request.
+
+**Canvas Setup**: Standard p5.js structure:
+```javascript
+function setup() {
+ createCanvas(1200, 1200);
+ // Initialize your system
+}
+
+function draw() {
+ // Your generative algorithm
+ // Can be static (noLoop) or animated
+}
+```
+
+### CRAFTSMANSHIP REQUIREMENTS
+
+**CRITICAL**: To achieve mastery, create algorithms that feel like they emerged through countless iterations by a master generative artist. Tune every parameter carefully. Ensure every pattern emerges with purpose. This is NOT random noise - this is CONTROLLED CHAOS refined through deep expertise.
+
+- **Balance**: Complexity without visual noise, order without rigidity
+- **Color Harmony**: Thoughtful palettes, not random RGB values
+- **Composition**: Even in randomness, maintain visual hierarchy and flow
+- **Performance**: Smooth execution, optimized for real-time if animated
+- **Reproducibility**: Same seed ALWAYS produces identical output
+
+### OUTPUT FORMAT
+
+Output:
+1. **Algorithmic Philosophy** - As markdown or text explaining the generative aesthetic
+2. **Single HTML Artifact** - Self-contained interactive generative art built from `templates/viewer.html` (see STEP 0 and next section)
+
+The HTML artifact contains everything: p5.js (from CDN), the algorithm, parameter controls, and UI - all in one file that works immediately in claude.ai artifacts or any browser. Start from the template file, not from scratch.
+
+---
+
+## INTERACTIVE ARTIFACT CREATION
+
+**REMINDER: `templates/viewer.html` should have already been read (see STEP 0). Use that file as the starting point.**
+
+To allow exploration of the generative art, create a single, self-contained HTML artifact. Ensure this artifact works immediately in claude.ai or any browser - no setup required. Embed everything inline.
+
+### CRITICAL: WHAT'S FIXED VS VARIABLE
+
+The `templates/viewer.html` file is the foundation. It contains the exact structure and styling needed.
+
+**FIXED (always include exactly as shown):**
+- Layout structure (header, sidebar, main canvas area)
+- Anthropic branding (UI colors, fonts, gradients)
+- Seed section in sidebar:
+ - Seed display
+ - Previous/Next buttons
+ - Random button
+ - Jump to seed input + Go button
+- Actions section in sidebar:
+ - Regenerate button
+ - Reset button
+
+**VARIABLE (customize for each artwork):**
+- The entire p5.js algorithm (setup/draw/classes)
+- The parameters object (define what the art needs)
+- The Parameters section in sidebar:
+ - Number of parameter controls
+ - Parameter names
+ - Min/max/step values for sliders
+ - Control types (sliders, inputs, etc.)
+- Colors section (optional):
+ - Some art needs color pickers
+ - Some art might use fixed colors
+ - Some art might be monochrome (no color controls needed)
+ - Decide based on the art's needs
+
+**Every artwork should have unique parameters and algorithm!** The fixed parts provide consistent UX - everything else expresses the unique vision.
+
+### REQUIRED FEATURES
+
+**1. Parameter Controls**
+- Sliders for numeric parameters (particle count, noise scale, speed, etc.)
+- Color pickers for palette colors
+- Real-time updates when parameters change
+- Reset button to restore defaults
+
+**2. Seed Navigation**
+- Display current seed number
+- "Previous" and "Next" buttons to cycle through seeds
+- "Random" button for random seed
+- Input field to jump to specific seed
+- Generate 100 variations when requested (seeds 1-100)
+
+**3. Single Artifact Structure**
+```html
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+```
+
+**CRITICAL**: This is a single artifact. No external files, no imports (except p5.js CDN). Everything inline.
+
+**4. Implementation Details - BUILD THE SIDEBAR**
+
+The sidebar structure:
+
+**1. Seed (FIXED)** - Always include exactly as shown:
+- Seed display
+- Prev/Next/Random/Jump buttons
+
+**2. Parameters (VARIABLE)** - Create controls for the art:
+```html
+
+ Parameter Name
+
+ ...
+
+```
+Add as many control-group divs as there are parameters.
+
+**3. Colors (OPTIONAL/VARIABLE)** - Include if the art needs adjustable colors:
+- Add color pickers if users should control palette
+- Skip this section if the art uses fixed colors
+- Skip if the art is monochrome
+
+**4. Actions (FIXED)** - Always include exactly as shown:
+- Regenerate button
+- Reset button
+- Download PNG button
+
+**Requirements**:
+- Seed controls must work (prev/next/random/jump/display)
+- All parameters must have UI controls
+- Regenerate, Reset, Download buttons must work
+- Keep Anthropic branding (UI styling, not art colors)
+
+### USING THE ARTIFACT
+
+The HTML artifact works immediately:
+1. **In claude.ai**: Displayed as an interactive artifact - runs instantly
+2. **As a file**: Save and open in any browser - no server needed
+3. **Sharing**: Send the HTML file - it's completely self-contained
+
+---
+
+## VARIATIONS & EXPLORATION
+
+The artifact includes seed navigation by default (prev/next/random buttons), allowing users to explore variations without creating multiple files. If the user wants specific variations highlighted:
+
+- Include seed presets (buttons for "Variation 1: Seed 42", "Variation 2: Seed 127", etc.)
+- Add a "Gallery Mode" that shows thumbnails of multiple seeds side-by-side
+- All within the same single artifact
+
+This is like creating a series of prints from the same plate - the algorithm is consistent, but each seed reveals different facets of its potential. The interactive nature means users discover their own favorites by exploring the seed space.
+
+---
+
+## THE CREATIVE PROCESS
+
+**User request** → **Algorithmic philosophy** → **Implementation**
+
+Each request is unique. The process involves:
+
+1. **Interpret the user's intent** - What aesthetic is being sought?
+2. **Create an algorithmic philosophy** (4-6 paragraphs) describing the computational approach
+3. **Implement it in code** - Build the algorithm that expresses this philosophy
+4. **Design appropriate parameters** - What should be tunable?
+5. **Build matching UI controls** - Sliders/inputs for those parameters
+
+**The constants**:
+- Anthropic branding (colors, fonts, layout)
+- Seed navigation (always present)
+- Self-contained HTML artifact
+
+**Everything else is variable**:
+- The algorithm itself
+- The parameters
+- The UI controls
+- The visual outcome
+
+To achieve the best results, trust creativity and let the philosophy guide the implementation.
+
+---
+
+## RESOURCES
+
+This skill includes helpful templates and documentation:
+
+- **templates/viewer.html**: REQUIRED STARTING POINT for all HTML artifacts.
+ - This is the foundation - contains the exact structure and Anthropic branding
+ - **Keep unchanged**: Layout structure, sidebar organization, Anthropic colors/fonts, seed controls, action buttons
+ - **Replace**: The p5.js algorithm, parameter definitions, and UI controls in Parameters section
+ - The extensive comments in the file mark exactly what to keep vs replace
+
+- **templates/generator_template.js**: Reference for p5.js best practices and code structure principles.
+ - Shows how to organize parameters, use seeded randomness, structure classes
+ - NOT a pattern menu - use these principles to build unique algorithms
+ - Embed algorithms inline in the HTML artifact (don't create separate .js files)
+
+**Critical reminder**:
+- The **template is the STARTING POINT**, not inspiration
+- The **algorithm is where to create** something unique
+- Don't copy the flow field example - build what the philosophy demands
+- But DO keep the exact UI structure and Anthropic branding from the template
\ No newline at end of file
diff --git a/.claude/skills/algorithmic-art/templates/generator_template.js b/.claude/skills/algorithmic-art/templates/generator_template.js
new file mode 100644
index 0000000..e263fbd
--- /dev/null
+++ b/.claude/skills/algorithmic-art/templates/generator_template.js
@@ -0,0 +1,223 @@
+/**
+ * ═══════════════════════════════════════════════════════════════════════════
+ * P5.JS GENERATIVE ART - BEST PRACTICES
+ * ═══════════════════════════════════════════════════════════════════════════
+ *
+ * This file shows STRUCTURE and PRINCIPLES for p5.js generative art.
+ * It does NOT prescribe what art you should create.
+ *
+ * Your algorithmic philosophy should guide what you build.
+ * These are just best practices for how to structure your code.
+ *
+ * ═══════════════════════════════════════════════════════════════════════════
+ */
+
+// ============================================================================
+// 1. PARAMETER ORGANIZATION
+// ============================================================================
+// Keep all tunable parameters in one object
+// This makes it easy to:
+// - Connect to UI controls
+// - Reset to defaults
+// - Serialize/save configurations
+
+let params = {
+ // Define parameters that match YOUR algorithm
+ // Examples (customize for your art):
+ // - Counts: how many elements (particles, circles, branches, etc.)
+ // - Scales: size, speed, spacing
+ // - Probabilities: likelihood of events
+ // - Angles: rotation, direction
+ // - Colors: palette arrays
+
+ seed: 12345,
+ // colorPalette: define as an array of hex strings, e.g. ['#d97757', '#6a9bcc', '#788c5d', '#b0aea5'] (required by colorFromPalette below)
+ // Add YOUR parameters here based on your algorithm
+};
+
+// ============================================================================
+// 2. SEEDED RANDOMNESS (Critical for reproducibility)
+// ============================================================================
+// ALWAYS use seeded random for Art Blocks-style reproducible output
+
+function initializeSeed(seed) {
+ randomSeed(seed);
+ noiseSeed(seed);
+ // Now all random() and noise() calls will be deterministic
+}
+
+// ============================================================================
+// 3. P5.JS LIFECYCLE
+// ============================================================================
+
+function setup() {
+ createCanvas(800, 800);
+
+ // Initialize seed first
+ initializeSeed(params.seed);
+
+ // Set up your generative system
+ // This is where you initialize:
+ // - Arrays of objects
+ // - Grid structures
+ // - Initial positions
+ // - Starting states
+
+ // For static art: call noLoop() at the end of setup
+ // For animated art: let draw() keep running
+}
+
+function draw() {
+ // Option 1: Static generation (runs once, then stops)
+ // - Generate everything in setup()
+ // - Call noLoop() in setup()
+ // - draw() doesn't do much or can be empty
+
+ // Option 2: Animated generation (continuous)
+ // - Update your system each frame
+ // - Common patterns: particle movement, growth, evolution
+ // - Can optionally call noLoop() after N frames
+
+ // Option 3: User-triggered regeneration
+ // - Use noLoop() by default
+ // - Call redraw() when parameters change
+}
+
+// ============================================================================
+// 4. CLASS STRUCTURE (When you need objects)
+// ============================================================================
+// Use classes when your algorithm involves multiple entities
+// Examples: particles, agents, cells, nodes, etc.
+
+class Entity {
+ constructor() {
+ // Initialize entity properties
+ // Use random() here - it will be seeded
+ }
+
+ update() {
+ // Update entity state
+ // This might involve:
+ // - Physics calculations
+ // - Behavioral rules
+ // - Interactions with neighbors
+ }
+
+ display() {
+ // Render the entity
+ // Keep rendering logic separate from update logic
+ }
+}
+
+// ============================================================================
+// 5. PERFORMANCE CONSIDERATIONS
+// ============================================================================
+
+// For large numbers of elements:
+// - Pre-calculate what you can
+// - Use simple collision detection (spatial hashing if needed)
+// - Limit expensive operations (sqrt, trig) when possible
+// - Consider using p5 vectors efficiently
+
+// For smooth animation:
+// - Aim for 60fps
+// - Profile if things are slow
+// - Consider reducing particle counts or simplifying calculations
+
+// ============================================================================
+// 6. UTILITY FUNCTIONS
+// ============================================================================
+
+// Color utilities
+function hexToRgb(hex) {
+ const result = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(hex);
+ return result ? {
+ r: parseInt(result[1], 16),
+ g: parseInt(result[2], 16),
+ b: parseInt(result[3], 16)
+ } : null;
+}
+
+function colorFromPalette(index) {
+ return params.colorPalette[index % params.colorPalette.length];
+}
+
+// Mapping and easing
+function mapRange(value, inMin, inMax, outMin, outMax) {
+ return outMin + (outMax - outMin) * ((value - inMin) / (inMax - inMin));
+}
+
+function easeInOutCubic(t) {
+ return t < 0.5 ? 4 * t * t * t : 1 - Math.pow(-2 * t + 2, 3) / 2;
+}
+
+// Constrain to bounds
+function wrapAround(value, max) {
+ if (value < 0) return max;
+ if (value > max) return 0;
+ return value;
+}
+
+// ============================================================================
+// 7. PARAMETER UPDATES (Connect to UI)
+// ============================================================================
+
+function updateParameter(paramName, value) {
+ params[paramName] = value;
+ // Decide if you need to regenerate or just update
+ // Some params can update in real-time, others need full regeneration
+}
+
+function regenerate() {
+ // Reinitialize your generative system
+ // Useful when parameters change significantly
+ initializeSeed(params.seed);
+ // Then regenerate your system
+}
+
+// ============================================================================
+// 8. COMMON P5.JS PATTERNS
+// ============================================================================
+
+// Drawing with transparency for trails/fading
+function fadeBackground(opacity) {
+ fill(250, 249, 245, opacity); // Anthropic light with alpha
+ noStroke();
+ rect(0, 0, width, height);
+}
+
+// Using noise for organic variation
+function getNoiseValue(x, y, scale = 0.01) {
+ return noise(x * scale, y * scale);
+}
+
+// Creating vectors from angles
+function vectorFromAngle(angle, magnitude = 1) {
+ return createVector(cos(angle), sin(angle)).mult(magnitude);
+}
+
+// ============================================================================
+// 9. EXPORT FUNCTIONS
+// ============================================================================
+
+function exportImage() {
+ saveCanvas('generative-art-' + params.seed, 'png');
+}
+
+// ============================================================================
+// REMEMBER
+// ============================================================================
+//
+// These are TOOLS and PRINCIPLES, not a recipe.
+// Your algorithmic philosophy should guide WHAT you create.
+// This structure helps you create it WELL.
+//
+// Focus on:
+// - Clean, readable code
+// - Parameterized for exploration
+// - Seeded for reproducibility
+// - Performant execution
+//
+// The art itself is entirely up to you!
+//
+// ============================================================================
\ No newline at end of file
diff --git a/.claude/skills/algorithmic-art/templates/viewer.html b/.claude/skills/algorithmic-art/templates/viewer.html
new file mode 100644
index 0000000..630cc1f
--- /dev/null
+++ b/.claude/skills/algorithmic-art/templates/viewer.html
@@ -0,0 +1,599 @@
+
+
+
+
+
+
+ Generative Art Viewer
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Initializing generative art...
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.claude/skills/brand-guidelines/.openskills.json b/.claude/skills/brand-guidelines/.openskills.json
new file mode 100644
index 0000000..d03a32f
--- /dev/null
+++ b/.claude/skills/brand-guidelines/.openskills.json
@@ -0,0 +1,7 @@
+{
+ "source": "anthropics/skills",
+ "sourceType": "git",
+ "repoUrl": "https://github.com/anthropics/skills",
+ "subpath": "skills\\brand-guidelines",
+ "installedAt": "2026-03-02T09:19:50.041Z"
+}
\ No newline at end of file
diff --git a/.claude/skills/brand-guidelines/LICENSE.txt b/.claude/skills/brand-guidelines/LICENSE.txt
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/.claude/skills/brand-guidelines/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/.claude/skills/brand-guidelines/SKILL.md b/.claude/skills/brand-guidelines/SKILL.md
new file mode 100644
index 0000000..47c72c6
--- /dev/null
+++ b/.claude/skills/brand-guidelines/SKILL.md
@@ -0,0 +1,73 @@
+---
+name: brand-guidelines
+description: Applies Anthropic's official brand colors and typography to any sort of artifact that may benefit from having Anthropic's look-and-feel. Use it when brand colors or style guidelines, visual formatting, or company design standards apply.
+license: Complete terms in LICENSE.txt
+---
+
+# Anthropic Brand Styling
+
+## Overview
+
+Use this skill to access Anthropic's official brand identity and style resources.
+
+**Keywords**: branding, corporate identity, visual identity, post-processing, styling, brand colors, typography, Anthropic brand, visual formatting, visual design
+
+## Brand Guidelines
+
+### Colors
+
+**Main Colors:**
+
+- Dark: `#141413` - Primary text and dark backgrounds
+- Light: `#faf9f5` - Light backgrounds and text on dark
+- Mid Gray: `#b0aea5` - Secondary elements
+- Light Gray: `#e8e6dc` - Subtle backgrounds
+
+**Accent Colors:**
+
+- Orange: `#d97757` - Primary accent
+- Blue: `#6a9bcc` - Secondary accent
+- Green: `#788c5d` - Tertiary accent
+
+### Typography
+
+- **Headings**: Poppins (with Arial fallback)
+- **Body Text**: Lora (with Georgia fallback)
+- **Note**: Fonts should be pre-installed in your environment for best results
+
+## Features
+
+### Smart Font Application
+
+- Applies Poppins font to headings (24pt and larger)
+- Applies Lora font to body text
+- Automatically falls back to Arial/Georgia if custom fonts unavailable
+- Preserves readability across all systems
+
+### Text Styling
+
+- Headings (24pt+): Poppins font
+- Body text: Lora font
+- Smart color selection based on background
+- Preserves text hierarchy and formatting
+
+### Shape and Accent Colors
+
+- Non-text shapes use accent colors
+- Cycles through orange, blue, and green accents
+- Maintains visual interest while staying on-brand
+
+## Technical Details
+
+### Font Management
+
+- Uses system-installed Poppins and Lora fonts when available
+- Provides automatic fallback to Arial (headings) and Georgia (body)
+- No font installation required - works with existing system fonts
+- For best results, pre-install Poppins and Lora fonts in your environment
+
+### Color Application
+
+- Uses RGB color values for precise brand matching
+- Applied via python-pptx's RGBColor class
+- Maintains color fidelity across different systems
diff --git a/.claude/skills/canvas-design/.openskills.json b/.claude/skills/canvas-design/.openskills.json
new file mode 100644
index 0000000..d85997a
--- /dev/null
+++ b/.claude/skills/canvas-design/.openskills.json
@@ -0,0 +1,7 @@
+{
+ "source": "anthropics/skills",
+ "sourceType": "git",
+ "repoUrl": "https://github.com/anthropics/skills",
+ "subpath": "skills\\canvas-design",
+ "installedAt": "2026-03-02T09:19:50.075Z"
+}
\ No newline at end of file
diff --git a/.claude/skills/canvas-design/LICENSE.txt b/.claude/skills/canvas-design/LICENSE.txt
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/.claude/skills/canvas-design/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/.claude/skills/canvas-design/SKILL.md b/.claude/skills/canvas-design/SKILL.md
new file mode 100644
index 0000000..9f63fee
--- /dev/null
+++ b/.claude/skills/canvas-design/SKILL.md
@@ -0,0 +1,130 @@
+---
+name: canvas-design
+description: Create beautiful visual art in .png and .pdf documents using design philosophy. You should use this skill when the user asks to create a poster, piece of art, design, or other static piece. Create original visual designs, never copying existing artists' work to avoid copyright violations.
+license: Complete terms in LICENSE.txt
+---
+
+These are instructions for creating design philosophies - aesthetic movements that are then EXPRESSED VISUALLY. Output only .md files, .pdf files, and .png files.
+
+Complete this in two steps:
+1. Design Philosophy Creation (.md file)
+2. Express by creating it on a canvas (.pdf file or .png file)
+
+First, undertake this task:
+
+## DESIGN PHILOSOPHY CREATION
+
+To begin, create a VISUAL PHILOSOPHY (not layouts or templates) that will be interpreted through:
+- Form, space, color, composition
+- Images, graphics, shapes, patterns
+- Minimal text as visual accent
+
+### THE CRITICAL UNDERSTANDING
+- What is received: Some subtle input or instructions by the user that should be taken into account, but used as a foundation; it should not constrain creative freedom.
+- What is created: A design philosophy/aesthetic movement.
+- What happens next: Then, the same version receives the philosophy and EXPRESSES IT VISUALLY - creating artifacts that are 90% visual design, 10% essential text.
+
+Consider this approach:
+- Write a manifesto for an art movement
+- The next phase involves making the artwork
+
+The philosophy must emphasize: Visual expression. Spatial communication. Artistic interpretation. Minimal words.
+
+### HOW TO GENERATE A VISUAL PHILOSOPHY
+
+**Name the movement** (1-2 words): "Brutalist Joy" / "Chromatic Silence" / "Metabolist Dreams"
+
+**Articulate the philosophy** (4-6 paragraphs - concise but complete):
+
+To capture the VISUAL essence, express how the philosophy manifests through:
+- Space and form
+- Color and material
+- Scale and rhythm
+- Composition and balance
+- Visual hierarchy
+
+**CRITICAL GUIDELINES:**
+- **Avoid redundancy**: Each design aspect should be mentioned once. Avoid repeating points about color theory, spatial relationships, or typographic principles unless adding new depth.
+- **Emphasize craftsmanship REPEATEDLY**: The philosophy MUST stress multiple times that the final work should appear as though it took countless hours to create, was labored over with care, and comes from someone at the absolute top of their field. This framing is essential - repeat phrases like "meticulously crafted," "the product of deep expertise," "painstaking attention," "master-level execution."
+- **Leave creative space**: Remain specific about the aesthetic direction, but concise enough that the next Claude has room to make interpretive choices also at an extremely high level of craftsmanship.
+
+The philosophy must guide the next version to express ideas VISUALLY, not through text. Information lives in design, not paragraphs.
+
+### PHILOSOPHY EXAMPLES
+
+**"Concrete Poetry"**
+Philosophy: Communication through monumental form and bold geometry.
+Visual expression: Massive color blocks, sculptural typography (huge single words, tiny labels), Brutalist spatial divisions, Polish poster energy meets Le Corbusier. Ideas expressed through visual weight and spatial tension, not explanation. Text as rare, powerful gesture - never paragraphs, only essential words integrated into the visual architecture. Every element placed with the precision of a master craftsman.
+
+**"Chromatic Language"**
+Philosophy: Color as the primary information system.
+Visual expression: Geometric precision where color zones create meaning. Typography minimal - small sans-serif labels letting chromatic fields communicate. Think Josef Albers' interaction meets data visualization. Information encoded spatially and chromatically. Words only to anchor what color already shows. The result of painstaking chromatic calibration.
+
+**"Analog Meditation"**
+Philosophy: Quiet visual contemplation through texture and breathing room.
+Visual expression: Paper grain, ink bleeds, vast negative space. Photography and illustration dominate. Typography whispered (small, restrained, serving the visual). Japanese photobook aesthetic. Images breathe across pages. Text appears sparingly - short phrases, never explanatory blocks. Each composition balanced with the care of a meditation practice.
+
+**"Organic Systems"**
+Philosophy: Natural clustering and modular growth patterns.
+Visual expression: Rounded forms, organic arrangements, color from nature through architecture. Information shown through visual diagrams, spatial relationships, iconography. Text only for key labels floating in space. The composition tells the story through expert spatial orchestration.
+
+**"Geometric Silence"**
+Philosophy: Pure order and restraint.
+Visual expression: Grid-based precision, bold photography or stark graphics, dramatic negative space. Typography precise but minimal - small essential text, large quiet zones. Swiss formalism meets Brutalist material honesty. Structure communicates, not words. Every alignment the work of countless refinements.
+
+*These are condensed examples. The actual design philosophy should be 4-6 substantial paragraphs.*
+
+### ESSENTIAL PRINCIPLES
+- **VISUAL PHILOSOPHY**: Create an aesthetic worldview to be expressed through design
+- **MINIMAL TEXT**: Always emphasize that text is sparse, essential-only, integrated as visual element - never lengthy
+- **SPATIAL EXPRESSION**: Ideas communicate through space, form, color, composition - not paragraphs
+- **ARTISTIC FREEDOM**: The next Claude interprets the philosophy visually - provide creative room
+- **PURE DESIGN**: This is about making ART OBJECTS, not documents with decoration
+- **EXPERT CRAFTSMANSHIP**: Repeatedly emphasize the final work must look meticulously crafted, labored over with care, the product of countless hours by someone at the top of their field
+
+**The design philosophy should be 4-6 paragraphs long.** Fill it with poetic design philosophy that brings together the core vision. Avoid repeating the same points. Keep the design philosophy generic without mentioning the intention of the art, as if it can be used wherever. Output the design philosophy as a .md file.
+
+---
+
+## DEDUCING THE SUBTLE REFERENCE
+
+**CRITICAL STEP**: Before creating the canvas, identify the subtle conceptual thread from the original request.
+
+**THE ESSENTIAL PRINCIPLE**:
+The topic is a **subtle, niche reference embedded within the art itself** - not always literal, always sophisticated. Someone familiar with the subject should feel it intuitively, while others simply experience a masterful abstract composition. The design philosophy provides the aesthetic language. The deduced topic provides the soul - the quiet conceptual DNA woven invisibly into form, color, and composition.
+
+This is **VERY IMPORTANT**: The reference must be refined so it enhances the work's depth without announcing itself. Think like a jazz musician quoting another song - only those who know will catch it, but everyone appreciates the music.
+
+---
+
+## CANVAS CREATION
+
+With both the philosophy and the conceptual framework established, express it on a canvas. Take a moment to gather thoughts and clear the mind. Use the design philosophy created and the instructions below to craft a masterpiece, embodying all aspects of the philosophy with expert craftsmanship.
+
+**IMPORTANT**: For any type of content, even if the user requests something for a movie/game/book, the approach should still be sophisticated. Never lose sight of the idea that this should be art, not something that's cartoony or amateur.
+
+To create museum or magazine quality work, use the design philosophy as the foundation. Create one single page, highly visual, design-forward PDF or PNG output (unless asked for more pages). Generally use repeating patterns and perfect shapes. Treat the abstract philosophical design as if it were a scientific bible, borrowing the visual language of systematic observation—dense accumulation of marks, repeated elements, or layered patterns that build meaning through patient repetition and reward sustained viewing. Add sparse, clinical typography and systematic reference markers that suggest this could be a diagram from an imaginary discipline, treating the invisible subject with the same reverence typically reserved for documenting observable phenomena. Anchor the piece with simple phrase(s) or details positioned subtly, using a limited color palette that feels intentional and cohesive. Embrace the paradox of using analytical visual language to express ideas about human experience: the result should feel like an artifact that proves something ephemeral can be studied, mapped, and understood through careful attention. This is true art.
+
+**Text as a contextual element**: Text is always minimal and visual-first, but let context guide whether that means whisper-quiet labels or bold typographic gestures. A punk venue poster might have larger, more aggressive type than a minimalist ceramics studio identity. Most of the time, font should be thin. All use of fonts must be design-forward and prioritize visual communication. Regardless of text scale, nothing falls off the page and nothing overlaps. Every element must be contained within the canvas boundaries with proper margins. Check carefully that all text, graphics, and visual elements have breathing room and clear separation. This is non-negotiable for professional execution. **IMPORTANT: Use different fonts if writing text. Search the `./canvas-fonts` directory. Regardless of approach, sophistication is non-negotiable.**
+
+Download and use whatever fonts are needed to make this a reality. Get creative by making the typography actually part of the art itself -- if the art is abstract, bring the font onto the canvas, not typeset digitally.
+
+To push boundaries, follow design instinct/intuition while using the philosophy as a guiding principle. Embrace ultimate design freedom and choice. Push aesthetics and design to the frontier.
+
+**CRITICAL**: To achieve human-crafted quality (not AI-generated), create work that looks like it took countless hours. Make it appear as though someone at the absolute top of their field labored over every detail with painstaking care. Ensure the composition, spacing, color choices, typography - everything screams expert-level craftsmanship. Double-check that nothing overlaps, formatting is flawless, every detail perfect. Create something that could be shown to people to prove expertise and rank as undeniably impressive.
+
+Output the final result as a single, downloadable .pdf or .png file, alongside the design philosophy used as a .md file.
+
+---
+
+## FINAL STEP
+
+**IMPORTANT**: The user ALREADY said "It isn't perfect enough. It must be pristine, a masterpiece of craftsmanship, as if it were about to be displayed in a museum."
+
+**CRITICAL**: To refine the work, avoid adding more graphics; instead refine what has been created and make it extremely crisp, respecting the design philosophy and the principles of minimalism entirely. Rather than adding a fun filter or refactoring a font, consider how to make the existing composition more cohesive with the art. If the instinct is to call a new function or draw a new shape, STOP and instead ask: "How can I make what's already here more of a piece of art?"
+
+Take a second pass. Go back to the code and refine/polish further to make this a philosophically designed masterpiece.
+
+## MULTI-PAGE OPTION
+
+To create additional pages when requested, create more creative pages along the same lines as the design philosophy but distinctly different as well. Bundle those pages in the same .pdf or many .pngs. Treat the first page as just a single page in a whole coffee table book waiting to be filled. Make the next pages unique twists and memories of the original. Have them almost tell a story in a very tasteful way. Exercise full creative freedom.
\ No newline at end of file
diff --git a/.claude/skills/canvas-design/canvas-fonts/ArsenalSC-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/ArsenalSC-OFL.txt
new file mode 100644
index 0000000..1dad6ca
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/ArsenalSC-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2012 The Arsenal Project Authors (andrij.design@gmail.com)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/ArsenalSC-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/ArsenalSC-Regular.ttf
new file mode 100644
index 0000000..fe5409b
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/ArsenalSC-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/BigShoulders-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/BigShoulders-Bold.ttf
new file mode 100644
index 0000000..fc5f8fd
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/BigShoulders-Bold.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/BigShoulders-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/BigShoulders-OFL.txt
new file mode 100644
index 0000000..b220280
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/BigShoulders-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2019 The Big Shoulders Project Authors (https://github.com/xotypeco/big_shoulders)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/BigShoulders-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/BigShoulders-Regular.ttf
new file mode 100644
index 0000000..de8308c
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/BigShoulders-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/Boldonse-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/Boldonse-OFL.txt
new file mode 100644
index 0000000..1890cb1
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/Boldonse-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2024 The Boldonse Project Authors (https://github.com/googlefonts/boldonse)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/Boldonse-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/Boldonse-Regular.ttf
new file mode 100644
index 0000000..43fa30a
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Boldonse-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/BricolageGrotesque-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/BricolageGrotesque-Bold.ttf
new file mode 100644
index 0000000..f3b1ded
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/BricolageGrotesque-Bold.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/BricolageGrotesque-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/BricolageGrotesque-OFL.txt
new file mode 100644
index 0000000..fc2b216
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/BricolageGrotesque-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2022 The Bricolage Grotesque Project Authors (https://github.com/ateliertriay/bricolage)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/BricolageGrotesque-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/BricolageGrotesque-Regular.ttf
new file mode 100644
index 0000000..0674ae3
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/BricolageGrotesque-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-Bold.ttf
new file mode 100644
index 0000000..58730fb
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-Bold.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-Italic.ttf b/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-Italic.ttf
new file mode 100644
index 0000000..786a1bd
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-Italic.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-OFL.txt
new file mode 100644
index 0000000..f976fdc
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2018 The Crimson Pro Project Authors (https://github.com/Fonthausen/CrimsonPro)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-Regular.ttf
new file mode 100644
index 0000000..f5666b9
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/CrimsonPro-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/DMMono-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/DMMono-OFL.txt
new file mode 100644
index 0000000..5b17f0c
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/DMMono-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2020 The DM Mono Project Authors (https://www.github.com/googlefonts/dm-mono)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/DMMono-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/DMMono-Regular.ttf
new file mode 100644
index 0000000..7efe813
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/DMMono-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/EricaOne-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/EricaOne-OFL.txt
new file mode 100644
index 0000000..490d012
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/EricaOne-OFL.txt
@@ -0,0 +1,94 @@
+Copyright (c) 2011 by LatinoType Limitada (luciano@latinotype.com),
+with Reserved Font Names "Erica One"
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/EricaOne-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/EricaOne-Regular.ttf
new file mode 100644
index 0000000..8bd91d1
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/EricaOne-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/GeistMono-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/GeistMono-Bold.ttf
new file mode 100644
index 0000000..736ff7c
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/GeistMono-Bold.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/GeistMono-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/GeistMono-OFL.txt
new file mode 100644
index 0000000..679a685
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/GeistMono-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2024 The Geist Project Authors (https://github.com/vercel/geist-font.git)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/GeistMono-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/GeistMono-Regular.ttf
new file mode 100644
index 0000000..1a30262
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/GeistMono-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/Gloock-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/Gloock-OFL.txt
new file mode 100644
index 0000000..363acd3
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/Gloock-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2022 The Gloock Project Authors (https://github.com/duartp/gloock)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/Gloock-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/Gloock-Regular.ttf
new file mode 100644
index 0000000..3e58c4e
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Gloock-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/IBMPlexMono-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/IBMPlexMono-Bold.ttf
new file mode 100644
index 0000000..247979c
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/IBMPlexMono-Bold.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/IBMPlexMono-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/IBMPlexMono-OFL.txt
new file mode 100644
index 0000000..e423b74
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/IBMPlexMono-OFL.txt
@@ -0,0 +1,93 @@
+Copyright © 2017 IBM Corp. with Reserved Font Name "Plex"
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/IBMPlexMono-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/IBMPlexMono-Regular.ttf
new file mode 100644
index 0000000..601ae94
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/IBMPlexMono-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-Bold.ttf
new file mode 100644
index 0000000..78f6e50
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-Bold.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-BoldItalic.ttf b/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-BoldItalic.ttf
new file mode 100644
index 0000000..369b89d
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-BoldItalic.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-Italic.ttf b/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-Italic.ttf
new file mode 100644
index 0000000..a4d859a
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-Italic.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-Regular.ttf
new file mode 100644
index 0000000..35f454c
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/IBMPlexSerif-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-Bold.ttf
new file mode 100644
index 0000000..f602dce
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-Bold.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-BoldItalic.ttf b/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-BoldItalic.ttf
new file mode 100644
index 0000000..122b273
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-BoldItalic.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-Italic.ttf b/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-Italic.ttf
new file mode 100644
index 0000000..4b98fb8
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-Italic.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-OFL.txt
new file mode 100644
index 0000000..4bb9914
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2022 The Instrument Sans Project Authors (https://github.com/Instrument/instrument-sans)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-Regular.ttf
new file mode 100644
index 0000000..14c6113
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/InstrumentSans-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/InstrumentSerif-Italic.ttf b/.claude/skills/canvas-design/canvas-fonts/InstrumentSerif-Italic.ttf
new file mode 100644
index 0000000..8fa958d
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/InstrumentSerif-Italic.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/InstrumentSerif-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/InstrumentSerif-Regular.ttf
new file mode 100644
index 0000000..9763031
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/InstrumentSerif-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/Italiana-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/Italiana-OFL.txt
new file mode 100644
index 0000000..ba8af21
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/Italiana-OFL.txt
@@ -0,0 +1,93 @@
+Copyright (c) 2011, Santiago Orozco (hi@typemade.mx), with Reserved Font Name "Italiana".
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/Italiana-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/Italiana-Regular.ttf
new file mode 100644
index 0000000..a9b828c
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Italiana-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/JetBrainsMono-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/JetBrainsMono-Bold.ttf
new file mode 100644
index 0000000..1926c80
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/JetBrainsMono-Bold.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/JetBrainsMono-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/JetBrainsMono-OFL.txt
new file mode 100644
index 0000000..5ceee00
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/JetBrainsMono-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2020 The JetBrains Mono Project Authors (https://github.com/JetBrains/JetBrainsMono)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/JetBrainsMono-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/JetBrainsMono-Regular.ttf
new file mode 100644
index 0000000..436c982
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/JetBrainsMono-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/Jura-Light.ttf b/.claude/skills/canvas-design/canvas-fonts/Jura-Light.ttf
new file mode 100644
index 0000000..dffbb33
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Jura-Light.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/Jura-Medium.ttf b/.claude/skills/canvas-design/canvas-fonts/Jura-Medium.ttf
new file mode 100644
index 0000000..4bf91a3
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Jura-Medium.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/Jura-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/Jura-OFL.txt
new file mode 100644
index 0000000..64ad4c6
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/Jura-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2019 The Jura Project Authors (https://github.com/ossobuffo/jura)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/LibreBaskerville-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/LibreBaskerville-OFL.txt
new file mode 100644
index 0000000..8c531fa
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/LibreBaskerville-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2012 The Libre Baskerville Project Authors (https://github.com/impallari/Libre-Baskerville) with Reserved Font Name Libre Baskerville.
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/LibreBaskerville-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/LibreBaskerville-Regular.ttf
new file mode 100644
index 0000000..c1abc26
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/LibreBaskerville-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/Lora-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/Lora-Bold.ttf
new file mode 100644
index 0000000..edae21e
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Lora-Bold.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/Lora-BoldItalic.ttf b/.claude/skills/canvas-design/canvas-fonts/Lora-BoldItalic.ttf
new file mode 100644
index 0000000..12dea8c
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Lora-BoldItalic.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/Lora-Italic.ttf b/.claude/skills/canvas-design/canvas-fonts/Lora-Italic.ttf
new file mode 100644
index 0000000..e24b69b
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Lora-Italic.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/Lora-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/Lora-OFL.txt
new file mode 100644
index 0000000..4cf1b95
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/Lora-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2011 The Lora Project Authors (https://github.com/cyrealtype/Lora-Cyrillic), with Reserved Font Name "Lora".
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/Lora-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/Lora-Regular.ttf
new file mode 100644
index 0000000..dc751db
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Lora-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/NationalPark-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/NationalPark-Bold.ttf
new file mode 100644
index 0000000..f4d7c02
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/NationalPark-Bold.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/NationalPark-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/NationalPark-OFL.txt
new file mode 100644
index 0000000..f4ec3fb
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/NationalPark-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2025 The National Park Project Authors (https://github.com/benhoepner/National-Park)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/NationalPark-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/NationalPark-Regular.ttf
new file mode 100644
index 0000000..e4cbfbf
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/NationalPark-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/NothingYouCouldDo-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/NothingYouCouldDo-OFL.txt
new file mode 100644
index 0000000..c81eccd
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/NothingYouCouldDo-OFL.txt
@@ -0,0 +1,93 @@
+Copyright (c) 2010, Kimberly Geswein (kimberlygeswein.com)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/NothingYouCouldDo-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/NothingYouCouldDo-Regular.ttf
new file mode 100644
index 0000000..b086bce
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/NothingYouCouldDo-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/Outfit-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/Outfit-Bold.ttf
new file mode 100644
index 0000000..f9f2f72
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Outfit-Bold.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/Outfit-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/Outfit-OFL.txt
new file mode 100644
index 0000000..fd0cb99
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/Outfit-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2021 The Outfit Project Authors (https://github.com/Outfitio/Outfit-Fonts)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/Outfit-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/Outfit-Regular.ttf
new file mode 100644
index 0000000..3939ab2
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Outfit-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/PixelifySans-Medium.ttf b/.claude/skills/canvas-design/canvas-fonts/PixelifySans-Medium.ttf
new file mode 100644
index 0000000..95cd372
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/PixelifySans-Medium.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/PixelifySans-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/PixelifySans-OFL.txt
new file mode 100644
index 0000000..b02d1b6
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/PixelifySans-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2021 The Pixelify Sans Project Authors (https://github.com/eifetx/Pixelify-Sans)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/PoiretOne-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/PoiretOne-OFL.txt
new file mode 100644
index 0000000..607bdad
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/PoiretOne-OFL.txt
@@ -0,0 +1,93 @@
+Copyright (c) 2011, Denis Masharov (denis.masharov@gmail.com)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/PoiretOne-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/PoiretOne-Regular.ttf
new file mode 100644
index 0000000..b339511
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/PoiretOne-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/RedHatMono-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/RedHatMono-Bold.ttf
new file mode 100644
index 0000000..a6e3cf1
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/RedHatMono-Bold.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/RedHatMono-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/RedHatMono-OFL.txt
new file mode 100644
index 0000000..16cf394
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/RedHatMono-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2024 The Red Hat Project Authors (https://github.com/RedHatOfficial/RedHatFont)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/RedHatMono-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/RedHatMono-Regular.ttf
new file mode 100644
index 0000000..3bf6a69
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/RedHatMono-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/Silkscreen-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/Silkscreen-OFL.txt
new file mode 100644
index 0000000..a1fe7d5
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/Silkscreen-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2001 The Silkscreen Project Authors (https://github.com/googlefonts/silkscreen)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/Silkscreen-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/Silkscreen-Regular.ttf
new file mode 100644
index 0000000..8abaa7c
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Silkscreen-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/SmoochSans-Medium.ttf b/.claude/skills/canvas-design/canvas-fonts/SmoochSans-Medium.ttf
new file mode 100644
index 0000000..0af9ead
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/SmoochSans-Medium.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/SmoochSans-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/SmoochSans-OFL.txt
new file mode 100644
index 0000000..4c2f033
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/SmoochSans-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2016 The Smooch Sans Project Authors (https://github.com/googlefonts/smooch-sans)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/Tektur-Medium.ttf b/.claude/skills/canvas-design/canvas-fonts/Tektur-Medium.ttf
new file mode 100644
index 0000000..34fc797
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Tektur-Medium.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/Tektur-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/Tektur-OFL.txt
new file mode 100644
index 0000000..2cad55f
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/Tektur-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2023 The Tektur Project Authors (https://www.github.com/hyvyys/Tektur)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/Tektur-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/Tektur-Regular.ttf
new file mode 100644
index 0000000..f280fba
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/Tektur-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/WorkSans-Bold.ttf b/.claude/skills/canvas-design/canvas-fonts/WorkSans-Bold.ttf
new file mode 100644
index 0000000..5c97989
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/WorkSans-Bold.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/WorkSans-BoldItalic.ttf b/.claude/skills/canvas-design/canvas-fonts/WorkSans-BoldItalic.ttf
new file mode 100644
index 0000000..54418b8
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/WorkSans-BoldItalic.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/WorkSans-Italic.ttf b/.claude/skills/canvas-design/canvas-fonts/WorkSans-Italic.ttf
new file mode 100644
index 0000000..40529b6
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/WorkSans-Italic.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/WorkSans-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/WorkSans-OFL.txt
new file mode 100644
index 0000000..070f341
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/WorkSans-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2019 The Work Sans Project Authors (https://github.com/weiweihuanghuang/Work-Sans)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/WorkSans-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/WorkSans-Regular.ttf
new file mode 100644
index 0000000..d24586c
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/WorkSans-Regular.ttf differ
diff --git a/.claude/skills/canvas-design/canvas-fonts/YoungSerif-OFL.txt b/.claude/skills/canvas-design/canvas-fonts/YoungSerif-OFL.txt
new file mode 100644
index 0000000..f09443c
--- /dev/null
+++ b/.claude/skills/canvas-design/canvas-fonts/YoungSerif-OFL.txt
@@ -0,0 +1,93 @@
+Copyright 2023 The Young Serif Project Authors (https://github.com/noirblancrouge/YoungSerif)
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+https://openfontlicense.org
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/.claude/skills/canvas-design/canvas-fonts/YoungSerif-Regular.ttf b/.claude/skills/canvas-design/canvas-fonts/YoungSerif-Regular.ttf
new file mode 100644
index 0000000..f454fbe
Binary files /dev/null and b/.claude/skills/canvas-design/canvas-fonts/YoungSerif-Regular.ttf differ
diff --git a/.claude/skills/doc-coauthoring/.openskills.json b/.claude/skills/doc-coauthoring/.openskills.json
new file mode 100644
index 0000000..9c85a1b
--- /dev/null
+++ b/.claude/skills/doc-coauthoring/.openskills.json
@@ -0,0 +1,7 @@
+{
+ "source": "anthropics/skills",
+ "sourceType": "git",
+ "repoUrl": "https://github.com/anthropics/skills",
+ "subpath": "skills\\doc-coauthoring",
+ "installedAt": "2026-03-02T09:19:50.076Z"
+}
\ No newline at end of file
diff --git a/.claude/skills/doc-coauthoring/SKILL.md b/.claude/skills/doc-coauthoring/SKILL.md
new file mode 100644
index 0000000..a5a6983
--- /dev/null
+++ b/.claude/skills/doc-coauthoring/SKILL.md
@@ -0,0 +1,375 @@
+---
+name: doc-coauthoring
+description: Guide users through a structured workflow for co-authoring documentation. Use when user wants to write documentation, proposals, technical specs, decision docs, or similar structured content. This workflow helps users efficiently transfer context, refine content through iteration, and verify the doc works for readers. Trigger when user mentions writing docs, creating proposals, drafting specs, or similar documentation tasks.
+---
+
+# Doc Co-Authoring Workflow
+
+This skill provides a structured workflow for guiding users through collaborative document creation. Act as an active guide, walking users through three stages: Context Gathering, Refinement & Structure, and Reader Testing.
+
+## When to Offer This Workflow
+
+**Trigger conditions:**
+- User mentions writing documentation: "write a doc", "draft a proposal", "create a spec", "write up"
+- User mentions specific doc types: "PRD", "design doc", "decision doc", "RFC"
+- User seems to be starting a substantial writing task
+
+**Initial offer:**
+Offer the user a structured workflow for co-authoring the document. Explain the three stages:
+
+1. **Context Gathering**: User provides all relevant context while Claude asks clarifying questions
+2. **Refinement & Structure**: Iteratively build each section through brainstorming and editing
+3. **Reader Testing**: Test the doc with a fresh Claude (no context) to catch blind spots before others read it
+
+Explain that this approach helps ensure the doc works well when others read it (including when they paste it into Claude). Ask if they want to try this workflow or prefer to work freeform.
+
+If user declines, work freeform. If user accepts, proceed to Stage 1.
+
+## Stage 1: Context Gathering
+
+**Goal:** Close the gap between what the user knows and what Claude knows, enabling smart guidance later.
+
+### Initial Questions
+
+Start by asking the user for meta-context about the document:
+
+1. What type of document is this? (e.g., technical spec, decision doc, proposal)
+2. Who's the primary audience?
+3. What's the desired impact when someone reads this?
+4. Is there a template or specific format to follow?
+5. Any other constraints or context to know?
+
+Inform them they can answer in shorthand or dump information however works best for them.
+
+**If user provides a template or mentions a doc type:**
+- Ask if they have a template document to share
+- If they provide a link to a shared document, use the appropriate integration to fetch it
+- If they provide a file, read it
+
+**If user mentions editing an existing shared document:**
+- Use the appropriate integration to read the current state
+- Check for images without alt-text
+- If images exist without alt-text, explain that when others use Claude to understand the doc, Claude won't be able to see them. Ask if they want alt-text generated. If so, request they paste each image into chat for descriptive alt-text generation.
+
+### Info Dumping
+
+Once initial questions are answered, encourage the user to dump all the context they have. Request information such as:
+- Background on the project/problem
+- Related team discussions or shared documents
+- Why alternative solutions aren't being used
+- Organizational context (team dynamics, past incidents, politics)
+- Timeline pressures or constraints
+- Technical architecture or dependencies
+- Stakeholder concerns
+
+Advise them not to worry about organizing it - just get it all out. Offer multiple ways to provide context:
+- Info dump stream-of-consciousness
+- Point to team channels or threads to read
+- Link to shared documents
+
+**If integrations are available** (e.g., Slack, Teams, Google Drive, SharePoint, or other MCP servers), mention that these can be used to pull in context directly.
+
+**If no integrations are detected and in Claude.ai or Claude app:** Suggest they can enable connectors in their Claude settings to allow pulling context from messaging apps and document storage directly.
+
+Inform them clarifying questions will be asked once they've done their initial dump.
+
+**During context gathering:**
+
+- If user mentions team channels or shared documents:
+ - If integrations available: Inform them the content will be read now, then use the appropriate integration
+ - If integrations not available: Explain lack of access. Suggest they enable connectors in Claude settings, or paste the relevant content directly.
+
+- If user mentions entities/projects that are unknown:
+ - Ask if connected tools should be searched to learn more
+ - Wait for user confirmation before searching
+
+- As user provides context, track what's being learned and what's still unclear
+
+**Asking clarifying questions:**
+
+When user signals they've done their initial dump (or after substantial context provided), ask clarifying questions to ensure understanding:
+
+Generate 5-10 numbered questions based on gaps in the context.
+
+Inform them they can use shorthand to answer (e.g., "1: yes, 2: see #channel, 3: no because backwards compat"), link to more docs, point to channels to read, or just keep info-dumping. Whatever's most efficient for them.
+
+**Exit condition:**
+Sufficient context has been gathered when questions show understanding - when edge cases and trade-offs can be asked about without needing basics explained.
+
+**Transition:**
+Ask if there's any more context they want to provide at this stage, or if it's time to move on to drafting the document.
+
+If user wants to add more, let them. When ready, proceed to Stage 2.
+
+## Stage 2: Refinement & Structure
+
+**Goal:** Build the document section by section through brainstorming, curation, and iterative refinement.
+
+**Instructions to user:**
+Explain that the document will be built section by section. For each section:
+1. Clarifying questions will be asked about what to include
+2. 5-20 options will be brainstormed
+3. User will indicate what to keep/remove/combine
+4. The section will be drafted
+5. It will be refined through surgical edits
+
+Start with whichever section has the most unknowns (usually the core decision/proposal), then work through the rest.
+
+**Section ordering:**
+
+If the document structure is clear:
+Ask which section they'd like to start with.
+
+Suggest starting with whichever section has the most unknowns. For decision docs, that's usually the core proposal. For specs, it's typically the technical approach. Summary sections are best left for last.
+
+If user doesn't know what sections they need:
+Based on the type of document and template, suggest 3-5 sections appropriate for the doc type.
+
+Ask if this structure works, or if they want to adjust it.
+
+**Once structure is agreed:**
+
+Create the initial document structure with placeholder text for all sections.
+
+**If access to artifacts is available:**
+Use `create_file` to create an artifact. This gives both Claude and the user a scaffold to work from.
+
+Inform them that the initial structure with placeholders for all sections will be created.
+
+Create artifact with all section headers and brief placeholder text like "[To be written]" or "[Content here]".
+
+Provide the scaffold link and indicate it's time to fill in each section.
+
+**If no access to artifacts:**
+Create a markdown file in the working directory. Name it appropriately (e.g., `decision-doc.md`, `technical-spec.md`).
+
+Inform them that the initial structure with placeholders for all sections will be created.
+
+Create file with all section headers and placeholder text.
+
+Confirm the filename has been created and indicate it's time to fill in each section.
+
+**For each section:**
+
+### Step 1: Clarifying Questions
+
+Announce work will begin on the [SECTION NAME] section. Ask 5-10 clarifying questions about what should be included:
+
+Generate 5-10 specific questions based on context and section purpose.
+
+Inform them they can answer in shorthand or just indicate what's important to cover.
+
+### Step 2: Brainstorming
+
+For the [SECTION NAME] section, brainstorm [5-20] things that might be included, depending on the section's complexity. Look for:
+- Context shared that might have been forgotten
+- Angles or considerations not yet mentioned
+
+Generate 5-20 numbered options based on section complexity. At the end, offer to brainstorm more if they want additional options.
+
+### Step 3: Curation
+
+Ask which points should be kept, removed, or combined. Request brief justifications to help learn priorities for the next sections.
+
+Provide examples:
+- "Keep 1,4,7,9"
+- "Remove 3 (duplicates 1)"
+- "Remove 6 (audience already knows this)"
+- "Combine 11 and 12"
+
+**If user gives freeform feedback** (e.g., "looks good" or "I like most of it but...") instead of numbered selections, extract their preferences and proceed. Parse what they want kept/removed/changed and apply it.
+
+### Step 4: Gap Check
+
+Based on what they've selected, ask if there's anything important missing for the [SECTION NAME] section.
+
+### Step 5: Drafting
+
+Use `str_replace` to replace the placeholder text for this section with the actual drafted content.
+
+Announce the [SECTION NAME] section will be drafted now based on what they've selected.
+
+**If using artifacts:**
+After drafting, provide a link to the artifact.
+
+Ask them to read through it and indicate what to change. Note that being specific helps learning for the next sections.
+
+**If using a file (no artifacts):**
+After drafting, confirm completion.
+
+Inform them the [SECTION NAME] section has been drafted in [filename]. Ask them to read through it and indicate what to change. Note that being specific helps learning for the next sections.
+
+**Key instruction for user (include when drafting the first section):**
+Provide a note: Instead of editing the doc directly, ask them to indicate what to change. This helps learning of their style for future sections. For example: "Remove the X bullet - already covered by Y" or "Make the third paragraph more concise".
+
+### Step 6: Iterative Refinement
+
+As user provides feedback:
+- Use `str_replace` to make edits (never reprint the whole doc)
+- **If using artifacts:** Provide link to artifact after each edit
+- **If using files:** Just confirm edits are complete
+- If user edits doc directly and asks to read it: mentally note the changes they made and keep them in mind for future sections (this shows their preferences)
+
+**Continue iterating** until user is satisfied with the section.
+
+### Quality Checking
+
+After 3 consecutive iterations with no substantial changes, ask if anything can be removed without losing important information.
+
+When section is done, confirm [SECTION NAME] is complete. Ask if ready to move to the next section.
+
+**Repeat for all sections.**
+
+### Near Completion
+
+When approaching completion (80%+ of sections done), announce intention to re-read the entire document and check for:
+- Flow and consistency across sections
+- Redundancy or contradictions
+- Anything that feels like "slop" or generic filler
+- Whether every sentence carries weight
+
+Read entire document and provide feedback.
+
+**When all sections are drafted and refined:**
+Announce all sections are drafted. Indicate intention to review the complete document one more time.
+
+Review for overall coherence, flow, completeness.
+
+Provide any final suggestions.
+
+Ask if ready to move to Reader Testing, or if they want to refine anything else.
+
+## Stage 3: Reader Testing
+
+**Goal:** Test the document with a fresh Claude (no context bleed) to verify it works for readers.
+
+**Instructions to user:**
+Explain that testing will now occur to see if the document actually works for readers. This catches blind spots - things that make sense to the authors but might confuse others.
+
+### Testing Approach
+
+**If access to sub-agents is available (e.g., in Claude Code):**
+
+Perform the testing directly without user involvement.
+
+### Step 1: Predict Reader Questions
+
+Announce intention to predict what questions readers might ask when trying to discover this document.
+
+Generate 5-10 questions that readers would realistically ask.
+
+### Step 2: Test with Sub-Agent
+
+Announce that these questions will be tested with a fresh Claude instance (no context from this conversation).
+
+For each question, invoke a sub-agent with just the document content and the question.
+
+Summarize what Reader Claude got right/wrong for each question.
+
+### Step 3: Run Additional Checks
+
+Announce additional checks will be performed.
+
+Invoke sub-agent to check for ambiguity, false assumptions, contradictions.
+
+Summarize any issues found.
+
+### Step 4: Report and Fix
+
+If issues found:
+Report that Reader Claude struggled with specific issues.
+
+List the specific issues.
+
+Indicate intention to fix these gaps.
+
+Loop back to refinement for problematic sections.
+
+---
+
+**If no access to sub-agents (e.g., claude.ai web interface):**
+
+The user will need to do the testing manually.
+
+### Step 1: Predict Reader Questions
+
+Ask what questions people might ask when trying to discover this document. What would they type into Claude.ai?
+
+Generate 5-10 questions that readers would realistically ask.
+
+### Step 2: Setup Testing
+
+Provide testing instructions:
+1. Open a fresh Claude conversation: https://claude.ai
+2. Paste or share the document content (if using a shared doc platform with connectors enabled, provide the link)
+3. Ask Reader Claude the generated questions
+
+For each question, instruct Reader Claude to provide:
+- The answer
+- Whether anything was ambiguous or unclear
+- What knowledge/context the doc assumes is already known
+
+Check if Reader Claude gives correct answers or misinterprets anything.
+
+### Step 3: Additional Checks
+
+Also ask Reader Claude:
+- "What in this doc might be ambiguous or unclear to readers?"
+- "What knowledge or context does this doc assume readers already have?"
+- "Are there any internal contradictions or inconsistencies?"
+
+### Step 4: Iterate Based on Results
+
+Ask what Reader Claude got wrong or struggled with. Indicate intention to fix those gaps.
+
+Loop back to refinement for any problematic sections.
+
+---
+
+### Exit Condition (Both Approaches)
+
+When Reader Claude consistently answers questions correctly and doesn't surface new gaps or ambiguities, the doc is ready.
+
+## Final Review
+
+When Reader Testing passes:
+Announce the doc has passed Reader Claude testing. Before completion:
+
+1. Recommend they do a final read-through themselves - they own this document and are responsible for its quality
+2. Suggest double-checking any facts, links, or technical details
+3. Ask them to verify it achieves the impact they wanted
+
+Ask if they want one more review, or if the work is done.
+
+**If user wants final review, provide it. Otherwise:**
+Announce document completion. Provide a few final tips:
+- Consider linking this conversation in an appendix so readers can see how the doc was developed
+- Use appendices to provide depth without bloating the main doc
+- Update the doc as feedback is received from real readers
+
+## Tips for Effective Guidance
+
+**Tone:**
+- Be direct and procedural
+- Explain rationale briefly when it affects user behavior
+- Don't try to "sell" the approach - just execute it
+
+**Handling Deviations:**
+- If user wants to skip a stage: Ask if they want to skip this and write freeform
+- If user seems frustrated: Acknowledge this is taking longer than expected. Suggest ways to move faster
+- Always give user agency to adjust the process
+
+**Context Management:**
+- Throughout, if context is missing on something mentioned, proactively ask
+- Don't let gaps accumulate - address them as they come up
+
+**Artifact Management:**
+- Use `create_file` for drafting full sections
+- Use `str_replace` for all edits
+- Provide artifact link after every change
+- Never use artifacts for brainstorming lists - that's just conversation
+
+**Quality over Speed:**
+- Don't rush through stages
+- Each iteration should make meaningful improvements
+- The goal is a document that actually works for readers
diff --git a/.claude/skills/docx/.openskills.json b/.claude/skills/docx/.openskills.json
new file mode 100644
index 0000000..1cdf74d
--- /dev/null
+++ b/.claude/skills/docx/.openskills.json
@@ -0,0 +1,7 @@
+{
+ "source": "anthropics/skills",
+ "sourceType": "git",
+ "repoUrl": "https://github.com/anthropics/skills",
+ "subpath": "skills\\docx",
+ "installedAt": "2026-03-02T09:19:50.102Z"
+}
\ No newline at end of file
diff --git a/.claude/skills/docx/LICENSE.txt b/.claude/skills/docx/LICENSE.txt
new file mode 100644
index 0000000..c55ab42
--- /dev/null
+++ b/.claude/skills/docx/LICENSE.txt
@@ -0,0 +1,30 @@
+© 2025 Anthropic, PBC. All rights reserved.
+
+LICENSE: Use of these materials (including all code, prompts, assets, files,
+and other components of this Skill) is governed by your agreement with
+Anthropic regarding use of Anthropic's services. If no separate agreement
+exists, use is governed by Anthropic's Consumer Terms of Service or
+Commercial Terms of Service, as applicable:
+https://www.anthropic.com/legal/consumer-terms
+https://www.anthropic.com/legal/commercial-terms
+Your applicable agreement is referred to as the "Agreement." "Services" are
+as defined in the Agreement.
+
+ADDITIONAL RESTRICTIONS: Notwithstanding anything in the Agreement to the
+contrary, users may not:
+
+- Extract these materials from the Services or retain copies of these
+ materials outside the Services
+- Reproduce or copy these materials, except for temporary copies created
+ automatically during authorized use of the Services
+- Create derivative works based on these materials
+- Distribute, sublicense, or transfer these materials to any third party
+- Make, offer to sell, sell, or import any inventions embodied in these
+ materials
+- Reverse engineer, decompile, or disassemble these materials
+
+The receipt, viewing, or possession of these materials does not convey or
+imply any license or right beyond those expressly granted above.
+
+Anthropic retains all right, title, and interest in these materials,
+including all copyrights, patents, and other intellectual property rights.
diff --git a/.claude/skills/docx/SKILL.md b/.claude/skills/docx/SKILL.md
new file mode 100644
index 0000000..2951e55
--- /dev/null
+++ b/.claude/skills/docx/SKILL.md
@@ -0,0 +1,590 @@
+---
+name: docx
+description: "Use this skill whenever the user wants to create, read, edit, or manipulate Word documents (.docx files). Triggers include: any mention of 'Word doc', 'word document', '.docx', or requests to produce professional documents with formatting like tables of contents, headings, page numbers, or letterheads. Also use when extracting or reorganizing content from .docx files, inserting or replacing images in documents, performing find-and-replace in Word files, working with tracked changes or comments, or converting content into a polished Word document. If the user asks for a 'report', 'memo', 'letter', 'template', or similar deliverable as a Word or .docx file, use this skill. Do NOT use for PDFs, spreadsheets, Google Docs, or general coding tasks unrelated to document generation."
+license: Proprietary. LICENSE.txt has complete terms
+---
+
+# DOCX creation, editing, and analysis
+
+## Overview
+
+A .docx file is a ZIP archive containing XML files.
+
+## Quick Reference
+
+| Task | Approach |
+|------|----------|
+| Read/analyze content | `pandoc` or unpack for raw XML |
+| Create new document | Use `docx-js` - see Creating New Documents below |
+| Edit existing document | Unpack → edit XML → repack - see Editing Existing Documents below |
+
+### Converting .doc to .docx
+
+Legacy `.doc` files must be converted before editing:
+
+```bash
+python scripts/office/soffice.py --headless --convert-to docx document.doc
+```
+
+### Reading Content
+
+```bash
+# Text extraction with tracked changes
+pandoc --track-changes=all document.docx -o output.md
+
+# Raw XML access
+python scripts/office/unpack.py document.docx unpacked/
+```
+
+### Converting to Images
+
+```bash
+python scripts/office/soffice.py --headless --convert-to pdf document.docx
+pdftoppm -jpeg -r 150 document.pdf page
+```
+
+### Accepting Tracked Changes
+
+To produce a clean document with all tracked changes accepted (requires LibreOffice):
+
+```bash
+python scripts/accept_changes.py input.docx output.docx
+```
+
+---
+
+## Creating New Documents
+
+Generate .docx files with JavaScript, then validate. Install: `npm install -g docx`
+
+### Setup
+```javascript
+const { Document, Packer, Paragraph, TextRun, Table, TableRow, TableCell, ImageRun,
+ Header, Footer, AlignmentType, PageOrientation, LevelFormat, ExternalHyperlink,
+ InternalHyperlink, Bookmark, FootnoteReferenceRun, PositionalTab,
+ PositionalTabAlignment, PositionalTabRelativeTo, PositionalTabLeader,
+ TabStopType, TabStopPosition, Column, SectionType,
+ TableOfContents, HeadingLevel, BorderStyle, WidthType, ShadingType,
+ VerticalAlign, PageNumber, PageBreak } = require('docx');
+
+const doc = new Document({ sections: [{ children: [/* content */] }] });
+Packer.toBuffer(doc).then(buffer => fs.writeFileSync("doc.docx", buffer));
+```
+
+### Validation
+After creating the file, validate it. If validation fails, unpack, fix the XML, and repack.
+```bash
+python scripts/office/validate.py doc.docx
+```
+
+### Page Size
+
+```javascript
+// CRITICAL: docx-js defaults to A4, not US Letter
+// Always set page size explicitly for consistent results
+sections: [{
+ properties: {
+ page: {
+ size: {
+ width: 12240, // 8.5 inches in DXA
+ height: 15840 // 11 inches in DXA
+ },
+ margin: { top: 1440, right: 1440, bottom: 1440, left: 1440 } // 1 inch margins
+ }
+ },
+ children: [/* content */]
+}]
+```
+
+**Common page sizes (DXA units, 1440 DXA = 1 inch):**
+
+| Paper | Width | Height | Content Width (1" margins) |
+|-------|-------|--------|---------------------------|
+| US Letter | 12,240 | 15,840 | 9,360 |
+| A4 (default) | 11,906 | 16,838 | 9,026 |
+
+**Landscape orientation:** docx-js swaps width/height internally, so pass portrait dimensions and let it handle the swap:
+```javascript
+size: {
+ width: 12240, // Pass SHORT edge as width
+ height: 15840, // Pass LONG edge as height
+ orientation: PageOrientation.LANDSCAPE // docx-js swaps them in the XML
+},
+// Content width = 15840 - left margin - right margin (uses the long edge)
+```
+
+### Styles (Override Built-in Headings)
+
+Use Arial as the default font (universally supported). Keep titles black for readability.
+
+```javascript
+const doc = new Document({
+ styles: {
+ default: { document: { run: { font: "Arial", size: 24 } } }, // 12pt default
+ paragraphStyles: [
+ // IMPORTANT: Use exact IDs to override built-in styles
+ { id: "Heading1", name: "Heading 1", basedOn: "Normal", next: "Normal", quickFormat: true,
+ run: { size: 32, bold: true, font: "Arial" },
+ paragraph: { spacing: { before: 240, after: 240 }, outlineLevel: 0 } }, // outlineLevel required for TOC
+ { id: "Heading2", name: "Heading 2", basedOn: "Normal", next: "Normal", quickFormat: true,
+ run: { size: 28, bold: true, font: "Arial" },
+ paragraph: { spacing: { before: 180, after: 180 }, outlineLevel: 1 } },
+ ]
+ },
+ sections: [{
+ children: [
+ new Paragraph({ heading: HeadingLevel.HEADING_1, children: [new TextRun("Title")] }),
+ ]
+ }]
+});
+```
+
+### Lists (NEVER use unicode bullets)
+
+```javascript
+// ❌ WRONG - never manually insert bullet characters
+new Paragraph({ children: [new TextRun("• Item")] }) // BAD
+new Paragraph({ children: [new TextRun("\u2022 Item")] }) // BAD
+
+// ✅ CORRECT - use numbering config with LevelFormat.BULLET
+const doc = new Document({
+ numbering: {
+ config: [
+ { reference: "bullets",
+ levels: [{ level: 0, format: LevelFormat.BULLET, text: "•", alignment: AlignmentType.LEFT,
+ style: { paragraph: { indent: { left: 720, hanging: 360 } } } }] },
+ { reference: "numbers",
+ levels: [{ level: 0, format: LevelFormat.DECIMAL, text: "%1.", alignment: AlignmentType.LEFT,
+ style: { paragraph: { indent: { left: 720, hanging: 360 } } } }] },
+ ]
+ },
+ sections: [{
+ children: [
+ new Paragraph({ numbering: { reference: "bullets", level: 0 },
+ children: [new TextRun("Bullet item")] }),
+ new Paragraph({ numbering: { reference: "numbers", level: 0 },
+ children: [new TextRun("Numbered item")] }),
+ ]
+ }]
+});
+
+// ⚠️ Each reference creates INDEPENDENT numbering
+// Same reference = continues (1,2,3 then 4,5,6)
+// Different reference = restarts (1,2,3 then 1,2,3)
+```
+
+### Tables
+
+**CRITICAL: Tables need dual widths** - set both `columnWidths` on the table AND `width` on each cell. Without both, tables render incorrectly on some platforms.
+
+```javascript
+// CRITICAL: Always set table width for consistent rendering
+// CRITICAL: Use ShadingType.CLEAR (not SOLID) to prevent black backgrounds
+const border = { style: BorderStyle.SINGLE, size: 1, color: "CCCCCC" };
+const borders = { top: border, bottom: border, left: border, right: border };
+
+new Table({
+ width: { size: 9360, type: WidthType.DXA }, // Always use DXA (percentages break in Google Docs)
+ columnWidths: [4680, 4680], // Must sum to table width (DXA: 1440 = 1 inch)
+ rows: [
+ new TableRow({
+ children: [
+ new TableCell({
+ borders,
+ width: { size: 4680, type: WidthType.DXA }, // Also set on each cell
+ shading: { fill: "D5E8F0", type: ShadingType.CLEAR }, // CLEAR not SOLID
+ margins: { top: 80, bottom: 80, left: 120, right: 120 }, // Cell padding (internal, not added to width)
+ children: [new Paragraph({ children: [new TextRun("Cell")] })]
+ })
+ ]
+ })
+ ]
+})
+```
+
+**Table width calculation:**
+
+Always use `WidthType.DXA` — `WidthType.PERCENTAGE` breaks in Google Docs.
+
+```javascript
+// Table width = sum of columnWidths = content width
+// US Letter with 1" margins: 12240 - 2880 = 9360 DXA
+width: { size: 9360, type: WidthType.DXA },
+columnWidths: [7000, 2360] // Must sum to table width
+```
+
+**Width rules:**
+- **Always use `WidthType.DXA`** — never `WidthType.PERCENTAGE` (incompatible with Google Docs)
+- Table width must equal the sum of `columnWidths`
+- Cell `width` must match corresponding `columnWidth`
+- Cell `margins` are internal padding - they reduce content area, not add to cell width
+- For full-width tables: use content width (page width minus left and right margins)
+
+### Images
+
+```javascript
+// CRITICAL: type parameter is REQUIRED
+new Paragraph({
+ children: [new ImageRun({
+ type: "png", // Required: png, jpg, jpeg, gif, bmp, svg
+ data: fs.readFileSync("image.png"),
+ transformation: { width: 200, height: 150 },
+ altText: { title: "Title", description: "Desc", name: "Name" } // All three required
+ })]
+})
+```
+
+### Page Breaks
+
+```javascript
+// CRITICAL: PageBreak must be inside a Paragraph
+new Paragraph({ children: [new PageBreak()] })
+
+// Or use pageBreakBefore
+new Paragraph({ pageBreakBefore: true, children: [new TextRun("New page")] })
+```
+
+### Hyperlinks
+
+```javascript
+// External link
+new Paragraph({
+ children: [new ExternalHyperlink({
+ children: [new TextRun({ text: "Click here", style: "Hyperlink" })],
+ link: "https://example.com",
+ })]
+})
+
+// Internal link (bookmark + reference)
+// 1. Create bookmark at destination
+new Paragraph({ heading: HeadingLevel.HEADING_1, children: [
+ new Bookmark({ id: "chapter1", children: [new TextRun("Chapter 1")] }),
+]})
+// 2. Link to it
+new Paragraph({ children: [new InternalHyperlink({
+ children: [new TextRun({ text: "See Chapter 1", style: "Hyperlink" })],
+ anchor: "chapter1",
+})]})
+```
+
+### Footnotes
+
+```javascript
+const doc = new Document({
+ footnotes: {
+ 1: { children: [new Paragraph("Source: Annual Report 2024")] },
+ 2: { children: [new Paragraph("See appendix for methodology")] },
+ },
+ sections: [{
+ children: [new Paragraph({
+ children: [
+ new TextRun("Revenue grew 15%"),
+ new FootnoteReferenceRun(1),
+ new TextRun(" using adjusted metrics"),
+ new FootnoteReferenceRun(2),
+ ],
+ })]
+ }]
+});
+```
+
+### Tab Stops
+
+```javascript
+// Right-align text on same line (e.g., date opposite a title)
+new Paragraph({
+ children: [
+ new TextRun("Company Name"),
+ new TextRun("\tJanuary 2025"),
+ ],
+ tabStops: [{ type: TabStopType.RIGHT, position: TabStopPosition.MAX }],
+})
+
+// Dot leader (e.g., TOC-style)
+new Paragraph({
+ children: [
+ new TextRun("Introduction"),
+ new TextRun({ children: [
+ new PositionalTab({
+ alignment: PositionalTabAlignment.RIGHT,
+ relativeTo: PositionalTabRelativeTo.MARGIN,
+ leader: PositionalTabLeader.DOT,
+ }),
+ "3",
+ ]}),
+ ],
+})
+```
+
+### Multi-Column Layouts
+
+```javascript
+// Equal-width columns
+sections: [{
+ properties: {
+ column: {
+ count: 2, // number of columns
+ space: 720, // gap between columns in DXA (720 = 0.5 inch)
+ equalWidth: true,
+ separate: true, // vertical line between columns
+ },
+ },
+ children: [/* content flows naturally across columns */]
+}]
+
+// Custom-width columns (equalWidth must be false)
+sections: [{
+ properties: {
+ column: {
+ equalWidth: false,
+ children: [
+ new Column({ width: 5400, space: 720 }),
+ new Column({ width: 3240 }),
+ ],
+ },
+ },
+ children: [/* content */]
+}]
+```
+
+Force a column break with a new section using `type: SectionType.NEXT_COLUMN`.
+
+### Table of Contents
+
+```javascript
+// CRITICAL: Headings must use HeadingLevel ONLY - no custom styles
+new TableOfContents("Table of Contents", { hyperlink: true, headingStyleRange: "1-3" })
+```
+
+### Headers/Footers
+
+```javascript
+sections: [{
+ properties: {
+ page: { margin: { top: 1440, right: 1440, bottom: 1440, left: 1440 } } // 1440 = 1 inch
+ },
+ headers: {
+ default: new Header({ children: [new Paragraph({ children: [new TextRun("Header")] })] })
+ },
+ footers: {
+ default: new Footer({ children: [new Paragraph({
+ children: [new TextRun("Page "), new TextRun({ children: [PageNumber.CURRENT] })]
+ })] })
+ },
+ children: [/* content */]
+}]
+```
+
+### Critical Rules for docx-js
+
+- **Set page size explicitly** - docx-js defaults to A4; use US Letter (12240 x 15840 DXA) for US documents
+- **Landscape: pass portrait dimensions** - docx-js swaps width/height internally; pass short edge as `width`, long edge as `height`, and set `orientation: PageOrientation.LANDSCAPE`
+- **Never use `\n`** - use separate Paragraph elements
+- **Never use unicode bullets** - use `LevelFormat.BULLET` with numbering config
+- **PageBreak must be in Paragraph** - standalone creates invalid XML
+- **ImageRun requires `type`** - always specify png/jpg/etc
+- **Always set table `width` with DXA** - never use `WidthType.PERCENTAGE` (breaks in Google Docs)
+- **Tables need dual widths** - `columnWidths` array AND cell `width`, both must match
+- **Table width = sum of columnWidths** - for DXA, ensure they add up exactly
+- **Always add cell margins** - use `margins: { top: 80, bottom: 80, left: 120, right: 120 }` for readable padding
+- **Use `ShadingType.CLEAR`** - never SOLID for table shading
+- **Never use tables as dividers/rules** - cells have minimum height and render as empty boxes (including in headers/footers); use `border: { bottom: { style: BorderStyle.SINGLE, size: 6, color: "2E75B6", space: 1 } }` on a Paragraph instead. For two-column footers, use tab stops (see Tab Stops section), not tables
+- **TOC requires HeadingLevel only** - no custom styles on heading paragraphs
+- **Override built-in styles** - use exact IDs: "Heading1", "Heading2", etc.
+- **Include `outlineLevel`** - required for TOC (0 for H1, 1 for H2, etc.)
+
+---
+
+## Editing Existing Documents
+
+**Follow all 3 steps in order.**
+
+### Step 1: Unpack
+```bash
+python scripts/office/unpack.py document.docx unpacked/
+```
+Extracts XML, pretty-prints, merges adjacent runs, and converts smart quotes to XML entities (`&#8220;` etc.) so they survive editing. Use `--merge-runs false` to skip run merging.
+
+### Step 2: Edit XML
+
+Edit files in `unpacked/word/`. See XML Reference below for patterns.
+
+**Use "Claude" as the author** for tracked changes and comments, unless the user explicitly requests use of a different name.
+
+**Use the Edit tool directly for string replacement. Do not write Python scripts.** Scripts introduce unnecessary complexity. The Edit tool shows exactly what is being replaced.
+
+**CRITICAL: Use smart quotes for new content.** When adding text with apostrophes or quotes, use XML entities to produce smart quotes:
+```xml
+
+Here’s a quote: “Hello”
+```
+| Entity | Character |
+|--------|-----------|
+| `&#8216;` | ‘ (left single) |
+| `&#8217;` | ’ (right single / apostrophe) |
+| `&#8220;` | “ (left double) |
+| `&#8221;` | ” (right double) |
+
+**Adding comments:** Use `comment.py` to handle boilerplate across multiple XML files (text must be pre-escaped XML):
+```bash
+python scripts/comment.py unpacked/ 0 "Comment text with &amp; and &#8217;"
+python scripts/comment.py unpacked/ 1 "Reply text" --parent 0 # reply to comment 0
+python scripts/comment.py unpacked/ 0 "Text" --author "Custom Author" # custom author name
+```
+Then add markers to document.xml (see Comments in XML Reference).
+
+### Step 3: Pack
+```bash
+python scripts/office/pack.py unpacked/ output.docx --original document.docx
+```
+Validates with auto-repair, condenses XML, and creates DOCX. Use `--validate false` to skip.
+
+**Auto-repair will fix:**
+- `durableId` >= 0x7FFFFFFF (regenerates valid ID)
+- Missing `xml:space="preserve"` on `` with whitespace
+
+**Auto-repair won't fix:**
+- Malformed XML, invalid element nesting, missing relationships, schema violations
+
+### Common Pitfalls
+
+- **Replace entire `` elements**: When adding tracked changes, replace the whole `... ` block with `......` as siblings. Don't inject tracked change tags inside a run.
+- **Preserve `` formatting**: Copy the original run's `` block into your tracked change runs to maintain bold, font size, etc.
+
+---
+
+## XML Reference
+
+### Schema Compliance
+
+- **Element order in ``**: ``, ``, ``, ``, ``, `` last
+- **Whitespace**: Add `xml:space="preserve"` to `` with leading/trailing spaces
+- **RSIDs**: Must be 8-digit hex (e.g., `00AB1234`)
+
+### Tracked Changes
+
+**Insertion:**
+```xml
+
+ inserted text
+
+```
+
+**Deletion:**
+```xml
+
+ deleted text
+
+```
+
+**Inside ``**: Use `` instead of ``, and `` instead of ``.
+
+**Minimal edits** - only mark what changes:
+```xml
+
+The term is
+
+ 30
+
+
+ 60
+
+ days.
+```
+
+**Deleting entire paragraphs/list items** - when removing ALL content from a paragraph, also mark the paragraph mark as deleted so it merges with the next paragraph. Add ` ` inside ``:
+```xml
+
+
+ ...
+
+
+
+
+
+ Entire paragraph content being deleted...
+
+
+```
+Without the ` ` in ``, accepting changes leaves an empty paragraph/list item.
+
+**Rejecting another author's insertion** - nest deletion inside their insertion:
+```xml
+
+
+ their inserted text
+
+
+```
+
+**Restoring another author's deletion** - add insertion after (don't modify their deletion):
+```xml
+
+ deleted text
+
+
+ deleted text
+
+```
+
+### Comments
+
+After running `comment.py` (see Step 2), add markers to document.xml. For replies, use `--parent` flag and nest markers inside the parent's.
+
+**CRITICAL: `` and `` are siblings of ``, never inside ``.**
+
+```xml
+
+
+
+ deleted
+
+ more text
+
+
+
+
+
+
+ text
+
+
+
+
+```
+
+### Images
+
+1. Add image file to `word/media/`
+2. Add relationship to `word/_rels/document.xml.rels`:
+```xml
+
+```
+3. Add content type to `[Content_Types].xml`:
+```xml
+
+```
+4. Reference in document.xml:
+```xml
+
+
+
+
+
+
+
+
+
+
+
+
+```
+
+---
+
+## Dependencies
+
+- **pandoc**: Text extraction
+- **docx**: `npm install -g docx` (new documents)
+- **LibreOffice**: PDF conversion (auto-configured for sandboxed environments via `scripts/office/soffice.py`)
+- **Poppler**: `pdftoppm` for images
diff --git a/.claude/skills/docx/scripts/__init__.py b/.claude/skills/docx/scripts/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/.claude/skills/docx/scripts/__init__.py
@@ -0,0 +1 @@
+
diff --git a/.claude/skills/docx/scripts/accept_changes.py b/.claude/skills/docx/scripts/accept_changes.py
new file mode 100644
index 0000000..8e36316
--- /dev/null
+++ b/.claude/skills/docx/scripts/accept_changes.py
@@ -0,0 +1,135 @@
+"""Accept all tracked changes in a DOCX file using LibreOffice.
+
+Requires LibreOffice (soffice) to be installed.
+"""
+
+import argparse
+import logging
+import shutil
+import subprocess
+from pathlib import Path
+
+from office.soffice import get_soffice_env
+
+logger = logging.getLogger(__name__)
+
+LIBREOFFICE_PROFILE = "/tmp/libreoffice_docx_profile"
+MACRO_DIR = f"{LIBREOFFICE_PROFILE}/user/basic/Standard"
+
+ACCEPT_CHANGES_MACRO = """
+
+
+ Sub AcceptAllTrackedChanges()
+ Dim document As Object
+ Dim dispatcher As Object
+
+ document = ThisComponent.CurrentController.Frame
+ dispatcher = createUnoService("com.sun.star.frame.DispatchHelper")
+
+ dispatcher.executeDispatch(document, ".uno:AcceptAllTrackedChanges", "", 0, Array())
+ ThisComponent.store()
+ ThisComponent.close(True)
+ End Sub
+ """
+
+
def accept_changes(
    input_file: str,
    output_file: str,
) -> tuple[None, str]:
    """Accept all tracked changes in ``input_file``, writing the result to ``output_file``.

    Copies the input to the output location, then drives headless LibreOffice
    with a Basic macro that accepts every tracked change and saves in place.

    Returns:
        ``(None, message)`` — the message starts with ``"Error"`` on failure.
    """
    input_path = Path(input_file)
    output_path = Path(output_file)

    if not input_path.exists():
        return None, f"Error: Input file not found: {input_file}"

    if not input_path.suffix.lower() == ".docx":
        return None, f"Error: Input file is not a DOCX file: {input_file}"

    try:
        # Work on a copy so the original document is never modified.
        output_path.parent.mkdir(parents=True, exist_ok=True)
        shutil.copy2(input_path, output_path)
    except Exception as e:
        return None, f"Error: Failed to copy input file to output location: {e}"

    if not _setup_libreoffice_macro():
        return None, "Error: Failed to setup LibreOffice macro"

    cmd = [
        "soffice",
        "--headless",
        # Isolated profile avoids clashing with any other running instance.
        f"-env:UserInstallation=file://{LIBREOFFICE_PROFILE}",
        "--norestore",
        "vnd.sun.star.script:Standard.Module1.AcceptAllTrackedChanges?language=Basic&location=application",
        str(output_path.absolute()),
    ]

    try:
        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            timeout=30,
            check=False,
            env=get_soffice_env(),
        )
    except subprocess.TimeoutExpired:
        # NOTE(review): a timeout is reported as success — presumably because
        # headless soffice can hang after the macro has already stored and
        # closed the document. Confirm the output file is actually updated.
        return (
            None,
            f"Successfully accepted all tracked changes: {input_file} -> {output_file}",
        )

    if result.returncode != 0:
        return None, f"Error: LibreOffice failed: {result.stderr}"

    return (
        None,
        f"Successfully accepted all tracked changes: {input_file} -> {output_file}",
    )
+
+
def _setup_libreoffice_macro() -> bool:
    """Install the AcceptAllTrackedChanges Basic macro into the LibreOffice profile.

    Returns True when the macro file is present (already installed or freshly
    written), False when it could not be written.
    """
    macro_dir = Path(MACRO_DIR)
    macro_file = macro_dir / "Module1.xba"

    # Already installed by a previous run — nothing to do.
    if macro_file.exists() and "AcceptAllTrackedChanges" in macro_file.read_text():
        return True

    if not macro_dir.exists():
        # Let LibreOffice create the profile skeleton first, then make sure
        # the Standard Basic library directory exists.
        bootstrap_cmd = [
            "soffice",
            "--headless",
            f"-env:UserInstallation=file://{LIBREOFFICE_PROFILE}",
            "--terminate_after_init",
        ]
        subprocess.run(
            bootstrap_cmd,
            capture_output=True,
            timeout=10,
            check=False,
            env=get_soffice_env(),
        )
        macro_dir.mkdir(parents=True, exist_ok=True)

    try:
        macro_file.write_text(ACCEPT_CHANGES_MACRO)
    except Exception as e:
        logger.warning(f"Failed to setup LibreOffice macro: {e}")
        return False
    return True
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Accept all tracked changes in a DOCX file"
+ )
+ parser.add_argument("input_file", help="Input DOCX file with tracked changes")
+ parser.add_argument(
+ "output_file", help="Output DOCX file (clean, no tracked changes)"
+ )
+ args = parser.parse_args()
+
+ _, message = accept_changes(args.input_file, args.output_file)
+ print(message)
+
+ if "Error" in message:
+ raise SystemExit(1)
diff --git a/.claude/skills/docx/scripts/comment.py b/.claude/skills/docx/scripts/comment.py
new file mode 100644
index 0000000..36e1c93
--- /dev/null
+++ b/.claude/skills/docx/scripts/comment.py
@@ -0,0 +1,318 @@
+"""Add comments to DOCX documents.
+
+Usage:
+ python comment.py unpacked/ 0 "Comment text"
+ python comment.py unpacked/ 1 "Reply text" --parent 0
+
+Text should be pre-escaped XML (e.g., &amp; for &, &#8217; for smart quotes).
+
+After running, add markers to document.xml:
+
+ ... commented content ...
+
+
+"""
+
+import argparse
+import random
+import shutil
+import sys
+from datetime import datetime, timezone
+from pathlib import Path
+
+import defusedxml.minidom
+
+TEMPLATE_DIR = Path(__file__).parent / "templates"
+NS = {
+ "w": "http://schemas.openxmlformats.org/wordprocessingml/2006/main",
+ "w14": "http://schemas.microsoft.com/office/word/2010/wordml",
+ "w15": "http://schemas.microsoft.com/office/word/2012/wordml",
+ "w16cid": "http://schemas.microsoft.com/office/word/2016/wordml/cid",
+ "w16cex": "http://schemas.microsoft.com/office/word/2018/wordml/cex",
+}
+
+COMMENT_XML = """\
+
+
+
+
+
+
+
+
+
+
+
+
+ {text}
+
+
+ """
+
+COMMENT_MARKER_TEMPLATE = """
+Add to document.xml (markers must be direct children of w:p, never inside w:r):
+
+ ...
+
+ """
+
+REPLY_MARKER_TEMPLATE = """
+Nest markers inside parent {pid}'s markers (markers must be direct children of w:p, never inside w:r):
+
+ ...
+
+
+ """
+
+
def _generate_hex_id() -> str:
    """Return a random 8-digit uppercase hex ID in [0, 0x7FFFFFFE]."""
    value = random.randint(0, 0x7FFFFFFE)
    return format(value, "08X")
+
+
+SMART_QUOTE_ENTITIES = {
+ "\u201c": "“",
+ "\u201d": "”",
+ "\u2018": "‘",
+ "\u2019": "’",
+}
+
+
def _encode_smart_quotes(text: str) -> str:
    """Replace curly-quote characters in *text* with their XML entity forms."""
    encoded = text
    for raw_char, entity in SMART_QUOTE_ENTITIES.items():
        encoded = encoded.replace(raw_char, entity)
    return encoded
+
+
def _append_xml(xml_path: Path, root_tag: str, content: str) -> None:
    """Append the element(s) in *content* as children of *root_tag* in *xml_path*.

    The fragment is parsed inside a wrapper document so prefixed tags resolve,
    then each element node is imported into the target DOM and appended.
    """
    dom = defusedxml.minidom.parseString(xml_path.read_text(encoding="utf-8"))
    root = dom.getElementsByTagName(root_tag)[0]
    # Namespace declarations for every prefix the comment fragments may use.
    ns_attrs = " ".join(f'xmlns:{k}="{v}"' for k, v in NS.items())
    # NOTE(review): ns_attrs appears unused and the wrapper f-string looks
    # truncated — verify the wrapper element declares the NS namespaces.
    wrapper_dom = defusedxml.minidom.parseString(f"{content} ")
    for child in wrapper_dom.documentElement.childNodes:
        if child.nodeType == child.ELEMENT_NODE:
            root.appendChild(dom.importNode(child, True))
    # Re-encode curly quotes as numeric entities so they survive round-trips.
    output = _encode_smart_quotes(dom.toxml(encoding="UTF-8").decode("utf-8"))
    xml_path.write_text(output, encoding="utf-8")
+
+
def _find_para_id(comments_path: Path, comment_id: int) -> str | None:
    """Return the w14:paraId of the first paragraph of comment *comment_id*, or None."""
    target = str(comment_id)
    dom = defusedxml.minidom.parseString(comments_path.read_text(encoding="utf-8"))
    for comment in dom.getElementsByTagName("w:comment"):
        if comment.getAttribute("w:id") != target:
            continue
        for para in comment.getElementsByTagName("w:p"):
            para_id = para.getAttribute("w14:paraId")
            if para_id:
                return para_id
    return None
+
+
def _get_next_rid(rels_path: Path) -> int:
    """Return one more than the highest numeric rIdN in the relationships file."""
    dom = defusedxml.minidom.parseString(rels_path.read_text(encoding="utf-8"))
    highest = 0
    for rel in dom.getElementsByTagName("Relationship"):
        rel_id = rel.getAttribute("Id")
        if not (rel_id and rel_id.startswith("rId")):
            continue
        try:
            highest = max(highest, int(rel_id[3:]))
        except ValueError:
            # Non-numeric suffix (e.g. "rIdFoo") — ignore it.
            pass
    return highest + 1
+
+
def _has_relationship(rels_path: Path, target: str) -> bool:
    """Return True if any Relationship in *rels_path* points at *target*."""
    dom = defusedxml.minidom.parseString(rels_path.read_text(encoding="utf-8"))
    return any(
        rel.getAttribute("Target") == target
        for rel in dom.getElementsByTagName("Relationship")
    )
+
+
def _has_content_type(ct_path: Path, part_name: str) -> bool:
    """Return True if [Content_Types].xml already has an Override for *part_name*."""
    dom = defusedxml.minidom.parseString(ct_path.read_text(encoding="utf-8"))
    return any(
        override.getAttribute("PartName") == part_name
        for override in dom.getElementsByTagName("Override")
    )
+
+
def _ensure_comment_relationships(unpacked_dir: Path) -> None:
    """Register the four comment part relationships in document.xml.rels.

    No-op when the rels file is missing or comments.xml is already wired up.
    """
    rels_path = unpacked_dir / "word" / "_rels" / "document.xml.rels"
    if not rels_path.exists():
        return

    # comments.xml already registered -> assume all four parts are present.
    if _has_relationship(rels_path, "comments.xml"):
        return

    dom = defusedxml.minidom.parseString(rels_path.read_text(encoding="utf-8"))
    root = dom.documentElement
    next_rid = _get_next_rid(rels_path)

    # (relationship type URI, target part) pairs for the comment parts.
    rels = [
        (
            "http://schemas.openxmlformats.org/officeDocument/2006/relationships/comments",
            "comments.xml",
        ),
        (
            "http://schemas.microsoft.com/office/2011/relationships/commentsExtended",
            "commentsExtended.xml",
        ),
        (
            "http://schemas.microsoft.com/office/2016/09/relationships/commentsIds",
            "commentsIds.xml",
        ),
        (
            "http://schemas.microsoft.com/office/2018/08/relationships/commentsExtensible",
            "commentsExtensible.xml",
        ),
    ]

    for rel_type, target in rels:
        rel = dom.createElement("Relationship")
        rel.setAttribute("Id", f"rId{next_rid}")
        rel.setAttribute("Type", rel_type)
        rel.setAttribute("Target", target)
        root.appendChild(rel)
        next_rid += 1

    rels_path.write_bytes(dom.toxml(encoding="UTF-8"))
+
+
def _ensure_comment_content_types(unpacked_dir: Path) -> None:
    """Register content types for the four comment parts in [Content_Types].xml.

    No-op when the file is missing or /word/comments.xml is already declared.
    """
    ct_path = unpacked_dir / "[Content_Types].xml"
    if not ct_path.exists():
        return

    # comments.xml already declared -> assume all four overrides are present.
    if _has_content_type(ct_path, "/word/comments.xml"):
        return

    dom = defusedxml.minidom.parseString(ct_path.read_text(encoding="utf-8"))
    root = dom.documentElement

    # (part name, MIME content type) pairs for the comment parts.
    overrides = [
        (
            "/word/comments.xml",
            "application/vnd.openxmlformats-officedocument.wordprocessingml.comments+xml",
        ),
        (
            "/word/commentsExtended.xml",
            "application/vnd.openxmlformats-officedocument.wordprocessingml.commentsExtended+xml",
        ),
        (
            "/word/commentsIds.xml",
            "application/vnd.openxmlformats-officedocument.wordprocessingml.commentsIds+xml",
        ),
        (
            "/word/commentsExtensible.xml",
            "application/vnd.openxmlformats-officedocument.wordprocessingml.commentsExtensible+xml",
        ),
    ]

    for part_name, content_type in overrides:
        override = dom.createElement("Override")
        override.setAttribute("PartName", part_name)
        override.setAttribute("ContentType", content_type)
        root.appendChild(override)

    ct_path.write_bytes(dom.toxml(encoding="UTF-8"))
+
+
def add_comment(
    unpacked_dir: str,
    comment_id: int,
    text: str,
    author: str = "Claude",
    initials: str = "C",
    parent_id: int | None = None,
) -> tuple[str, str]:
    """Add a comment (or a reply when *parent_id* is given) to an unpacked DOCX.

    On first use, creates comments.xml / commentsExtended.xml / commentsIds.xml /
    commentsExtensible.xml from templates and registers their relationships and
    content types; then appends one entry to each part.

    Args:
        unpacked_dir: Directory produced by unpack.py (must contain word/).
        comment_id: Unique w:id value for the new comment.
        text: Pre-escaped XML comment text.
        author: Display name recorded on the comment.
        initials: Author initials recorded on the comment.
        parent_id: Existing comment ID to reply to, or None for a new thread.

    Returns:
        (para_id, message) — para_id is "" and message starts with "Error" on failure.
    """
    word = Path(unpacked_dir) / "word"
    if not word.exists():
        return "", f"Error: {word} not found"

    # Random hex IDs link this comment across the four comment parts.
    para_id, durable_id = _generate_hex_id(), _generate_hex_id()
    ts = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    comments = word / "comments.xml"
    first_comment = not comments.exists()
    if first_comment:
        # First comment in this document: create the part and register it.
        shutil.copy(TEMPLATE_DIR / "comments.xml", comments)
        _ensure_comment_relationships(Path(unpacked_dir))
        _ensure_comment_content_types(Path(unpacked_dir))
    _append_xml(
        comments,
        "w:comments",
        COMMENT_XML.format(
            id=comment_id,
            author=author,
            date=ts,
            initials=initials,
            para_id=para_id,
            text=text,
        ),
    )

    ext = word / "commentsExtended.xml"
    if not ext.exists():
        shutil.copy(TEMPLATE_DIR / "commentsExtended.xml", ext)
    if parent_id is not None:
        # Replies must reference the parent's paragraph ID.
        parent_para = _find_para_id(comments, parent_id)
        if not parent_para:
            return "", f"Error: Parent comment {parent_id} not found"
        # NOTE(review): the fragments below look truncated — verify each emits
        # the expected w15/w16cid/w16cex element with its ID attributes.
        _append_xml(
            ext,
            "w15:commentsEx",
            f' ',
        )
    else:
        _append_xml(
            ext,
            "w15:commentsEx",
            f' ',
        )

    ids = word / "commentsIds.xml"
    if not ids.exists():
        shutil.copy(TEMPLATE_DIR / "commentsIds.xml", ids)
    _append_xml(
        ids,
        "w16cid:commentsIds",
        f' ',
    )

    extensible = word / "commentsExtensible.xml"
    if not extensible.exists():
        shutil.copy(TEMPLATE_DIR / "commentsExtensible.xml", extensible)
    _append_xml(
        extensible,
        "w16cex:commentsExtensible",
        f' ',
    )

    action = "reply" if parent_id is not None else "comment"
    return para_id, f"Added {action} {comment_id} (para_id={para_id})"
+
+
+if __name__ == "__main__":
+ p = argparse.ArgumentParser(description="Add comments to DOCX documents")
+ p.add_argument("unpacked_dir", help="Unpacked DOCX directory")
+ p.add_argument("comment_id", type=int, help="Comment ID (must be unique)")
+ p.add_argument("text", help="Comment text")
+ p.add_argument("--author", default="Claude", help="Author name")
+ p.add_argument("--initials", default="C", help="Author initials")
+ p.add_argument("--parent", type=int, help="Parent comment ID (for replies)")
+ args = p.parse_args()
+
+ para_id, msg = add_comment(
+ args.unpacked_dir,
+ args.comment_id,
+ args.text,
+ args.author,
+ args.initials,
+ args.parent,
+ )
+ print(msg)
+ if "Error" in msg:
+ sys.exit(1)
+ cid = args.comment_id
+ if args.parent is not None:
+ print(REPLY_MARKER_TEMPLATE.format(pid=args.parent, cid=cid))
+ else:
+ print(COMMENT_MARKER_TEMPLATE.format(cid=cid))
diff --git a/.claude/skills/docx/scripts/office/helpers/__init__.py b/.claude/skills/docx/scripts/office/helpers/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/.claude/skills/docx/scripts/office/helpers/merge_runs.py b/.claude/skills/docx/scripts/office/helpers/merge_runs.py
new file mode 100644
index 0000000..ad7c25e
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/helpers/merge_runs.py
@@ -0,0 +1,199 @@
+"""Merge adjacent runs with identical formatting in DOCX.
+
+Merges adjacent elements that have identical properties.
+Works on runs in paragraphs and inside tracked changes (, ).
+
+Also:
+- Removes rsid attributes from runs (revision metadata that doesn't affect rendering)
+- Removes proofErr elements (spell/grammar markers that block merging)
+"""
+
+from pathlib import Path
+
+import defusedxml.minidom
+
+
def merge_runs(input_dir: str) -> tuple[int, str]:
    """Merge adjacent identically-formatted runs in word/document.xml.

    Strips proofErr markers and rsid attributes first, since both would make
    otherwise-identical runs compare unequal.

    Returns:
        (merge_count, message) — count is 0 and message starts with "Error" on failure.
    """
    doc_xml = Path(input_dir) / "word" / "document.xml"

    if not doc_xml.exists():
        return 0, f"Error: {doc_xml} not found"

    try:
        dom = defusedxml.minidom.parseString(doc_xml.read_text(encoding="utf-8"))
        root = dom.documentElement

        # Revision metadata that blocks merging but doesn't affect rendering.
        _remove_elements(root, "proofErr")
        _strip_run_rsid_attrs(root)

        # Each run's parent (paragraph, tracked change, ...) merges independently.
        containers = {run.parentNode for run in _find_elements(root, "r")}

        merge_count = 0
        for container in containers:
            merge_count += _merge_runs_in(container)

        doc_xml.write_bytes(dom.toxml(encoding="UTF-8"))
        return merge_count, f"Merged {merge_count} runs"

    except Exception as e:
        # Best-effort tool: report the failure rather than propagate.
        return 0, f"Error: {e}"
+
+
+
+
def _find_elements(root, tag: str) -> list:
    """Collect, in document order, every element whose local name matches *tag*."""
    matches = []
    stack = [root]
    while stack:
        node = stack.pop()
        if node.nodeType == node.ELEMENT_NODE:
            node_name = node.localName or node.tagName
            if node_name == tag or node_name.endswith(f":{tag}"):
                matches.append(node)
        # Push children reversed so the leftmost child is visited first,
        # preserving pre-order document traversal.
        stack.extend(reversed(node.childNodes))
    return matches
+
+
def _get_child(parent, tag: str):
    """Return the first direct child element with local name *tag*, or None."""
    suffix = f":{tag}"
    for node in parent.childNodes:
        if node.nodeType != node.ELEMENT_NODE:
            continue
        node_name = node.localName or node.tagName
        if node_name == tag or node_name.endswith(suffix):
            return node
    return None
+
+
def _get_children(parent, tag: str) -> list:
    """Return all direct child elements whose local name matches *tag*."""
    suffix = f":{tag}"

    def _matches(node) -> bool:
        if node.nodeType != node.ELEMENT_NODE:
            return False
        node_name = node.localName or node.tagName
        return node_name == tag or node_name.endswith(suffix)

    return [node for node in parent.childNodes if _matches(node)]
+
+
def _is_adjacent(elem1, elem2) -> bool:
    """True when *elem2* follows *elem1* with only blank text nodes between them."""
    node = elem1.nextSibling
    while node is not None and node is not elem2:
        if node.nodeType == node.ELEMENT_NODE:
            return False
        if node.nodeType == node.TEXT_NODE and node.data.strip():
            return False
        node = node.nextSibling
    # Loop ends at elem2 (adjacent) or at the end of the sibling list (not).
    return node is elem2
+
+
+
+
def _remove_elements(root, tag: str):
    """Detach every element matching *tag* from its parent."""
    for element in _find_elements(root, tag):
        parent = element.parentNode
        if parent is not None:
            parent.removeChild(element)
+
+
def _strip_run_rsid_attrs(root):
    """Drop every rsid* attribute from all runs (revision metadata only)."""
    for run in _find_elements(root, "r"):
        # Collect names first: removing while iterating the map is unsafe.
        rsid_names = [
            attr.name for attr in run.attributes.values() if "rsid" in attr.name.lower()
        ]
        for attr_name in rsid_names:
            run.removeAttribute(attr_name)
+
+
+
+
def _merge_runs_in(container) -> int:
    """Merge adjacent same-formatting runs among *container*'s direct children.

    Returns the number of runs folded into a predecessor.
    """
    merge_count = 0
    run = _first_child_run(container)

    while run:
        # Greedily absorb every immediately-following mergeable run.
        while True:
            next_elem = _next_element_sibling(run)
            if next_elem and _is_run(next_elem) and _can_merge(run, next_elem):
                _merge_run_content(run, next_elem)
                container.removeChild(next_elem)
                merge_count += 1
            else:
                break

        # Collapse the absorbed w:t elements into a single text element.
        _consolidate_text(run)
        run = _next_sibling_run(run)

    return merge_count
+
+
def _first_child_run(container):
    """Return the first direct child of *container* that is a run element, or None."""
    run_children = (
        child
        for child in container.childNodes
        if child.nodeType == child.ELEMENT_NODE and _is_run(child)
    )
    return next(run_children, None)
+
+
def _next_element_sibling(node):
    """Return the nearest following sibling that is an element, or None."""
    current = node.nextSibling
    while current is not None and current.nodeType != current.ELEMENT_NODE:
        current = current.nextSibling
    return current
+
+
def _next_sibling_run(node):
    """Return the nearest following sibling that is a run element, or None."""
    current = node.nextSibling
    while current is not None:
        if current.nodeType == current.ELEMENT_NODE and _is_run(current):
            return current
        current = current.nextSibling
    return None
+
+
def _is_run(node) -> bool:
    """True when *node* is a run element ("r", with or without namespace prefix)."""
    node_name = node.localName or node.tagName
    if node_name == "r":
        return True
    return node_name.endswith(":r")
+
+
def _can_merge(run1, run2) -> bool:
    """Runs are mergeable when their run properties (rPr) serialize identically."""
    props_a = _get_child(run1, "rPr")
    props_b = _get_child(run2, "rPr")

    if props_a is None and props_b is None:
        return True
    if props_a is None or props_b is None:
        # One run formatted, the other not — never merge.
        return False
    return props_a.toxml() == props_b.toxml()
+
+
def _merge_run_content(target, source):
    """Move every non-rPr child element of *source* into *target*, in order."""
    for node in list(source.childNodes):
        if node.nodeType != node.ELEMENT_NODE:
            continue
        node_name = node.localName or node.tagName
        if node_name == "rPr" or node_name.endswith(":rPr"):
            continue
        target.appendChild(node)
+
+
def _consolidate_text(run):
    """Collapse adjacent w:t elements inside *run* into the earlier of each pair.

    Iterates right-to-left so earlier elements keep accumulating later text.
    Keeps xml:space="preserve" accurate: required whenever the merged text has
    leading or trailing spaces, dropped otherwise.
    """
    t_elements = _get_children(run, "t")

    for i in range(len(t_elements) - 1, 0, -1):
        curr, prev = t_elements[i], t_elements[i - 1]

        # Only merge when nothing significant sits between the two w:t nodes.
        if _is_adjacent(prev, curr):
            prev_text = prev.firstChild.data if prev.firstChild else ""
            curr_text = curr.firstChild.data if curr.firstChild else ""
            merged = prev_text + curr_text

            if prev.firstChild:
                prev.firstChild.data = merged
            else:
                prev.appendChild(run.ownerDocument.createTextNode(merged))

            if merged.startswith(" ") or merged.endswith(" "):
                prev.setAttribute("xml:space", "preserve")
            elif prev.hasAttribute("xml:space"):
                prev.removeAttribute("xml:space")

            run.removeChild(curr)
diff --git a/.claude/skills/docx/scripts/office/helpers/simplify_redlines.py b/.claude/skills/docx/scripts/office/helpers/simplify_redlines.py
new file mode 100644
index 0000000..db963bb
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/helpers/simplify_redlines.py
@@ -0,0 +1,197 @@
+"""Simplify tracked changes by merging adjacent w:ins or w:del elements.
+
+Merges adjacent elements from the same author into a single element.
+Same for elements. This makes heavily-redlined documents easier to
+work with by reducing the number of tracked change wrappers.
+
+Rules:
+- Only merges w:ins with w:ins, w:del with w:del (same element type)
+- Only merges if same author (ignores timestamp differences)
+- Only merges if truly adjacent (only whitespace between them)
+"""
+
+import xml.etree.ElementTree as ET
+import zipfile
+from pathlib import Path
+
+import defusedxml.minidom
+
+WORD_NS = "http://schemas.openxmlformats.org/wordprocessingml/2006/main"
+
+
def simplify_redlines(input_dir: str) -> tuple[int, str]:
    """Merge adjacent same-author w:ins / w:del wrappers in word/document.xml.

    Returns:
        (merge_count, message) — count is 0 and message starts with "Error" on failure.
    """
    doc_xml = Path(input_dir) / "word" / "document.xml"

    if not doc_xml.exists():
        return 0, f"Error: {doc_xml} not found"

    try:
        dom = defusedxml.minidom.parseString(doc_xml.read_text(encoding="utf-8"))
        root = dom.documentElement

        merge_count = 0

        # Tracked-change wrappers live directly under paragraphs and table cells.
        containers = _find_elements(root, "p") + _find_elements(root, "tc")

        for container in containers:
            merge_count += _merge_tracked_changes_in(container, "ins")
            merge_count += _merge_tracked_changes_in(container, "del")

        doc_xml.write_bytes(dom.toxml(encoding="UTF-8"))
        return merge_count, f"Simplified {merge_count} tracked changes"

    except Exception as e:
        # Best-effort tool: report the failure rather than propagate.
        return 0, f"Error: {e}"
+
+
def _merge_tracked_changes_in(container, tag: str) -> int:
    """Merge adjacent same-author *tag* ("ins"/"del") children of *container*.

    Returns the number of elements folded into a predecessor.
    """
    merge_count = 0

    # Direct children only — nested tracked changes are left alone.
    tracked = [
        child
        for child in container.childNodes
        if child.nodeType == child.ELEMENT_NODE and _is_element(child, tag)
    ]

    if len(tracked) < 2:
        return 0

    i = 0
    while i < len(tracked) - 1:
        curr = tracked[i]
        next_elem = tracked[i + 1]

        if _can_merge_tracked(curr, next_elem):
            _merge_tracked_content(curr, next_elem)
            container.removeChild(next_elem)
            # Stay on curr: it may absorb the following element too.
            tracked.pop(i + 1)
            merge_count += 1
        else:
            i += 1

    return merge_count
+
+
def _is_element(node, tag: str) -> bool:
    """True when *node*'s local name is *tag*, with or without a namespace prefix."""
    node_name = node.localName or node.tagName
    if node_name == tag:
        return True
    return node_name.endswith(f":{tag}")
+
+
def _get_author(elem) -> str:
    """Return the author attribute of a tracked-change element ("" when absent)."""
    author = elem.getAttribute("w:author")
    if author:
        return author
    # Fall back to matching by local name in case a different prefix is bound.
    for attr in elem.attributes.values():
        if attr.localName == "author" or attr.name.endswith(":author"):
            return attr.value
    return author
+
+
def _can_merge_tracked(elem1, elem2) -> bool:
    """Mergeable when authors match and only blank text sits between the elements."""
    if _get_author(elem1) != _get_author(elem2):
        return False

    between = elem1.nextSibling
    while between is not None and between is not elem2:
        if between.nodeType == between.ELEMENT_NODE:
            return False
        if between.nodeType == between.TEXT_NODE and between.data.strip():
            return False
        between = between.nextSibling

    return True
+
+
def _merge_tracked_content(target, source):
    """Reparent all children of *source* into *target*, preserving order."""
    for child in list(source.childNodes):
        source.removeChild(child)
        target.appendChild(child)
+
+
def _find_elements(root, tag: str) -> list:
    """Collect every element (including *root*) whose local name matches *tag*."""
    suffix = f":{tag}"
    found = []

    def visit(node):
        if node.nodeType == node.ELEMENT_NODE:
            node_name = node.localName or node.tagName
            if node_name == tag or node_name.endswith(suffix):
                found.append(node)
        for child in node.childNodes:
            visit(child)

    visit(root)
    return found
+
+
def get_tracked_change_authors(doc_xml_path: Path) -> dict[str, int]:
    """Count w:ins / w:del tracked-change elements per author in a document.xml.

    Returns an empty dict when the file is missing or not parseable.
    """
    if not doc_xml_path.exists():
        return {}

    try:
        root = ET.parse(doc_xml_path).getroot()
    except ET.ParseError:
        return {}

    author_attr = f"{{{WORD_NS}}}author"
    counts: dict[str, int] = {}
    for tag in ["ins", "del"]:
        for elem in root.findall(f".//w:{tag}", {"w": WORD_NS}):
            who = elem.get(author_attr)
            if who:
                counts[who] = counts.get(who, 0) + 1

    return counts
+
+
def _get_authors_from_docx(docx_path: Path) -> dict[str, int]:
    """Count tracked-change elements per author directly from a packed .docx.

    Mirrors get_tracked_change_authors but reads word/document.xml out of the
    zip archive. Returns {} when the part is missing or unreadable.
    """
    # NOTE(review): a nonexistent path raises FileNotFoundError here — callers
    # appear to check existence first; confirm before relying on it.
    try:
        with zipfile.ZipFile(docx_path, "r") as zf:
            if "word/document.xml" not in zf.namelist():
                return {}
            with zf.open("word/document.xml") as f:
                tree = ET.parse(f)
                root = tree.getroot()

        namespaces = {"w": WORD_NS}
        author_attr = f"{{{WORD_NS}}}author"

        authors: dict[str, int] = {}
        for tag in ["ins", "del"]:
            for elem in root.findall(f".//w:{tag}", namespaces):
                author = elem.get(author_attr)
                if author:
                    authors[author] = authors.get(author, 0) + 1
        return authors
    except (zipfile.BadZipFile, ET.ParseError):
        return {}
+
+
def infer_author(modified_dir: Path, original_docx: Path, default: str = "Claude") -> str:
    """Infer which single author added NEW tracked changes in the modified document.

    Compares per-author tracked-change counts between the modified (unpacked)
    document and the original .docx.

    Returns:
        The one author whose count grew, or *default* when no tracked changes
        exist or nothing new was added.

    Raises:
        ValueError: when more than one author added new changes.
    """
    modified_counts = get_tracked_change_authors(modified_dir / "word" / "document.xml")
    if not modified_counts:
        return default

    baseline = _get_authors_from_docx(original_docx)

    # Authors whose tracked-change count increased relative to the original.
    new_changes = {
        author: count - baseline.get(author, 0)
        for author, count in modified_counts.items()
        if count - baseline.get(author, 0) > 0
    }

    if not new_changes:
        return default
    if len(new_changes) == 1:
        return next(iter(new_changes))

    raise ValueError(
        f"Multiple authors added new changes: {new_changes}. "
        "Cannot infer which author to validate."
    )
diff --git a/.claude/skills/docx/scripts/office/pack.py b/.claude/skills/docx/scripts/office/pack.py
new file mode 100644
index 0000000..db29ed8
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/pack.py
@@ -0,0 +1,159 @@
+"""Pack a directory into a DOCX, PPTX, or XLSX file.
+
+Validates with auto-repair, condenses XML formatting, and creates the Office file.
+
+Usage:
+ python pack.py [--original ] [--validate true|false]
+
+Examples:
+ python pack.py unpacked/ output.docx --original input.docx
+ python pack.py unpacked/ output.pptx --validate false
+"""
+
+import argparse
+import sys
+import shutil
+import tempfile
+import zipfile
+from pathlib import Path
+
+import defusedxml.minidom
+
+from validators import DOCXSchemaValidator, PPTXSchemaValidator, RedliningValidator
+
def pack(
    input_directory: str,
    output_file: str,
    original_file: str | None = None,
    validate: bool = True,
    infer_author_func=None,
) -> tuple[None, str]:
    """Zip an unpacked Office directory back into a .docx/.pptx/.xlsx file.

    Optionally validates (with auto-repair) against the original file first,
    then condenses pretty-printed XML and writes the archive.

    Args:
        input_directory: Directory produced by unpack.py.
        output_file: Target .docx/.pptx/.xlsx path.
        original_file: Original document for validation comparison (optional).
        validate: Run validation when an original file is supplied.
        infer_author_func: Optional callable (unpacked_dir, original) -> author
            used to pick the author for redlining validation.

    Returns:
        (None, message) — the message starts with "Error" on failure.
    """
    input_dir = Path(input_directory)
    output_path = Path(output_file)
    suffix = output_path.suffix.lower()

    if not input_dir.is_dir():
        return None, f"Error: {input_dir} is not a directory"

    if suffix not in {".docx", ".pptx", ".xlsx"}:
        return None, f"Error: {output_file} must be a .docx, .pptx, or .xlsx file"

    # Validation only makes sense relative to an existing original document.
    if validate and original_file:
        original_path = Path(original_file)
        if original_path.exists():
            success, output = _run_validation(
                input_dir, original_path, suffix, infer_author_func
            )
            if output:
                print(output)
            if not success:
                return None, f"Error: Validation failed for {input_dir}"

    with tempfile.TemporaryDirectory() as temp_dir:
        # Condense a copy so the unpacked working tree stays pretty-printed.
        temp_content_dir = Path(temp_dir) / "content"
        shutil.copytree(input_dir, temp_content_dir)

        for pattern in ["*.xml", "*.rels"]:
            for xml_file in temp_content_dir.rglob(pattern):
                _condense_xml(xml_file)

        output_path.parent.mkdir(parents=True, exist_ok=True)
        with zipfile.ZipFile(output_path, "w", zipfile.ZIP_DEFLATED) as zf:
            for f in temp_content_dir.rglob("*"):
                if f.is_file():
                    zf.write(f, f.relative_to(temp_content_dir))

    return None, f"Successfully packed {input_dir} to {output_file}"
+
+
def _run_validation(
    unpacked_dir: Path,
    original_file: Path,
    suffix: str,
    infer_author_func=None,
) -> tuple[bool, str | None]:
    """Run format-specific validators (with auto-repair) over *unpacked_dir*.

    Returns:
        (success, output) — output is a human-readable summary, or None when
        there is nothing to report (e.g. no validators exist for this suffix).
    """
    output_lines = []
    validators = []

    if suffix == ".docx":
        # Redlining validation needs to know whose tracked changes to check.
        author = "Claude"
        if infer_author_func:
            try:
                author = infer_author_func(unpacked_dir, original_file)
            except ValueError as e:
                print(f"Warning: {e} Using default author 'Claude'.", file=sys.stderr)

        validators = [
            DOCXSchemaValidator(unpacked_dir, original_file),
            RedliningValidator(unpacked_dir, original_file, author=author),
        ]
    elif suffix == ".pptx":
        validators = [PPTXSchemaValidator(unpacked_dir, original_file)]

    # No validators for this format (.xlsx) — treat as passing.
    if not validators:
        return True, None

    # Repair first so validation sees the fixed-up document.
    total_repairs = sum(v.repair() for v in validators)
    if total_repairs:
        output_lines.append(f"Auto-repaired {total_repairs} issue(s)")

    success = all(v.validate() for v in validators)

    if success:
        output_lines.append("All validations PASSED!")

    return success, "\n".join(output_lines) if output_lines else None
+
+
def _condense_xml(xml_file: Path) -> None:
    """Strip pretty-printing whitespace and comments from an XML file in place.

    Children of any prefixed ":t" element (e.g. w:t, a:t) are left untouched,
    since that whitespace is document content, not formatting.

    Raises:
        Re-raises any parse/serialize failure after logging it to stderr.
    """
    try:
        with open(xml_file, encoding="utf-8") as f:
            dom = defusedxml.minidom.parse(f)

        for element in dom.getElementsByTagName("*"):
            # Text-bearing elements keep their whitespace verbatim.
            if element.tagName.endswith(":t"):
                continue

            for child in list(element.childNodes):
                if (
                    child.nodeType == child.TEXT_NODE
                    and child.nodeValue
                    and child.nodeValue.strip() == ""
                ) or child.nodeType == child.COMMENT_NODE:
                    element.removeChild(child)

        xml_file.write_bytes(dom.toxml(encoding="UTF-8"))
    except Exception as e:
        print(f"ERROR: Failed to parse {xml_file.name}: {e}", file=sys.stderr)
        raise
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Pack a directory into a DOCX, PPTX, or XLSX file"
+ )
+ parser.add_argument("input_directory", help="Unpacked Office document directory")
+ parser.add_argument("output_file", help="Output Office file (.docx/.pptx/.xlsx)")
+ parser.add_argument(
+ "--original",
+ help="Original file for validation comparison",
+ )
+ parser.add_argument(
+ "--validate",
+ type=lambda x: x.lower() == "true",
+ default=True,
+ metavar="true|false",
+ help="Run validation with auto-repair (default: true)",
+ )
+ args = parser.parse_args()
+
+ _, message = pack(
+ args.input_directory,
+ args.output_file,
+ original_file=args.original,
+ validate=args.validate,
+ )
+ print(message)
+
+ if "Error" in message:
+ sys.exit(1)
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chart.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chart.xsd
new file mode 100644
index 0000000..6454ef9
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chart.xsd
@@ -0,0 +1,1499 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd
new file mode 100644
index 0000000..afa4f46
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd
@@ -0,0 +1,146 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd
new file mode 100644
index 0000000..64e66b8
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd
@@ -0,0 +1,1085 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd
new file mode 100644
index 0000000..687eea8
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd
@@ -0,0 +1,11 @@
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-main.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-main.xsd
new file mode 100644
index 0000000..6ac81b0
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-main.xsd
@@ -0,0 +1,3081 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-picture.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-picture.xsd
new file mode 100644
index 0000000..1dbf051
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-picture.xsd
@@ -0,0 +1,23 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd
new file mode 100644
index 0000000..f1af17d
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd
@@ -0,0 +1,185 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd
new file mode 100644
index 0000000..0a185ab
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd
@@ -0,0 +1,287 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/pml.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/pml.xsd
new file mode 100644
index 0000000..14ef488
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/pml.xsd
@@ -0,0 +1,1676 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd
new file mode 100644
index 0000000..c20f3bf
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd
@@ -0,0 +1,28 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd
new file mode 100644
index 0000000..ac60252
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd
@@ -0,0 +1,144 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd
new file mode 100644
index 0000000..424b8ba
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd
@@ -0,0 +1,174 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd
new file mode 100644
index 0000000..2bddce2
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd
@@ -0,0 +1,25 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd
new file mode 100644
index 0000000..8a8c18b
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd
@@ -0,0 +1,18 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd
new file mode 100644
index 0000000..5c42706
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd
@@ -0,0 +1,59 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd
new file mode 100644
index 0000000..853c341
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd
@@ -0,0 +1,56 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd
new file mode 100644
index 0000000..da835ee
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd
@@ -0,0 +1,195 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-math.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-math.xsd
new file mode 100644
index 0000000..87ad265
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-math.xsd
@@ -0,0 +1,582 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd
new file mode 100644
index 0000000..9e86f1b
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd
@@ -0,0 +1,25 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/sml.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/sml.xsd
new file mode 100644
index 0000000..d0be42e
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/sml.xsd
@@ -0,0 +1,4439 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-main.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-main.xsd
new file mode 100644
index 0000000..8821dd1
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-main.xsd
@@ -0,0 +1,570 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd
new file mode 100644
index 0000000..ca2575c
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd
@@ -0,0 +1,509 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd
new file mode 100644
index 0000000..dd079e6
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd
new file mode 100644
index 0000000..3dd6cf6
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd
@@ -0,0 +1,108 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd
new file mode 100644
index 0000000..f1041e3
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd
@@ -0,0 +1,96 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/wml.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/wml.xsd
new file mode 100644
index 0000000..9c5b7a6
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/wml.xsd
@@ -0,0 +1,3646 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/xml.xsd b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/xml.xsd
new file mode 100644
index 0000000..0f13678
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ISO-IEC29500-4_2016/xml.xsd
@@ -0,0 +1,116 @@
+
+
+
+
+
+ See http://www.w3.org/XML/1998/namespace.html and
+ http://www.w3.org/TR/REC-xml for information about this namespace.
+
+ This schema document describes the XML namespace, in a form
+ suitable for import by other schema documents.
+
+ Note that local names in this namespace are intended to be defined
+ only by the World Wide Web Consortium or its subgroups. The
+ following names are currently defined in this namespace and should
+ not be used with conflicting semantics by any Working Group,
+ specification, or document instance:
+
+ base (as an attribute name): denotes an attribute whose value
+ provides a URI to be used as the base for interpreting any
+ relative URIs in the scope of the element on which it
+ appears; its value is inherited. This name is reserved
+ by virtue of its definition in the XML Base specification.
+
+ lang (as an attribute name): denotes an attribute whose value
+ is a language code for the natural language of the content of
+ any element; its value is inherited. This name is reserved
+ by virtue of its definition in the XML specification.
+
+ space (as an attribute name): denotes an attribute whose
+ value is a keyword indicating what whitespace processing
+ discipline is intended for the content of the element; its
+ value is inherited. This name is reserved by virtue of its
+ definition in the XML specification.
+
+ Father (in any context at all): denotes Jon Bosak, the chair of
+ the original XML Working Group. This name is reserved by
+ the following decision of the W3C XML Plenary and
+ XML Coordination groups:
+
+ In appreciation for his vision, leadership and dedication
+ the W3C XML Plenary on this 10th day of February, 2000
+ reserves for Jon Bosak in perpetuity the XML name
+ xml:Father
+
+
+
+
+ This schema defines attributes and an attribute group
+ suitable for use by
+ schemas wishing to allow xml:base, xml:lang or xml:space attributes
+ on elements they define.
+
+ To enable this, such a schema must import this schema
+ for the XML namespace, e.g. as follows:
+ <schema . . .>
+ . . .
+ <import namespace="http://www.w3.org/XML/1998/namespace"
+ schemaLocation="http://www.w3.org/2001/03/xml.xsd"/>
+
+ Subsequently, qualified reference to any of the attributes
+ or the group defined below will have the desired effect, e.g.
+
+ <type . . .>
+ . . .
+ <attributeGroup ref="xml:specialAttrs"/>
+
+ will define a type which will schema-validate an instance
+ element with any of those attributes
+
+
+
+ In keeping with the XML Schema WG's standard versioning
+ policy, this schema document will persist at
+ http://www.w3.org/2001/03/xml.xsd.
+ At the date of issue it can also be found at
+ http://www.w3.org/2001/xml.xsd.
+ The schema document at that URI may however change in the future,
+ in order to remain compatible with the latest version of XML Schema
+ itself. In other words, if the XML Schema namespace changes, the version
+ of this document at
+ http://www.w3.org/2001/xml.xsd will change
+ accordingly; the version at
+ http://www.w3.org/2001/03/xml.xsd will not change.
+
+
+
+
+
+ In due course, we should install the relevant ISO 2- and 3-letter
+ codes as the enumerated possible values . . .
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ See http://www.w3.org/TR/xmlbase/ for
+ information about this attribute.
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ecma/fouth-edition/opc-contentTypes.xsd b/.claude/skills/docx/scripts/office/schemas/ecma/fouth-edition/opc-contentTypes.xsd
new file mode 100644
index 0000000..a6de9d2
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ecma/fouth-edition/opc-contentTypes.xsd
@@ -0,0 +1,42 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ecma/fouth-edition/opc-coreProperties.xsd b/.claude/skills/docx/scripts/office/schemas/ecma/fouth-edition/opc-coreProperties.xsd
new file mode 100644
index 0000000..10e978b
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ecma/fouth-edition/opc-coreProperties.xsd
@@ -0,0 +1,50 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ecma/fouth-edition/opc-digSig.xsd b/.claude/skills/docx/scripts/office/schemas/ecma/fouth-edition/opc-digSig.xsd
new file mode 100644
index 0000000..4248bf7
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ecma/fouth-edition/opc-digSig.xsd
@@ -0,0 +1,49 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/ecma/fouth-edition/opc-relationships.xsd b/.claude/skills/docx/scripts/office/schemas/ecma/fouth-edition/opc-relationships.xsd
new file mode 100644
index 0000000..5649746
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/ecma/fouth-edition/opc-relationships.xsd
@@ -0,0 +1,33 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/mce/mc.xsd b/.claude/skills/docx/scripts/office/schemas/mce/mc.xsd
new file mode 100644
index 0000000..ef72545
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/mce/mc.xsd
@@ -0,0 +1,75 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/microsoft/wml-2010.xsd b/.claude/skills/docx/scripts/office/schemas/microsoft/wml-2010.xsd
new file mode 100644
index 0000000..f65f777
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/microsoft/wml-2010.xsd
@@ -0,0 +1,560 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/microsoft/wml-2012.xsd b/.claude/skills/docx/scripts/office/schemas/microsoft/wml-2012.xsd
new file mode 100644
index 0000000..6b00755
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/microsoft/wml-2012.xsd
@@ -0,0 +1,67 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/microsoft/wml-2018.xsd b/.claude/skills/docx/scripts/office/schemas/microsoft/wml-2018.xsd
new file mode 100644
index 0000000..f321d33
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/microsoft/wml-2018.xsd
@@ -0,0 +1,14 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/microsoft/wml-cex-2018.xsd b/.claude/skills/docx/scripts/office/schemas/microsoft/wml-cex-2018.xsd
new file mode 100644
index 0000000..364c6a9
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/microsoft/wml-cex-2018.xsd
@@ -0,0 +1,20 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/microsoft/wml-cid-2016.xsd b/.claude/skills/docx/scripts/office/schemas/microsoft/wml-cid-2016.xsd
new file mode 100644
index 0000000..fed9d15
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/microsoft/wml-cid-2016.xsd
@@ -0,0 +1,13 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/microsoft/wml-sdtdatahash-2020.xsd b/.claude/skills/docx/scripts/office/schemas/microsoft/wml-sdtdatahash-2020.xsd
new file mode 100644
index 0000000..680cf15
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/microsoft/wml-sdtdatahash-2020.xsd
@@ -0,0 +1,4 @@
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/schemas/microsoft/wml-symex-2015.xsd b/.claude/skills/docx/scripts/office/schemas/microsoft/wml-symex-2015.xsd
new file mode 100644
index 0000000..89ada90
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/schemas/microsoft/wml-symex-2015.xsd
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/docx/scripts/office/soffice.py b/.claude/skills/docx/scripts/office/soffice.py
new file mode 100644
index 0000000..c7f7e32
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/soffice.py
@@ -0,0 +1,183 @@
+"""
+Helper for running LibreOffice (soffice) in environments where AF_UNIX
+sockets may be blocked (e.g., sandboxed VMs). Detects the restriction
+at runtime and applies an LD_PRELOAD shim if needed.
+
+Usage:
+ from office.soffice import run_soffice, get_soffice_env
+
+ # Option 1 – run soffice directly
+ result = run_soffice(["--headless", "--convert-to", "pdf", "input.docx"])
+
+ # Option 2 – get env dict for your own subprocess calls
+ env = get_soffice_env()
+ subprocess.run(["soffice", ...], env=env)
+"""
+
+import os
+import socket
+import subprocess
+import tempfile
+from pathlib import Path
+
+
def get_soffice_env() -> dict:
    """Build an environment dict suitable for launching LibreOffice.

    Forces the headless "svp" VCL plugin, and — when AF_UNIX sockets are
    blocked in this sandbox — points LD_PRELOAD at the compiled shim so
    soffice can still start.

    Returns:
        A copy of ``os.environ`` with the extra variables applied.
    """
    environment = dict(os.environ)
    environment["SAL_USE_VCLPLUGIN"] = "svp"

    if _needs_shim():
        environment["LD_PRELOAD"] = str(_ensure_shim())

    return environment
+
+
def run_soffice(args: list[str], **kwargs) -> subprocess.CompletedProcess:
    """Invoke ``soffice`` with *args* under the sandbox-safe environment.

    Extra keyword arguments are forwarded to :func:`subprocess.run`.
    """
    command = ["soffice", *args]
    return subprocess.run(command, env=get_soffice_env(), **kwargs)
+
+
+
+_SHIM_SO = Path(tempfile.gettempdir()) / "lo_socket_shim.so"
+
+
def _needs_shim() -> bool:
    """Return True when creating an AF_UNIX socket raises OSError (blocked)."""
    try:
        # Probe: open and immediately close a throwaway UNIX-domain socket.
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM):
            pass
    except OSError:
        return True
    return False
+
+
def _ensure_shim() -> Path:
    """Compile (once) and return the LD_PRELOAD shared-object shim.

    The C source is written to a temp file, compiled with gcc, and the
    temporary ``.c`` file is removed even when compilation fails (the
    original version leaked it on a gcc error, since ``check=True``
    raises before the unlink ran).

    Returns:
        Path to the compiled ``lo_socket_shim.so``.

    Raises:
        subprocess.CalledProcessError: if gcc fails to build the shim.
    """
    if _SHIM_SO.exists():
        return _SHIM_SO

    src = Path(tempfile.gettempdir()) / "lo_socket_shim.c"
    src.write_text(_SHIM_SOURCE)
    try:
        subprocess.run(
            ["gcc", "-shared", "-fPIC", "-o", str(_SHIM_SO), str(src), "-ldl"],
            check=True,
            capture_output=True,
        )
    finally:
        src.unlink(missing_ok=True)
    return _SHIM_SO
+
+
+
# C source for the LD_PRELOAD shim.  NOTE: the #include directives had been
# stripped (empty "#include" lines), so the shim could not compile; they are
# restored here to cover the symbols the code uses (dlsym/RTLD_NEXT from
# dlfcn.h, sockaddr/socklen_t from sys/socket.h, errno, pipe/write/_exit
# from unistd.h).
_SHIM_SOURCE = r"""
#define _GNU_SOURCE
#include <dlfcn.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

static int (*real_socket)(int, int, int);
static int (*real_socketpair)(int, int, int, int[2]);
static int (*real_listen)(int, int);
static int (*real_accept)(int, struct sockaddr *, socklen_t *);
static int (*real_close)(int);
static int (*real_read)(int, void *, size_t);

/* Per-FD bookkeeping (FDs >= 1024 are passed through unshimmed). */
static int is_shimmed[1024];
static int peer_of[1024];
static int wake_r[1024]; /* accept() blocks reading this */
static int wake_w[1024]; /* close() writes to this */
static int listener_fd = -1; /* FD that received listen() */

__attribute__((constructor))
static void init(void) {
    real_socket = dlsym(RTLD_NEXT, "socket");
    real_socketpair = dlsym(RTLD_NEXT, "socketpair");
    real_listen = dlsym(RTLD_NEXT, "listen");
    real_accept = dlsym(RTLD_NEXT, "accept");
    real_close = dlsym(RTLD_NEXT, "close");
    real_read = dlsym(RTLD_NEXT, "read");
    for (int i = 0; i < 1024; i++) {
        peer_of[i] = -1;
        wake_r[i] = -1;
        wake_w[i] = -1;
    }
}

/* ---- socket ---------------------------------------------------------- */
int socket(int domain, int type, int protocol) {
    if (domain == AF_UNIX) {
        int fd = real_socket(domain, type, protocol);
        if (fd >= 0) return fd;
        /* socket(AF_UNIX) blocked – fall back to socketpair(). */
        int sv[2];
        if (real_socketpair(domain, type, protocol, sv) == 0) {
            if (sv[0] >= 0 && sv[0] < 1024) {
                is_shimmed[sv[0]] = 1;
                peer_of[sv[0]] = sv[1];
                int wp[2];
                if (pipe(wp) == 0) {
                    wake_r[sv[0]] = wp[0];
                    wake_w[sv[0]] = wp[1];
                }
            }
            return sv[0];
        }
        errno = EPERM;
        return -1;
    }
    return real_socket(domain, type, protocol);
}

/* ---- listen ---------------------------------------------------------- */
int listen(int sockfd, int backlog) {
    if (sockfd >= 0 && sockfd < 1024 && is_shimmed[sockfd]) {
        listener_fd = sockfd;
        return 0;
    }
    return real_listen(sockfd, backlog);
}

/* ---- accept ---------------------------------------------------------- */
int accept(int sockfd, struct sockaddr *addr, socklen_t *addrlen) {
    if (sockfd >= 0 && sockfd < 1024 && is_shimmed[sockfd]) {
        /* Block until close() writes to the wake pipe. */
        if (wake_r[sockfd] >= 0) {
            char buf;
            real_read(wake_r[sockfd], &buf, 1);
        }
        errno = ECONNABORTED;
        return -1;
    }
    return real_accept(sockfd, addr, addrlen);
}

/* ---- close ----------------------------------------------------------- */
int close(int fd) {
    if (fd >= 0 && fd < 1024 && is_shimmed[fd]) {
        int was_listener = (fd == listener_fd);
        is_shimmed[fd] = 0;

        if (wake_w[fd] >= 0) { /* unblock accept() */
            char c = 0;
            write(wake_w[fd], &c, 1);
            real_close(wake_w[fd]);
            wake_w[fd] = -1;
        }
        if (wake_r[fd] >= 0) { real_close(wake_r[fd]); wake_r[fd] = -1; }
        if (peer_of[fd] >= 0) { real_close(peer_of[fd]); peer_of[fd] = -1; }

        if (was_listener)
            _exit(0); /* conversion done – exit */
    }
    return real_close(fd);
}
"""
+
+
+
# CLI passthrough: `python soffice.py <soffice args...>` runs soffice with
# the sandbox-safe environment and mirrors its exit code.
if __name__ == "__main__":
    import sys
    result = run_soffice(sys.argv[1:])
    sys.exit(result.returncode)
diff --git a/.claude/skills/docx/scripts/office/unpack.py b/.claude/skills/docx/scripts/office/unpack.py
new file mode 100644
index 0000000..0015253
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/unpack.py
@@ -0,0 +1,132 @@
+"""Unpack Office files (DOCX, PPTX, XLSX) for editing.
+
+Extracts the ZIP archive, pretty-prints XML files, and optionally:
+- Merges adjacent runs with identical formatting (DOCX only)
+- Simplifies adjacent tracked changes from same author (DOCX only)
+
+Usage:
+ python unpack.py [options]
+
+Examples:
+ python unpack.py document.docx unpacked/
+ python unpack.py presentation.pptx unpacked/
+ python unpack.py document.docx unpacked/ --merge-runs false
+"""
+
+import argparse
+import sys
+import zipfile
+from pathlib import Path
+
+import defusedxml.minidom
+
+from helpers.merge_runs import merge_runs as do_merge_runs
+from helpers.simplify_redlines import simplify_redlines as do_simplify_redlines
+
# Map smart-quote characters to XML numeric character references, applied by
# _escape_smart_quotes() after unpacking.  The previous mapping replaced each
# character with itself ("\u201c" IS the left double quote), which made the
# escaping a no-op; the values here are the corresponding character references.
SMART_QUOTE_REPLACEMENTS = {
    "\u201c": "&#8220;",  # left double quotation mark
    "\u201d": "&#8221;",  # right double quotation mark
    "\u2018": "&#8216;",  # left single quotation mark
    "\u2019": "&#8217;",  # right single quotation mark
}
+
+
def unpack(
    input_file: str,
    output_directory: str,
    merge_runs: bool = True,
    simplify_redlines: bool = True,
) -> tuple[None, str]:
    """Extract an Office file and pretty-print its XML parts for editing.

    Args:
        input_file: Path to a .docx/.pptx/.xlsx file.
        output_directory: Directory to extract into (created if missing).
        merge_runs: Merge adjacent identically-formatted runs (DOCX only).
        simplify_redlines: Merge adjacent same-author tracked changes (DOCX only).

    Returns:
        A ``(None, message)`` tuple; *message* starts with "Error" on failure
        so the CLI (and callers) can detect problems.
    """
    src = Path(input_file)
    dest = Path(output_directory)
    ext = src.suffix.lower()

    if not src.exists():
        return None, f"Error: {input_file} does not exist"

    if ext not in {".docx", ".pptx", ".xlsx"}:
        return None, f"Error: {input_file} must be a .docx, .pptx, or .xlsx file"

    try:
        dest.mkdir(parents=True, exist_ok=True)

        with zipfile.ZipFile(src, "r") as archive:
            archive.extractall(dest)

        xml_parts = [*dest.rglob("*.xml"), *dest.rglob("*.rels")]
        for part in xml_parts:
            _pretty_print_xml(part)

        message = f"Unpacked {input_file} ({len(xml_parts)} XML files)"

        # Run merging and redline simplification only apply to WordprocessingML.
        if ext == ".docx":
            if simplify_redlines:
                count, _ = do_simplify_redlines(str(dest))
                message += f", simplified {count} tracked changes"

            if merge_runs:
                count, _ = do_merge_runs(str(dest))
                message += f", merged {count} runs"

        for part in xml_parts:
            _escape_smart_quotes(part)

        return None, message

    except zipfile.BadZipFile:
        return None, f"Error: {input_file} is not a valid Office file"
    except Exception as e:
        return None, f"Error unpacking: {e}"
+
+
def _pretty_print_xml(xml_file: Path) -> None:
    """Re-indent *xml_file* in place; files that fail to parse are left as-is."""
    try:
        raw = xml_file.read_text(encoding="utf-8")
        pretty = defusedxml.minidom.parseString(raw).toprettyxml(
            indent=" ", encoding="utf-8"
        )
        xml_file.write_bytes(pretty)
    except Exception:
        # Best effort: a non-XML or malformed part should not abort unpacking.
        pass
+
+
def _escape_smart_quotes(xml_file: Path) -> None:
    """Rewrite *xml_file* with SMART_QUOTE_REPLACEMENTS applied (best effort)."""
    try:
        # str.translate does all replacements in a single C-level pass.
        table = str.maketrans(SMART_QUOTE_REPLACEMENTS)
        content = xml_file.read_text(encoding="utf-8")
        xml_file.write_text(content.translate(table), encoding="utf-8")
    except Exception:
        # Best effort: never fail unpacking over a text-encoding hiccup.
        pass
+
+
# Command-line entry point: unpack <input> <output_dir> [--merge-runs t|f]
# [--simplify-redlines t|f]; exits non-zero when unpack() reports an error.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Unpack an Office file (DOCX, PPTX, XLSX) for editing"
    )
    parser.add_argument("input_file", help="Office file to unpack")
    parser.add_argument("output_directory", help="Output directory")
    parser.add_argument(
        "--merge-runs",
        # argparse has no built-in bool parsing; accept the strings true/false.
        type=lambda x: x.lower() == "true",
        default=True,
        metavar="true|false",
        help="Merge adjacent runs with identical formatting (DOCX only, default: true)",
    )
    parser.add_argument(
        "--simplify-redlines",
        type=lambda x: x.lower() == "true",
        default=True,
        metavar="true|false",
        help="Merge adjacent tracked changes from same author (DOCX only, default: true)",
    )
    args = parser.parse_args()

    _, message = unpack(
        args.input_file,
        args.output_directory,
        merge_runs=args.merge_runs,
        simplify_redlines=args.simplify_redlines,
    )
    print(message)

    # unpack() signals failure via a message that begins with "Error".
    if "Error" in message:
        sys.exit(1)
diff --git a/.claude/skills/docx/scripts/office/validate.py b/.claude/skills/docx/scripts/office/validate.py
new file mode 100644
index 0000000..03b01f6
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/validate.py
@@ -0,0 +1,111 @@
+"""
+Command line tool to validate Office document XML files against XSD schemas and tracked changes.
+
+Usage:
+    python validate.py <path> [--original <original_file>] [--auto-repair] [--author NAME]
+
+The first argument can be either:
+- An unpacked directory containing the Office document XML files
+- A packed Office file (.docx/.pptx/.xlsx) which will be unpacked to a temp directory
+
+Auto-repair fixes:
+- paraId/durableId values that exceed OOXML limits
+- Missing xml:space="preserve" on w:t elements with whitespace
+"""
+
+import argparse
+import sys
+import tempfile
+import zipfile
+from pathlib import Path
+
+from validators import DOCXSchemaValidator, PPTXSchemaValidator, RedliningValidator
+
+
+def main():
+ parser = argparse.ArgumentParser(description="Validate Office document XML files")
+ parser.add_argument(
+ "path",
+ help="Path to unpacked directory or packed Office file (.docx/.pptx/.xlsx)",
+ )
+ parser.add_argument(
+ "--original",
+ required=False,
+ default=None,
+ help="Path to original file (.docx/.pptx/.xlsx). If omitted, all XSD errors are reported and redlining validation is skipped.",
+ )
+ parser.add_argument(
+ "-v",
+ "--verbose",
+ action="store_true",
+ help="Enable verbose output",
+ )
+ parser.add_argument(
+ "--auto-repair",
+ action="store_true",
+ help="Automatically repair common issues (hex IDs, whitespace preservation)",
+ )
+ parser.add_argument(
+ "--author",
+ default="Claude",
+ help="Author name for redlining validation (default: Claude)",
+ )
+ args = parser.parse_args()
+
+ path = Path(args.path)
+ assert path.exists(), f"Error: {path} does not exist"
+
+ original_file = None
+ if args.original:
+ original_file = Path(args.original)
+ assert original_file.is_file(), f"Error: {original_file} is not a file"
+ assert original_file.suffix.lower() in [".docx", ".pptx", ".xlsx"], (
+ f"Error: {original_file} must be a .docx, .pptx, or .xlsx file"
+ )
+
+ file_extension = (original_file or path).suffix.lower()
+ assert file_extension in [".docx", ".pptx", ".xlsx"], (
+ f"Error: Cannot determine file type from {path}. Use --original or provide a .docx/.pptx/.xlsx file."
+ )
+
+ if path.is_file() and path.suffix.lower() in [".docx", ".pptx", ".xlsx"]:
+ temp_dir = tempfile.mkdtemp()
+ with zipfile.ZipFile(path, "r") as zf:
+ zf.extractall(temp_dir)
+ unpacked_dir = Path(temp_dir)
+ else:
+ assert path.is_dir(), f"Error: {path} is not a directory or Office file"
+ unpacked_dir = path
+
+ match file_extension:
+ case ".docx":
+ validators = [
+ DOCXSchemaValidator(unpacked_dir, original_file, verbose=args.verbose),
+ ]
+ if original_file:
+ validators.append(
+ RedliningValidator(unpacked_dir, original_file, verbose=args.verbose, author=args.author)
+ )
+ case ".pptx":
+ validators = [
+ PPTXSchemaValidator(unpacked_dir, original_file, verbose=args.verbose),
+ ]
+ case _:
+ print(f"Error: Validation not supported for file type {file_extension}")
+ sys.exit(1)
+
+ if args.auto_repair:
+ total_repairs = sum(v.repair() for v in validators)
+ if total_repairs:
+ print(f"Auto-repaired {total_repairs} issue(s)")
+
+ success = all(v.validate() for v in validators)
+
+ if success:
+ print("All validations PASSED!")
+
+ sys.exit(0 if success else 1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.claude/skills/docx/scripts/office/validators/__init__.py b/.claude/skills/docx/scripts/office/validators/__init__.py
new file mode 100644
index 0000000..db092ec
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/validators/__init__.py
@@ -0,0 +1,15 @@
+"""
+Validation modules for Word document processing.
+"""
+
+from .base import BaseSchemaValidator
+from .docx import DOCXSchemaValidator
+from .pptx import PPTXSchemaValidator
+from .redlining import RedliningValidator
+
+__all__ = [
+ "BaseSchemaValidator",
+ "DOCXSchemaValidator",
+ "PPTXSchemaValidator",
+ "RedliningValidator",
+]
diff --git a/.claude/skills/docx/scripts/office/validators/base.py b/.claude/skills/docx/scripts/office/validators/base.py
new file mode 100644
index 0000000..db4a06a
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/validators/base.py
@@ -0,0 +1,847 @@
+"""
+Base validator with common validation logic for document files.
+"""
+
+import re
+from pathlib import Path
+
+import defusedxml.minidom
+import lxml.etree
+
+
class BaseSchemaValidator:
    """Shared validation logic for unpacked OOXML packages (DOCX/PPTX/XLSX).

    Subclasses implement :meth:`validate` and may override the class-level
    tables below to tune behavior for their document type.
    """

    # Substrings of XSD error messages that are treated as known noise and
    # never reported as new errors.
    IGNORED_VALIDATION_ERRORS = [
        "hyphenationZone",
        "purl.org/dc/terms",
    ]

    # element tag (lowercased, namespace stripped) -> (id attribute name, scope).
    # Scope "file" means the ID must be unique within one part; "global"
    # means unique across the whole package.
    UNIQUE_ID_REQUIREMENTS = {
        "comment": ("id", "file"),
        "commentrangestart": ("id", "file"),
        "commentrangeend": ("id", "file"),
        "bookmarkstart": ("id", "file"),
        "bookmarkend": ("id", "file"),
        "sldid": ("id", "file"),
        "sldmasterid": ("id", "global"),
        "sldlayoutid": ("id", "global"),
        "cm": ("authorid", "file"),
        "sheet": ("sheetid", "file"),
        "definedname": ("id", "file"),
        "cxnsp": ("id", "file"),
        "sp": ("id", "file"),
        "pic": ("id", "file"),
        "grpsp": ("id", "file"),
    }

    # Ancestor tags whose descendants are exempt from the ID-uniqueness check.
    EXCLUDED_ID_CONTAINERS = {
        "sectionlst",
    }

    # element name -> expected relationship type; empty here, subclasses may
    # populate it to enable type checking in validate_all_relationship_ids().
    ELEMENT_RELATIONSHIP_TYPES = {}

    # Filename / folder / suffix -> XSD schema path, relative to schemas dir.
    # NOTE(review): "fouth-edition" presumably mirrors the on-disk directory
    # name (typo and all) — verify against the schemas/ tree before "fixing".
    SCHEMA_MAPPINGS = {
        "word": "ISO-IEC29500-4_2016/wml.xsd",
        "ppt": "ISO-IEC29500-4_2016/pml.xsd",
        "xl": "ISO-IEC29500-4_2016/sml.xsd",
        "[Content_Types].xml": "ecma/fouth-edition/opc-contentTypes.xsd",
        "app.xml": "ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd",
        "core.xml": "ecma/fouth-edition/opc-coreProperties.xsd",
        "custom.xml": "ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd",
        ".rels": "ecma/fouth-edition/opc-relationships.xsd",
        "people.xml": "microsoft/wml-2012.xsd",
        "commentsIds.xml": "microsoft/wml-cid-2016.xsd",
        "commentsExtensible.xml": "microsoft/wml-cex-2018.xsd",
        "commentsExtended.xml": "microsoft/wml-2012.xsd",
        "chart": "ISO-IEC29500-4_2016/dml-chart.xsd",
        "theme": "ISO-IEC29500-4_2016/dml-main.xsd",
        "drawing": "ISO-IEC29500-4_2016/dml-main.xsd",
    }

    # Markup-compatibility (mc:) and xml: namespaces.
    MC_NAMESPACE = "http://schemas.openxmlformats.org/markup-compatibility/2006"
    XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace"

    # OPC package/part relationship and content-type namespaces.
    PACKAGE_RELATIONSHIPS_NAMESPACE = (
        "http://schemas.openxmlformats.org/package/2006/relationships"
    )
    OFFICE_RELATIONSHIPS_NAMESPACE = (
        "http://schemas.openxmlformats.org/officeDocument/2006/relationships"
    )
    CONTENT_TYPES_NAMESPACE = (
        "http://schemas.openxmlformats.org/package/2006/content-types"
    )

    # Top-level folders that hold the main document content per format.
    MAIN_CONTENT_FOLDERS = {"word", "ppt", "xl"}

    # Namespaces considered part of OOXML proper; attributes/elements from
    # any other namespace are stripped by _clean_ignorable_namespaces().
    OOXML_NAMESPACES = {
        "http://schemas.openxmlformats.org/officeDocument/2006/math",
        "http://schemas.openxmlformats.org/officeDocument/2006/relationships",
        "http://schemas.openxmlformats.org/schemaLibrary/2006/main",
        "http://schemas.openxmlformats.org/drawingml/2006/main",
        "http://schemas.openxmlformats.org/drawingml/2006/chart",
        "http://schemas.openxmlformats.org/drawingml/2006/chartDrawing",
        "http://schemas.openxmlformats.org/drawingml/2006/diagram",
        "http://schemas.openxmlformats.org/drawingml/2006/picture",
        "http://schemas.openxmlformats.org/drawingml/2006/spreadsheetDrawing",
        "http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing",
        "http://schemas.openxmlformats.org/wordprocessingml/2006/main",
        "http://schemas.openxmlformats.org/presentationml/2006/main",
        "http://schemas.openxmlformats.org/spreadsheetml/2006/main",
        "http://schemas.openxmlformats.org/officeDocument/2006/sharedTypes",
        "http://www.w3.org/XML/1998/namespace",
    }
+
    def __init__(self, unpacked_dir, original_file=None, verbose=False):
        """Collect the XML parts of an unpacked Office package.

        Args:
            unpacked_dir: Directory containing the extracted package.
            original_file: Optional path to the original packed file; when
                given, pre-existing XSD errors in it are ignored.
            verbose: When True, print PASSED messages as well as failures.
        """
        self.unpacked_dir = Path(unpacked_dir).resolve()
        self.original_file = Path(original_file) if original_file else None
        self.verbose = verbose

        # XSD schemas ship alongside this package, two levels up in schemas/.
        self.schemas_dir = Path(__file__).parent.parent / "schemas"

        patterns = ["*.xml", "*.rels"]
        self.xml_files = [
            f for pattern in patterns for f in self.unpacked_dir.rglob(pattern)
        ]

        if not self.xml_files:
            print(f"Warning: No XML files found in {self.unpacked_dir}")
+
    def validate(self):
        """Run all checks for this document type; return True if all pass."""
        raise NotImplementedError("Subclasses must implement the validate method")
+
    def repair(self) -> int:
        """Run automatic repairs; returns the number of fixes applied."""
        return self.repair_whitespace_preservation()
+
    def repair_whitespace_preservation(self) -> int:
        """Add xml:space="preserve" to text elements whose content starts or
        ends with a space/tab, so Word does not collapse the whitespace.

        Returns:
            Number of elements repaired across all XML files.
        """
        repairs = 0

        for xml_file in self.xml_files:
            try:
                content = xml_file.read_text(encoding="utf-8")
                dom = defusedxml.minidom.parseString(content)
                modified = False

                for elem in dom.getElementsByTagName("*"):
                    # NOTE(review): only prefixed tags like w:t match the
                    # ":t" suffix; an unprefixed <t> (default namespace)
                    # would be skipped — confirm that is intended.
                    if elem.tagName.endswith(":t") and elem.firstChild:
                        text = elem.firstChild.nodeValue
                        if text and (text.startswith((' ', '\t')) or text.endswith((' ', '\t'))):
                            if elem.getAttribute("xml:space") != "preserve":
                                elem.setAttribute("xml:space", "preserve")
                                text_preview = repr(text[:30]) + "..." if len(text) > 30 else repr(text)
                                print(f"  Repaired: {xml_file.name}: Added xml:space='preserve' to {elem.tagName}: {text_preview}")
                                repairs += 1
                                modified = True

                if modified:
                    xml_file.write_bytes(dom.toxml(encoding="UTF-8"))

            except Exception:
                # Best-effort: files that cannot be parsed are left untouched
                # (well-formedness problems are reported by validate_xml()).
                pass

        return repairs
+
+ def validate_xml(self):
+ errors = []
+
+ for xml_file in self.xml_files:
+ try:
+ lxml.etree.parse(str(xml_file))
+ except lxml.etree.XMLSyntaxError as e:
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: "
+ f"Line {e.lineno}: {e.msg}"
+ )
+ except Exception as e:
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: "
+ f"Unexpected error: {str(e)}"
+ )
+
+ if errors:
+ print(f"FAILED - Found {len(errors)} XML violations:")
+ for error in errors:
+ print(error)
+ return False
+ else:
+ if self.verbose:
+ print("PASSED - All XML files are well-formed")
+ return True
+
    def validate_namespaces(self):
        """Check that every prefix listed in an mc:Ignorable attribute is
        actually declared on the root element.

        Files that do not parse are skipped here (validate_xml reports them).
        Returns True when no undeclared prefixes are found.
        """
        errors = []

        for xml_file in self.xml_files:
            try:
                root = lxml.etree.parse(str(xml_file)).getroot()
                # Prefixes declared on the root; None is the default namespace.
                declared = set(root.nsmap.keys()) - {None}

                for attr_val in [
                    v for k, v in root.attrib.items() if k.endswith("Ignorable")
                ]:
                    undeclared = set(attr_val.split()) - declared
                    errors.extend(
                        f"  {xml_file.relative_to(self.unpacked_dir)}: "
                        f"Namespace '{ns}' in Ignorable but not declared"
                        for ns in undeclared
                    )
            except lxml.etree.XMLSyntaxError:
                continue

        if errors:
            print(f"FAILED - {len(errors)} namespace issues:")
            for error in errors:
                print(error)
            return False
        if self.verbose:
            print("PASSED - All namespace prefixes properly declared")
        return True
+
    def validate_unique_ids(self):
        """Enforce the ID-uniqueness rules in UNIQUE_ID_REQUIREMENTS.

        "file"-scoped IDs must be unique per (tag, attribute) within one
        part; "global"-scoped IDs must be unique across the whole package.
        Elements inside mc:AlternateContent or an EXCLUDED_ID_CONTAINERS
        ancestor are exempt. Returns True when no duplicates are found.
        """
        errors = []
        global_ids = {}

        for xml_file in self.xml_files:
            try:
                root = lxml.etree.parse(str(xml_file)).getroot()
                file_ids = {}

                # Drop mc:AlternateContent subtrees (in-memory only): their
                # fallback branches legitimately repeat IDs.
                mc_elements = root.xpath(
                    ".//mc:AlternateContent", namespaces={"mc": self.MC_NAMESPACE}
                )
                for elem in mc_elements:
                    elem.getparent().remove(elem)

                for elem in root.iter():
                    # Namespace-stripped, lowercased local tag name.
                    tag = (
                        elem.tag.split("}")[-1].lower()
                        if "}" in elem.tag
                        else elem.tag.lower()
                    )

                    if tag in self.UNIQUE_ID_REQUIREMENTS:
                        in_excluded_container = any(
                            ancestor.tag.split("}")[-1].lower() in self.EXCLUDED_ID_CONTAINERS
                            for ancestor in elem.iterancestors()
                        )
                        if in_excluded_container:
                            continue

                        attr_name, scope = self.UNIQUE_ID_REQUIREMENTS[tag]

                        # Find the ID attribute regardless of its namespace.
                        id_value = None
                        for attr, value in elem.attrib.items():
                            attr_local = (
                                attr.split("}")[-1].lower()
                                if "}" in attr
                                else attr.lower()
                            )
                            if attr_local == attr_name:
                                id_value = value
                                break

                        if id_value is not None:
                            if scope == "global":
                                if id_value in global_ids:
                                    prev_file, prev_line, prev_tag = global_ids[
                                        id_value
                                    ]
                                    errors.append(
                                        f"  {xml_file.relative_to(self.unpacked_dir)}: "
                                        f"Line {elem.sourceline}: Global ID '{id_value}' in <{tag}> "
                                        f"already used in {prev_file} at line {prev_line} in <{prev_tag}>"
                                    )
                                else:
                                    global_ids[id_value] = (
                                        xml_file.relative_to(self.unpacked_dir),
                                        elem.sourceline,
                                        tag,
                                    )
                            elif scope == "file":
                                key = (tag, attr_name)
                                if key not in file_ids:
                                    file_ids[key] = {}

                                if id_value in file_ids[key]:
                                    prev_line = file_ids[key][id_value]
                                    errors.append(
                                        f"  {xml_file.relative_to(self.unpacked_dir)}: "
                                        f"Line {elem.sourceline}: Duplicate {attr_name}='{id_value}' in <{tag}> "
                                        f"(first occurrence at line {prev_line})"
                                    )
                                else:
                                    file_ids[key][id_value] = elem.sourceline

            except (lxml.etree.XMLSyntaxError, Exception) as e:
                errors.append(
                    f"  {xml_file.relative_to(self.unpacked_dir)}: Error: {e}"
                )

        if errors:
            print(f"FAILED - Found {len(errors)} ID uniqueness violations:")
            for error in errors:
                print(error)
            return False
        else:
            if self.verbose:
                print("PASSED - All required IDs are unique")
            return True
+
    def validate_file_references(self):
        """Cross-check .rels relationships against the files on disk.

        Flags two classes of problem: relationship targets that do not exist
        (broken references) and files that no relationship points to
        (unreferenced files). Returns True when neither is found.
        """
        errors = []

        rels_files = list(self.unpacked_dir.rglob("*.rels"))

        if not rels_files:
            if self.verbose:
                print("PASSED - No .rels files found")
            return True

        # Every file except the content-types manifest and the .rels files
        # themselves must be the target of some relationship.
        all_files = []
        for file_path in self.unpacked_dir.rglob("*"):
            if (
                file_path.is_file()
                and file_path.name != "[Content_Types].xml"
                and not file_path.name.endswith(".rels")
            ):
                all_files.append(file_path.resolve())

        all_referenced_files = set()

        if self.verbose:
            print(
                f"Found {len(rels_files)} .rels files and {len(all_files)} target files"
            )

        for rels_file in rels_files:
            try:
                rels_root = lxml.etree.parse(str(rels_file)).getroot()

                rels_dir = rels_file.parent

                referenced_files = set()
                broken_refs = []

                for rel in rels_root.findall(
                    ".//ns:Relationship",
                    namespaces={"ns": self.PACKAGE_RELATIONSHIPS_NAMESPACE},
                ):
                    target = rel.get("Target")
                    # External targets (URLs, mailto:) are not files on disk.
                    if target and not target.startswith(
                        ("http", "mailto:")
                    ):
                        if target.startswith("/"):
                            # Package-absolute target.
                            target_path = self.unpacked_dir / target.lstrip("/")
                        elif rels_file.name == ".rels":
                            # Root .rels: targets are relative to package root.
                            target_path = self.unpacked_dir / target
                        else:
                            # Part .rels live in <part dir>/_rels/; targets
                            # are relative to the part's own directory.
                            base_dir = rels_dir.parent
                            target_path = base_dir / target

                        try:
                            target_path = target_path.resolve()
                            if target_path.exists() and target_path.is_file():
                                referenced_files.add(target_path)
                                all_referenced_files.add(target_path)
                            else:
                                broken_refs.append((target, rel.sourceline))
                        except (OSError, ValueError):
                            broken_refs.append((target, rel.sourceline))

                if broken_refs:
                    rel_path = rels_file.relative_to(self.unpacked_dir)
                    for broken_ref, line_num in broken_refs:
                        errors.append(
                            f"  {rel_path}: Line {line_num}: Broken reference to {broken_ref}"
                        )

            except Exception as e:
                rel_path = rels_file.relative_to(self.unpacked_dir)
                errors.append(f"  Error parsing {rel_path}: {e}")

        unreferenced_files = set(all_files) - all_referenced_files

        if unreferenced_files:
            for unref_file in sorted(unreferenced_files):
                unref_rel_path = unref_file.relative_to(self.unpacked_dir)
                errors.append(f"  Unreferenced file: {unref_rel_path}")

        if errors:
            print(f"FAILED - Found {len(errors)} relationship validation errors:")
            for error in errors:
                print(error)
            print(
                "CRITICAL: These errors will cause the document to appear corrupt. "
                + "Broken references MUST be fixed, "
                + "and unreferenced files MUST be referenced or removed."
            )
            return False
        else:
            if self.verbose:
                print(
                    "PASSED - All references are valid and all files are properly referenced"
                )
            return True
+
+ def validate_all_relationship_ids(self):
+ import lxml.etree
+
+ errors = []
+
+ for xml_file in self.xml_files:
+ if xml_file.suffix == ".rels":
+ continue
+
+ rels_dir = xml_file.parent / "_rels"
+ rels_file = rels_dir / f"{xml_file.name}.rels"
+
+ if not rels_file.exists():
+ continue
+
+ try:
+ rels_root = lxml.etree.parse(str(rels_file)).getroot()
+ rid_to_type = {}
+
+ for rel in rels_root.findall(
+ f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship"
+ ):
+ rid = rel.get("Id")
+ rel_type = rel.get("Type", "")
+ if rid:
+ if rid in rid_to_type:
+ rels_rel_path = rels_file.relative_to(self.unpacked_dir)
+ errors.append(
+ f" {rels_rel_path}: Line {rel.sourceline}: "
+ f"Duplicate relationship ID '{rid}' (IDs must be unique)"
+ )
+ type_name = (
+ rel_type.split("/")[-1] if "/" in rel_type else rel_type
+ )
+ rid_to_type[rid] = type_name
+
+ xml_root = lxml.etree.parse(str(xml_file)).getroot()
+
+ r_ns = self.OFFICE_RELATIONSHIPS_NAMESPACE
+ rid_attrs_to_check = ["id", "embed", "link"]
+ for elem in xml_root.iter():
+ for attr_name in rid_attrs_to_check:
+ rid_attr = elem.get(f"{{{r_ns}}}{attr_name}")
+ if not rid_attr:
+ continue
+ xml_rel_path = xml_file.relative_to(self.unpacked_dir)
+ elem_name = (
+ elem.tag.split("}")[-1] if "}" in elem.tag else elem.tag
+ )
+
+ if rid_attr not in rid_to_type:
+ errors.append(
+ f" {xml_rel_path}: Line {elem.sourceline}: "
+ f"<{elem_name}> r:{attr_name} references non-existent relationship '{rid_attr}' "
+ f"(valid IDs: {', '.join(sorted(rid_to_type.keys())[:5])}{'...' if len(rid_to_type) > 5 else ''})"
+ )
+ elif attr_name == "id" and self.ELEMENT_RELATIONSHIP_TYPES:
+ expected_type = self._get_expected_relationship_type(
+ elem_name
+ )
+ if expected_type:
+ actual_type = rid_to_type[rid_attr]
+ if expected_type not in actual_type.lower():
+ errors.append(
+ f" {xml_rel_path}: Line {elem.sourceline}: "
+ f"<{elem_name}> references '{rid_attr}' which points to '{actual_type}' "
+ f"but should point to a '{expected_type}' relationship"
+ )
+
+ except Exception as e:
+ xml_rel_path = xml_file.relative_to(self.unpacked_dir)
+ errors.append(f" Error processing {xml_rel_path}: {e}")
+
+ if errors:
+ print(f"FAILED - Found {len(errors)} relationship ID reference errors:")
+ for error in errors:
+ print(error)
+ print("\nThese ID mismatches will cause the document to appear corrupt!")
+ return False
+ else:
+ if self.verbose:
+ print("PASSED - All relationship ID references are valid")
+ return True
+
+ def _get_expected_relationship_type(self, element_name):
+ elem_lower = element_name.lower()
+
+ if elem_lower in self.ELEMENT_RELATIONSHIP_TYPES:
+ return self.ELEMENT_RELATIONSHIP_TYPES[elem_lower]
+
+ if elem_lower.endswith("id") and len(elem_lower) > 2:
+ prefix = elem_lower[:-2]
+ if prefix.endswith("master"):
+ return prefix.lower()
+ elif prefix.endswith("layout"):
+ return prefix.lower()
+ else:
+ if prefix == "sld":
+ return "slide"
+ return prefix.lower()
+
+ if elem_lower.endswith("reference") and len(elem_lower) > 9:
+ prefix = elem_lower[:-9]
+ return prefix.lower()
+
+ return None
+
+ def validate_content_types(self):
+ errors = []
+
+ content_types_file = self.unpacked_dir / "[Content_Types].xml"
+ if not content_types_file.exists():
+ print("FAILED - [Content_Types].xml file not found")
+ return False
+
+ try:
+ root = lxml.etree.parse(str(content_types_file)).getroot()
+ declared_parts = set()
+ declared_extensions = set()
+
+ for override in root.findall(
+ f".//{{{self.CONTENT_TYPES_NAMESPACE}}}Override"
+ ):
+ part_name = override.get("PartName")
+ if part_name is not None:
+ declared_parts.add(part_name.lstrip("/"))
+
+ for default in root.findall(
+ f".//{{{self.CONTENT_TYPES_NAMESPACE}}}Default"
+ ):
+ extension = default.get("Extension")
+ if extension is not None:
+ declared_extensions.add(extension.lower())
+
+ declarable_roots = {
+ "sld",
+ "sldLayout",
+ "sldMaster",
+ "presentation",
+ "document",
+ "workbook",
+ "worksheet",
+ "theme",
+ }
+
+ media_extensions = {
+ "png": "image/png",
+ "jpg": "image/jpeg",
+ "jpeg": "image/jpeg",
+ "gif": "image/gif",
+ "bmp": "image/bmp",
+ "tiff": "image/tiff",
+ "wmf": "image/x-wmf",
+ "emf": "image/x-emf",
+ }
+
+ all_files = list(self.unpacked_dir.rglob("*"))
+ all_files = [f for f in all_files if f.is_file()]
+
+ for xml_file in self.xml_files:
+ path_str = str(xml_file.relative_to(self.unpacked_dir)).replace(
+ "\\", "/"
+ )
+
+ if any(
+ skip in path_str
+ for skip in [".rels", "[Content_Types]", "docProps/", "_rels/"]
+ ):
+ continue
+
+ try:
+ root_tag = lxml.etree.parse(str(xml_file)).getroot().tag
+ root_name = root_tag.split("}")[-1] if "}" in root_tag else root_tag
+
+ if root_name in declarable_roots and path_str not in declared_parts:
+ errors.append(
+ f" {path_str}: File with <{root_name}> root not declared in [Content_Types].xml"
+ )
+
+ except Exception:
+ continue
+
+ for file_path in all_files:
+ if file_path.suffix.lower() in {".xml", ".rels"}:
+ continue
+ if file_path.name == "[Content_Types].xml":
+ continue
+ if "_rels" in file_path.parts or "docProps" in file_path.parts:
+ continue
+
+ extension = file_path.suffix.lstrip(".").lower()
+ if extension and extension not in declared_extensions:
+ if extension in media_extensions:
+ relative_path = file_path.relative_to(self.unpacked_dir)
+ errors.append(
+ f' {relative_path}: File with extension \'{extension}\' not declared in [Content_Types].xml - should add: '
+ )
+
+ except Exception as e:
+ errors.append(f" Error parsing [Content_Types].xml: {e}")
+
+ if errors:
+ print(f"FAILED - Found {len(errors)} content type declaration errors:")
+ for error in errors:
+ print(error)
+ return False
+ else:
+ if self.verbose:
+ print(
+ "PASSED - All content files are properly declared in [Content_Types].xml"
+ )
+ return True
+
    def validate_file_against_xsd(self, xml_file, verbose=False):
        """Validate one file against its XSD, ignoring errors that already
        exist in the original document.

        Returns:
            (None, set()) when no schema applies to the file,
            (True, set()) when the file introduces no new errors,
            (False, {new error messages}) otherwise.
        """
        xml_file = Path(xml_file).resolve()
        unpacked_dir = self.unpacked_dir.resolve()

        is_valid, current_errors = self._validate_single_file_xsd(
            xml_file, unpacked_dir
        )

        if is_valid is None:
            return None, set()
        elif is_valid:
            return True, set()

        # Errors also present in the original packed file are pre-existing
        # and subtracted from the current error set.
        original_errors = self._get_original_file_errors(xml_file)

        assert current_errors is not None
        new_errors = current_errors - original_errors

        # Drop known-noise errors (see IGNORED_VALIDATION_ERRORS).
        new_errors = {
            e for e in new_errors
            if not any(pattern in e for pattern in self.IGNORED_VALIDATION_ERRORS)
        }

        if new_errors:
            if verbose:
                relative_path = xml_file.relative_to(unpacked_dir)
                print(f"FAILED - {relative_path}: {len(new_errors)} new error(s)")
                # Show at most three errors, truncated to keep output readable.
                for error in list(new_errors)[:3]:
                    truncated = error[:250] + "..." if len(error) > 250 else error
                    print(f"  - {truncated}")
            return False, new_errors
        else:
            if verbose:
                print(
                    f"PASSED - No new errors (original had {len(current_errors)} errors)"
                )
            return True, set()
+
+ def validate_against_xsd(self):
+ new_errors = []
+ original_error_count = 0
+ valid_count = 0
+ skipped_count = 0
+
+ for xml_file in self.xml_files:
+ relative_path = str(xml_file.relative_to(self.unpacked_dir))
+ is_valid, new_file_errors = self.validate_file_against_xsd(
+ xml_file, verbose=False
+ )
+
+ if is_valid is None:
+ skipped_count += 1
+ continue
+ elif is_valid and not new_file_errors:
+ valid_count += 1
+ continue
+ elif is_valid:
+ original_error_count += 1
+ valid_count += 1
+ continue
+
+ new_errors.append(f" {relative_path}: {len(new_file_errors)} new error(s)")
+ for error in list(new_file_errors)[:3]:
+ new_errors.append(
+ f" - {error[:250]}..." if len(error) > 250 else f" - {error}"
+ )
+
+ if self.verbose:
+ print(f"Validated {len(self.xml_files)} files:")
+ print(f" - Valid: {valid_count}")
+ print(f" - Skipped (no schema): {skipped_count}")
+ if original_error_count:
+ print(f" - With original errors (ignored): {original_error_count}")
+ print(
+ f" - With NEW errors: {len(new_errors) > 0 and len([e for e in new_errors if not e.startswith(' ')]) or 0}"
+ )
+
+ if new_errors:
+ print("\nFAILED - Found NEW validation errors:")
+ for error in new_errors:
+ print(error)
+ return False
+ else:
+ if self.verbose:
+ print("\nPASSED - No new XSD validation errors introduced")
+ return True
+
+ def _get_schema_path(self, xml_file):
+ if xml_file.name in self.SCHEMA_MAPPINGS:
+ return self.schemas_dir / self.SCHEMA_MAPPINGS[xml_file.name]
+
+ if xml_file.suffix == ".rels":
+ return self.schemas_dir / self.SCHEMA_MAPPINGS[".rels"]
+
+ if "charts/" in str(xml_file) and xml_file.name.startswith("chart"):
+ return self.schemas_dir / self.SCHEMA_MAPPINGS["chart"]
+
+ if "theme/" in str(xml_file) and xml_file.name.startswith("theme"):
+ return self.schemas_dir / self.SCHEMA_MAPPINGS["theme"]
+
+ if xml_file.parent.name in self.MAIN_CONTENT_FOLDERS:
+ return self.schemas_dir / self.SCHEMA_MAPPINGS[xml_file.parent.name]
+
+ return None
+
    def _clean_ignorable_namespaces(self, xml_doc):
        """Return a copy of the tree with attributes and elements from
        non-OOXML namespaces removed, so strict XSD validation can run.

        The input tree is never mutated.
        """
        # Round-trip through a string to deep-copy the tree.
        xml_string = lxml.etree.tostring(xml_doc, encoding="unicode")
        xml_copy = lxml.etree.fromstring(xml_string)

        for elem in xml_copy.iter():
            attrs_to_remove = []

            for attr in elem.attrib:
                # Namespaced attributes look like '{uri}local'.
                if "{" in attr:
                    ns = attr.split("}")[0][1:]
                    if ns not in self.OOXML_NAMESPACES:
                        attrs_to_remove.append(attr)

            # Collected first: deleting while iterating attrib is unsafe.
            for attr in attrs_to_remove:
                del elem.attrib[attr]

        self._remove_ignorable_elements(xml_copy)

        return lxml.etree.ElementTree(xml_copy)
+
    def _remove_ignorable_elements(self, root):
        """Recursively remove child elements whose namespace is not in
        OOXML_NAMESPACES; mutates *root* in place."""
        elements_to_remove = []

        for elem in list(root):
            # Skip comments/processing instructions (their .tag is callable).
            if not hasattr(elem, "tag") or callable(elem.tag):
                continue

            tag_str = str(elem.tag)
            if tag_str.startswith("{"):
                ns = tag_str.split("}")[0][1:]
                if ns not in self.OOXML_NAMESPACES:
                    elements_to_remove.append(elem)
                    continue

            # Recurse only into elements that are kept.
            self._remove_ignorable_elements(elem)

        for elem in elements_to_remove:
            root.remove(elem)
+
+ def _preprocess_for_mc_ignorable(self, xml_doc):
+ root = xml_doc.getroot()
+
+ if f"{{{self.MC_NAMESPACE}}}Ignorable" in root.attrib:
+ del root.attrib[f"{{{self.MC_NAMESPACE}}}Ignorable"]
+
+ return xml_doc
+
    def _validate_single_file_xsd(self, xml_file, base_path):
        """Validate one file against its mapped XSD.

        Returns:
            (None, None) when no schema applies to the file,
            (True, set()) on success,
            (False, {error messages}) on validation failure or any exception.
        """
        schema_path = self._get_schema_path(xml_file)
        if not schema_path:
            return None, None

        try:
            # base_url lets the parser resolve xs:include/xs:import relative
            # to the schema file's own location.
            with open(schema_path, "rb") as xsd_file:
                parser = lxml.etree.XMLParser()
                xsd_doc = lxml.etree.parse(
                    xsd_file, parser=parser, base_url=str(schema_path)
                )
                schema = lxml.etree.XMLSchema(xsd_doc)

            # NOTE(review): text-mode open uses the locale's default
            # encoding; lxml normally prefers bytes input — confirm OK for
            # non-UTF-8 locales.
            with open(xml_file, "r") as f:
                xml_doc = lxml.etree.parse(f)

            # Normalize the tree before validating: drop {{...}} template
            # tags and the mc:Ignorable attribute.
            xml_doc, _ = self._remove_template_tags_from_text_nodes(xml_doc)
            xml_doc = self._preprocess_for_mc_ignorable(xml_doc)

            # Main content parts additionally get non-OOXML namespaces stripped.
            relative_path = xml_file.relative_to(base_path)
            if (
                relative_path.parts
                and relative_path.parts[0] in self.MAIN_CONTENT_FOLDERS
            ):
                xml_doc = self._clean_ignorable_namespaces(xml_doc)

            if schema.validate(xml_doc):
                return True, set()
            else:
                errors = set()
                for error in schema.error_log:
                    errors.add(error.message)
                return False, errors

        except Exception as e:
            # Any parse/IO failure is reported as a single error message.
            return False, {str(e)}
+
    def _get_original_file_errors(self, xml_file):
        """Return the XSD error set for the same part inside the ORIGINAL
        packed file, or an empty set when no original is available or the
        part does not exist there.

        The original archive is extracted to a fresh temporary directory on
        every call; callers only invoke this for files that already failed
        validation, so the cost is bounded.
        """
        if self.original_file is None:
            return set()

        # Local imports: only needed on this (rare) path.
        import tempfile
        import zipfile

        xml_file = Path(xml_file).resolve()
        unpacked_dir = self.unpacked_dir.resolve()
        relative_path = xml_file.relative_to(unpacked_dir)

        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)

            with zipfile.ZipFile(self.original_file, "r") as zip_ref:
                zip_ref.extractall(temp_path)

            original_xml_file = temp_path / relative_path

            if not original_xml_file.exists():
                return set()

            is_valid, errors = self._validate_single_file_xsd(
                original_xml_file, temp_path
            )
            return errors if errors else set()
+
    def _remove_template_tags_from_text_nodes(self, xml_doc):
        """Strip ``{{...}}`` template tags from text/tail of non-<t> elements.

        Works on a deep copy of the tree. Text inside ``<*:t>`` elements is
        deliberately left untouched (document text may legitimately contain
        template markers).

        Returns:
            (new ElementTree, list of warning strings, one per removed tag).
        """
        warnings = []
        template_pattern = re.compile(r"\{\{[^}]*\}\}")

        # Round-trip through a string to deep-copy the tree.
        xml_string = lxml.etree.tostring(xml_doc, encoding="unicode")
        xml_copy = lxml.etree.fromstring(xml_string)

        def process_text_content(text, content_type):
            # Remove all template tags from *text*, recording each removal.
            if not text:
                return text
            matches = list(template_pattern.finditer(text))
            if matches:
                for match in matches:
                    warnings.append(
                        f"Found template tag in {content_type}: {match.group()}"
                    )
                return template_pattern.sub("", text)
            return text

        for elem in xml_copy.iter():
            # Skip comments/processing instructions (callable .tag).
            if not hasattr(elem, "tag") or callable(elem.tag):
                continue
            tag_str = str(elem.tag)
            if tag_str.endswith("}t") or tag_str == "t":
                continue

            elem.text = process_text_content(elem.text, "text content")
            elem.tail = process_text_content(elem.tail, "tail content")

        return lxml.etree.ElementTree(xml_copy), warnings
+
+
+if __name__ == "__main__":
+ raise RuntimeError("This module should not be run directly.")
diff --git a/.claude/skills/docx/scripts/office/validators/docx.py b/.claude/skills/docx/scripts/office/validators/docx.py
new file mode 100644
index 0000000..fec405e
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/validators/docx.py
@@ -0,0 +1,446 @@
+"""
+Validator for Word document XML files against XSD schemas.
+"""
+
+import random
+import re
+import tempfile
+import zipfile
+
+import defusedxml.minidom
+import lxml.etree
+
+from .base import BaseSchemaValidator
+
+
class DOCXSchemaValidator(BaseSchemaValidator):
    """Validates Word document XML parts against XSD schemas and DOCX conventions."""

    # WordprocessingML main namespace (ECMA-376).
    WORD_2006_NAMESPACE = "http://schemas.openxmlformats.org/wordprocessingml/2006/main"
    # Microsoft extensions: w14 (2010 wordml) and w16cid (2016 wordml comment ids).
    W14_NAMESPACE = "http://schemas.microsoft.com/office/word/2010/wordml"
    W16CID_NAMESPACE = "http://schemas.microsoft.com/office/word/2016/wordml/cid"

    # No element-to-relationship-type mapping is needed for DOCX validation.
    ELEMENT_RELATIONSHIP_TYPES = {}
+
+ def validate(self):
+ if not self.validate_xml():
+ return False
+
+ all_valid = True
+ if not self.validate_namespaces():
+ all_valid = False
+
+ if not self.validate_unique_ids():
+ all_valid = False
+
+ if not self.validate_file_references():
+ all_valid = False
+
+ if not self.validate_content_types():
+ all_valid = False
+
+ if not self.validate_against_xsd():
+ all_valid = False
+
+ if not self.validate_whitespace_preservation():
+ all_valid = False
+
+ if not self.validate_deletions():
+ all_valid = False
+
+ if not self.validate_insertions():
+ all_valid = False
+
+ if not self.validate_all_relationship_ids():
+ all_valid = False
+
+ if not self.validate_id_constraints():
+ all_valid = False
+
+ if not self.validate_comment_markers():
+ all_valid = False
+
+ self.compare_paragraph_counts()
+
+ return all_valid
+
+ def validate_whitespace_preservation(self):
+ errors = []
+
+ for xml_file in self.xml_files:
+ if xml_file.name != "document.xml":
+ continue
+
+ try:
+ root = lxml.etree.parse(str(xml_file)).getroot()
+
+ for elem in root.iter(f"{{{self.WORD_2006_NAMESPACE}}}t"):
+ if elem.text:
+ text = elem.text
+ if re.search(r"^[ \t\n\r]", text) or re.search(
+ r"[ \t\n\r]$", text
+ ):
+ xml_space_attr = f"{{{self.XML_NAMESPACE}}}space"
+ if (
+ xml_space_attr not in elem.attrib
+ or elem.attrib[xml_space_attr] != "preserve"
+ ):
+ text_preview = (
+ repr(text)[:50] + "..."
+ if len(repr(text)) > 50
+ else repr(text)
+ )
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: "
+ f"Line {elem.sourceline}: w:t element with whitespace missing xml:space='preserve': {text_preview}"
+ )
+
+ except (lxml.etree.XMLSyntaxError, Exception) as e:
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}"
+ )
+
+ if errors:
+ print(f"FAILED - Found {len(errors)} whitespace preservation violations:")
+ for error in errors:
+ print(error)
+ return False
+ else:
+ if self.verbose:
+ print("PASSED - All whitespace is properly preserved")
+ return True
+
+ def validate_deletions(self):
+ errors = []
+
+ for xml_file in self.xml_files:
+ if xml_file.name != "document.xml":
+ continue
+
+ try:
+ root = lxml.etree.parse(str(xml_file)).getroot()
+ namespaces = {"w": self.WORD_2006_NAMESPACE}
+
+ for t_elem in root.xpath(".//w:del//w:t", namespaces=namespaces):
+ if t_elem.text:
+ text_preview = (
+ repr(t_elem.text)[:50] + "..."
+ if len(repr(t_elem.text)) > 50
+ else repr(t_elem.text)
+ )
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: "
+ f"Line {t_elem.sourceline}: found within : {text_preview}"
+ )
+
+ for instr_elem in root.xpath(
+ ".//w:del//w:instrText", namespaces=namespaces
+ ):
+ text_preview = (
+ repr(instr_elem.text or "")[:50] + "..."
+ if len(repr(instr_elem.text or "")) > 50
+ else repr(instr_elem.text or "")
+ )
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: "
+ f"Line {instr_elem.sourceline}: found within (use ): {text_preview}"
+ )
+
+ except (lxml.etree.XMLSyntaxError, Exception) as e:
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}"
+ )
+
+ if errors:
+ print(f"FAILED - Found {len(errors)} deletion validation violations:")
+ for error in errors:
+ print(error)
+ return False
+ else:
+ if self.verbose:
+ print("PASSED - No w:t elements found within w:del elements")
+ return True
+
+ def count_paragraphs_in_unpacked(self):
+ count = 0
+
+ for xml_file in self.xml_files:
+ if xml_file.name != "document.xml":
+ continue
+
+ try:
+ root = lxml.etree.parse(str(xml_file)).getroot()
+ paragraphs = root.findall(f".//{{{self.WORD_2006_NAMESPACE}}}p")
+ count = len(paragraphs)
+ except Exception as e:
+ print(f"Error counting paragraphs in unpacked document: {e}")
+
+ return count
+
+ def count_paragraphs_in_original(self):
+ original = self.original_file
+ if original is None:
+ return 0
+
+ count = 0
+
+ try:
+ with tempfile.TemporaryDirectory() as temp_dir:
+ with zipfile.ZipFile(original, "r") as zip_ref:
+ zip_ref.extractall(temp_dir)
+
+ doc_xml_path = temp_dir + "/word/document.xml"
+ root = lxml.etree.parse(doc_xml_path).getroot()
+
+ paragraphs = root.findall(f".//{{{self.WORD_2006_NAMESPACE}}}p")
+ count = len(paragraphs)
+
+ except Exception as e:
+ print(f"Error counting paragraphs in original document: {e}")
+
+ return count
+
+ def validate_insertions(self):
+ errors = []
+
+ for xml_file in self.xml_files:
+ if xml_file.name != "document.xml":
+ continue
+
+ try:
+ root = lxml.etree.parse(str(xml_file)).getroot()
+ namespaces = {"w": self.WORD_2006_NAMESPACE}
+
+ invalid_elements = root.xpath(
+ ".//w:ins//w:delText[not(ancestor::w:del)]", namespaces=namespaces
+ )
+
+ for elem in invalid_elements:
+ text_preview = (
+ repr(elem.text or "")[:50] + "..."
+ if len(repr(elem.text or "")) > 50
+ else repr(elem.text or "")
+ )
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: "
+ f"Line {elem.sourceline}: within : {text_preview}"
+ )
+
+ except (lxml.etree.XMLSyntaxError, Exception) as e:
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}"
+ )
+
+ if errors:
+ print(f"FAILED - Found {len(errors)} insertion validation violations:")
+ for error in errors:
+ print(error)
+ return False
+ else:
+ if self.verbose:
+ print("PASSED - No w:delText elements within w:ins elements")
+ return True
+
+ def compare_paragraph_counts(self):
+ original_count = self.count_paragraphs_in_original()
+ new_count = self.count_paragraphs_in_unpacked()
+
+ diff = new_count - original_count
+ diff_str = f"+{diff}" if diff > 0 else str(diff)
+ print(f"\nParagraphs: {original_count} → {new_count} ({diff_str})")
+
+ def _parse_id_value(self, val: str, base: int = 16) -> int:
+ return int(val, base)
+
+ def validate_id_constraints(self):
+ errors = []
+ para_id_attr = f"{{{self.W14_NAMESPACE}}}paraId"
+ durable_id_attr = f"{{{self.W16CID_NAMESPACE}}}durableId"
+
+ for xml_file in self.xml_files:
+ try:
+ for elem in lxml.etree.parse(str(xml_file)).iter():
+ if val := elem.get(para_id_attr):
+ if self._parse_id_value(val, base=16) >= 0x80000000:
+ errors.append(
+ f" {xml_file.name}:{elem.sourceline}: paraId={val} >= 0x80000000"
+ )
+
+ if val := elem.get(durable_id_attr):
+ if xml_file.name == "numbering.xml":
+ try:
+ if self._parse_id_value(val, base=10) >= 0x7FFFFFFF:
+ errors.append(
+ f" {xml_file.name}:{elem.sourceline}: "
+ f"durableId={val} >= 0x7FFFFFFF"
+ )
+ except ValueError:
+ errors.append(
+ f" {xml_file.name}:{elem.sourceline}: "
+ f"durableId={val} must be decimal in numbering.xml"
+ )
+ else:
+ if self._parse_id_value(val, base=16) >= 0x7FFFFFFF:
+ errors.append(
+ f" {xml_file.name}:{elem.sourceline}: "
+ f"durableId={val} >= 0x7FFFFFFF"
+ )
+ except Exception:
+ pass
+
+ if errors:
+ print(f"FAILED - {len(errors)} ID constraint violations:")
+ for e in errors:
+ print(e)
+ elif self.verbose:
+ print("PASSED - All paraId/durableId values within constraints")
+ return not errors
+
+ def validate_comment_markers(self):
+ errors = []
+
+ document_xml = None
+ comments_xml = None
+ for xml_file in self.xml_files:
+ if xml_file.name == "document.xml" and "word" in str(xml_file):
+ document_xml = xml_file
+ elif xml_file.name == "comments.xml":
+ comments_xml = xml_file
+
+ if not document_xml:
+ if self.verbose:
+ print("PASSED - No document.xml found (skipping comment validation)")
+ return True
+
+ try:
+ doc_root = lxml.etree.parse(str(document_xml)).getroot()
+ namespaces = {"w": self.WORD_2006_NAMESPACE}
+
+ range_starts = {
+ elem.get(f"{{{self.WORD_2006_NAMESPACE}}}id")
+ for elem in doc_root.xpath(
+ ".//w:commentRangeStart", namespaces=namespaces
+ )
+ }
+ range_ends = {
+ elem.get(f"{{{self.WORD_2006_NAMESPACE}}}id")
+ for elem in doc_root.xpath(
+ ".//w:commentRangeEnd", namespaces=namespaces
+ )
+ }
+ references = {
+ elem.get(f"{{{self.WORD_2006_NAMESPACE}}}id")
+ for elem in doc_root.xpath(
+ ".//w:commentReference", namespaces=namespaces
+ )
+ }
+
+ orphaned_ends = range_ends - range_starts
+ for comment_id in sorted(
+ orphaned_ends, key=lambda x: int(x) if x and x.isdigit() else 0
+ ):
+ errors.append(
+ f' document.xml: commentRangeEnd id="{comment_id}" has no matching commentRangeStart'
+ )
+
+ orphaned_starts = range_starts - range_ends
+ for comment_id in sorted(
+ orphaned_starts, key=lambda x: int(x) if x and x.isdigit() else 0
+ ):
+ errors.append(
+ f' document.xml: commentRangeStart id="{comment_id}" has no matching commentRangeEnd'
+ )
+
+ comment_ids = set()
+ if comments_xml and comments_xml.exists():
+ comments_root = lxml.etree.parse(str(comments_xml)).getroot()
+ comment_ids = {
+ elem.get(f"{{{self.WORD_2006_NAMESPACE}}}id")
+ for elem in comments_root.xpath(
+ ".//w:comment", namespaces=namespaces
+ )
+ }
+
+ marker_ids = range_starts | range_ends | references
+ invalid_refs = marker_ids - comment_ids
+ for comment_id in sorted(
+ invalid_refs, key=lambda x: int(x) if x and x.isdigit() else 0
+ ):
+ if comment_id:
+ errors.append(
+ f' document.xml: marker id="{comment_id}" references non-existent comment'
+ )
+
+ except (lxml.etree.XMLSyntaxError, Exception) as e:
+ errors.append(f" Error parsing XML: {e}")
+
+ if errors:
+ print(f"FAILED - {len(errors)} comment marker violations:")
+ for error in errors:
+ print(error)
+ return False
+ else:
+ if self.verbose:
+ print("PASSED - All comment markers properly paired")
+ return True
+
+ def repair(self) -> int:
+ repairs = super().repair()
+ repairs += self.repair_durableId()
+ return repairs
+
+ def repair_durableId(self) -> int:
+ repairs = 0
+
+ for xml_file in self.xml_files:
+ try:
+ content = xml_file.read_text(encoding="utf-8")
+ dom = defusedxml.minidom.parseString(content)
+ modified = False
+
+ for elem in dom.getElementsByTagName("*"):
+ if not elem.hasAttribute("w16cid:durableId"):
+ continue
+
+ durable_id = elem.getAttribute("w16cid:durableId")
+ needs_repair = False
+
+ if xml_file.name == "numbering.xml":
+ try:
+ needs_repair = (
+ self._parse_id_value(durable_id, base=10) >= 0x7FFFFFFF
+ )
+ except ValueError:
+ needs_repair = True
+ else:
+ try:
+ needs_repair = (
+ self._parse_id_value(durable_id, base=16) >= 0x7FFFFFFF
+ )
+ except ValueError:
+ needs_repair = True
+
+ if needs_repair:
+ value = random.randint(1, 0x7FFFFFFE)
+ if xml_file.name == "numbering.xml":
+ new_id = str(value)
+ else:
+ new_id = f"{value:08X}"
+
+ elem.setAttribute("w16cid:durableId", new_id)
+ print(
+ f" Repaired: {xml_file.name}: durableId {durable_id} → {new_id}"
+ )
+ repairs += 1
+ modified = True
+
+ if modified:
+ xml_file.write_bytes(dom.toxml(encoding="UTF-8"))
+
+ except Exception:
+ pass
+
+ return repairs
+
+
# Library module: import DOCXSchemaValidator instead of executing this file.
if __name__ == "__main__":
    raise RuntimeError("This module should not be run directly.")
diff --git a/.claude/skills/docx/scripts/office/validators/pptx.py b/.claude/skills/docx/scripts/office/validators/pptx.py
new file mode 100644
index 0000000..09842aa
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/validators/pptx.py
@@ -0,0 +1,275 @@
+"""
+Validator for PowerPoint presentation XML files against XSD schemas.
+"""
+
+import re
+
+from .base import BaseSchemaValidator
+
+
class PPTXSchemaValidator(BaseSchemaValidator):
    """Validates PowerPoint package XML parts against XSD schemas and PPTX conventions."""

    # PresentationML main namespace (ECMA-376).
    PRESENTATIONML_NAMESPACE = (
        "http://schemas.openxmlformats.org/presentationml/2006/main"
    )

    # Maps lowercased id-list element names to the relationship-type suffix
    # their r:id must resolve to (consumed by the base-class relationship checks).
    ELEMENT_RELATIONSHIP_TYPES = {
        "sldid": "slide",
        "sldmasterid": "slidemaster",
        "notesmasterid": "notesmaster",
        "sldlayoutid": "slidelayout",
        "themeid": "theme",
        "tablestyleid": "tablestyles",
    }
+
+ def validate(self):
+ if not self.validate_xml():
+ return False
+
+ all_valid = True
+ if not self.validate_namespaces():
+ all_valid = False
+
+ if not self.validate_unique_ids():
+ all_valid = False
+
+ if not self.validate_uuid_ids():
+ all_valid = False
+
+ if not self.validate_file_references():
+ all_valid = False
+
+ if not self.validate_slide_layout_ids():
+ all_valid = False
+
+ if not self.validate_content_types():
+ all_valid = False
+
+ if not self.validate_against_xsd():
+ all_valid = False
+
+ if not self.validate_notes_slide_references():
+ all_valid = False
+
+ if not self.validate_all_relationship_ids():
+ all_valid = False
+
+ if not self.validate_no_duplicate_slide_layouts():
+ all_valid = False
+
+ return all_valid
+
+ def validate_uuid_ids(self):
+ import lxml.etree
+
+ errors = []
+ uuid_pattern = re.compile(
+ r"^[\{\(]?[0-9A-Fa-f]{8}-?[0-9A-Fa-f]{4}-?[0-9A-Fa-f]{4}-?[0-9A-Fa-f]{4}-?[0-9A-Fa-f]{12}[\}\)]?$"
+ )
+
+ for xml_file in self.xml_files:
+ try:
+ root = lxml.etree.parse(str(xml_file)).getroot()
+
+ for elem in root.iter():
+ for attr, value in elem.attrib.items():
+ attr_name = attr.split("}")[-1].lower()
+ if attr_name == "id" or attr_name.endswith("id"):
+ if self._looks_like_uuid(value):
+ if not uuid_pattern.match(value):
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: "
+ f"Line {elem.sourceline}: ID '{value}' appears to be a UUID but contains invalid hex characters"
+ )
+
+ except (lxml.etree.XMLSyntaxError, Exception) as e:
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}"
+ )
+
+ if errors:
+ print(f"FAILED - Found {len(errors)} UUID ID validation errors:")
+ for error in errors:
+ print(error)
+ return False
+ else:
+ if self.verbose:
+ print("PASSED - All UUID-like IDs contain valid hex values")
+ return True
+
+ def _looks_like_uuid(self, value):
+ clean_value = value.strip("{}()").replace("-", "")
+ return len(clean_value) == 32 and all(c.isalnum() for c in clean_value)
+
+ def validate_slide_layout_ids(self):
+ import lxml.etree
+
+ errors = []
+
+ slide_masters = list(self.unpacked_dir.glob("ppt/slideMasters/*.xml"))
+
+ if not slide_masters:
+ if self.verbose:
+ print("PASSED - No slide masters found")
+ return True
+
+ for slide_master in slide_masters:
+ try:
+ root = lxml.etree.parse(str(slide_master)).getroot()
+
+ rels_file = slide_master.parent / "_rels" / f"{slide_master.name}.rels"
+
+ if not rels_file.exists():
+ errors.append(
+ f" {slide_master.relative_to(self.unpacked_dir)}: "
+ f"Missing relationships file: {rels_file.relative_to(self.unpacked_dir)}"
+ )
+ continue
+
+ rels_root = lxml.etree.parse(str(rels_file)).getroot()
+
+ valid_layout_rids = set()
+ for rel in rels_root.findall(
+ f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship"
+ ):
+ rel_type = rel.get("Type", "")
+ if "slideLayout" in rel_type:
+ valid_layout_rids.add(rel.get("Id"))
+
+ for sld_layout_id in root.findall(
+ f".//{{{self.PRESENTATIONML_NAMESPACE}}}sldLayoutId"
+ ):
+ r_id = sld_layout_id.get(
+ f"{{{self.OFFICE_RELATIONSHIPS_NAMESPACE}}}id"
+ )
+ layout_id = sld_layout_id.get("id")
+
+ if r_id and r_id not in valid_layout_rids:
+ errors.append(
+ f" {slide_master.relative_to(self.unpacked_dir)}: "
+ f"Line {sld_layout_id.sourceline}: sldLayoutId with id='{layout_id}' "
+ f"references r:id='{r_id}' which is not found in slide layout relationships"
+ )
+
+ except (lxml.etree.XMLSyntaxError, Exception) as e:
+ errors.append(
+ f" {slide_master.relative_to(self.unpacked_dir)}: Error: {e}"
+ )
+
+ if errors:
+ print(f"FAILED - Found {len(errors)} slide layout ID validation errors:")
+ for error in errors:
+ print(error)
+ print(
+ "Remove invalid references or add missing slide layouts to the relationships file."
+ )
+ return False
+ else:
+ if self.verbose:
+ print("PASSED - All slide layout IDs reference valid slide layouts")
+ return True
+
+ def validate_no_duplicate_slide_layouts(self):
+ import lxml.etree
+
+ errors = []
+ slide_rels_files = list(self.unpacked_dir.glob("ppt/slides/_rels/*.xml.rels"))
+
+ for rels_file in slide_rels_files:
+ try:
+ root = lxml.etree.parse(str(rels_file)).getroot()
+
+ layout_rels = [
+ rel
+ for rel in root.findall(
+ f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship"
+ )
+ if "slideLayout" in rel.get("Type", "")
+ ]
+
+ if len(layout_rels) > 1:
+ errors.append(
+ f" {rels_file.relative_to(self.unpacked_dir)}: has {len(layout_rels)} slideLayout references"
+ )
+
+ except Exception as e:
+ errors.append(
+ f" {rels_file.relative_to(self.unpacked_dir)}: Error: {e}"
+ )
+
+ if errors:
+ print("FAILED - Found slides with duplicate slideLayout references:")
+ for error in errors:
+ print(error)
+ return False
+ else:
+ if self.verbose:
+ print("PASSED - All slides have exactly one slideLayout reference")
+ return True
+
+ def validate_notes_slide_references(self):
+ import lxml.etree
+
+ errors = []
+ notes_slide_references = {}
+
+ slide_rels_files = list(self.unpacked_dir.glob("ppt/slides/_rels/*.xml.rels"))
+
+ if not slide_rels_files:
+ if self.verbose:
+ print("PASSED - No slide relationship files found")
+ return True
+
+ for rels_file in slide_rels_files:
+ try:
+ root = lxml.etree.parse(str(rels_file)).getroot()
+
+ for rel in root.findall(
+ f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship"
+ ):
+ rel_type = rel.get("Type", "")
+ if "notesSlide" in rel_type:
+ target = rel.get("Target", "")
+ if target:
+ normalized_target = target.replace("../", "")
+
+ slide_name = rels_file.stem.replace(
+ ".xml", ""
+ )
+
+ if normalized_target not in notes_slide_references:
+ notes_slide_references[normalized_target] = []
+ notes_slide_references[normalized_target].append(
+ (slide_name, rels_file)
+ )
+
+ except (lxml.etree.XMLSyntaxError, Exception) as e:
+ errors.append(
+ f" {rels_file.relative_to(self.unpacked_dir)}: Error: {e}"
+ )
+
+ for target, references in notes_slide_references.items():
+ if len(references) > 1:
+ slide_names = [ref[0] for ref in references]
+ errors.append(
+ f" Notes slide '{target}' is referenced by multiple slides: {', '.join(slide_names)}"
+ )
+ for slide_name, rels_file in references:
+ errors.append(f" - {rels_file.relative_to(self.unpacked_dir)}")
+
+ if errors:
+ print(
+ f"FAILED - Found {len([e for e in errors if not e.startswith(' ')])} notes slide reference validation errors:"
+ )
+ for error in errors:
+ print(error)
+ print("Each slide may optionally have its own slide file.")
+ return False
+ else:
+ if self.verbose:
+ print("PASSED - All notes slide references are unique")
+ return True
+
+
# Library module: import PPTXSchemaValidator instead of executing this file.
if __name__ == "__main__":
    raise RuntimeError("This module should not be run directly.")
diff --git a/.claude/skills/docx/scripts/office/validators/redlining.py b/.claude/skills/docx/scripts/office/validators/redlining.py
new file mode 100644
index 0000000..71c81b6
--- /dev/null
+++ b/.claude/skills/docx/scripts/office/validators/redlining.py
@@ -0,0 +1,247 @@
+"""
+Validator for tracked changes in Word documents.
+"""
+
+import subprocess
+import tempfile
+import zipfile
+from pathlib import Path
+
+
class RedliningValidator:
    """Verifies that all edits by a given author exist only as tracked changes."""

    def __init__(self, unpacked_dir, original_docx, verbose=False, author="Claude"):
        # Directory holding the unpacked (edited) docx parts.
        self.unpacked_dir = Path(unpacked_dir)
        # Path to the pristine original .docx archive.
        self.original_docx = Path(original_docx)
        self.verbose = verbose
        # Author whose tracked changes are validated and stripped.
        self.author = author
        # WordprocessingML namespace map used by all XPath/find calls.
        self.namespaces = {
            "w": "http://schemas.openxmlformats.org/wordprocessingml/2006/main"
        }
+
    def repair(self) -> int:
        # Redlining problems cannot be auto-repaired; always report zero repairs.
        return 0
+
+ def validate(self):
+ modified_file = self.unpacked_dir / "word" / "document.xml"
+ if not modified_file.exists():
+ print(f"FAILED - Modified document.xml not found at {modified_file}")
+ return False
+
+ try:
+ import xml.etree.ElementTree as ET
+
+ tree = ET.parse(modified_file)
+ root = tree.getroot()
+
+ del_elements = root.findall(".//w:del", self.namespaces)
+ ins_elements = root.findall(".//w:ins", self.namespaces)
+
+ author_del_elements = [
+ elem
+ for elem in del_elements
+ if elem.get(f"{{{self.namespaces['w']}}}author") == self.author
+ ]
+ author_ins_elements = [
+ elem
+ for elem in ins_elements
+ if elem.get(f"{{{self.namespaces['w']}}}author") == self.author
+ ]
+
+ if not author_del_elements and not author_ins_elements:
+ if self.verbose:
+ print(f"PASSED - No tracked changes by {self.author} found.")
+ return True
+
+ except Exception:
+ pass
+
+ with tempfile.TemporaryDirectory() as temp_dir:
+ temp_path = Path(temp_dir)
+
+ try:
+ with zipfile.ZipFile(self.original_docx, "r") as zip_ref:
+ zip_ref.extractall(temp_path)
+ except Exception as e:
+ print(f"FAILED - Error unpacking original docx: {e}")
+ return False
+
+ original_file = temp_path / "word" / "document.xml"
+ if not original_file.exists():
+ print(
+ f"FAILED - Original document.xml not found in {self.original_docx}"
+ )
+ return False
+
+ try:
+ import xml.etree.ElementTree as ET
+
+ modified_tree = ET.parse(modified_file)
+ modified_root = modified_tree.getroot()
+ original_tree = ET.parse(original_file)
+ original_root = original_tree.getroot()
+ except ET.ParseError as e:
+ print(f"FAILED - Error parsing XML files: {e}")
+ return False
+
+ self._remove_author_tracked_changes(original_root)
+ self._remove_author_tracked_changes(modified_root)
+
+ modified_text = self._extract_text_content(modified_root)
+ original_text = self._extract_text_content(original_root)
+
+ if modified_text != original_text:
+ error_message = self._generate_detailed_diff(
+ original_text, modified_text
+ )
+ print(error_message)
+ return False
+
+ if self.verbose:
+ print(f"PASSED - All changes by {self.author} are properly tracked")
+ return True
+
+ def _generate_detailed_diff(self, original_text, modified_text):
+ error_parts = [
+ f"FAILED - Document text doesn't match after removing {self.author}'s tracked changes",
+ "",
+ "Likely causes:",
+ " 1. Modified text inside another author's or tags",
+ " 2. Made edits without proper tracked changes",
+ " 3. Didn't nest inside when deleting another's insertion",
+ "",
+ "For pre-redlined documents, use correct patterns:",
+ " - To reject another's INSERTION: Nest inside their ",
+ " - To restore another's DELETION: Add new AFTER their ",
+ "",
+ ]
+
+ git_diff = self._get_git_word_diff(original_text, modified_text)
+ if git_diff:
+ error_parts.extend(["Differences:", "============", git_diff])
+ else:
+ error_parts.append("Unable to generate word diff (git not available)")
+
+ return "\n".join(error_parts)
+
+ def _get_git_word_diff(self, original_text, modified_text):
+ try:
+ with tempfile.TemporaryDirectory() as temp_dir:
+ temp_path = Path(temp_dir)
+
+ original_file = temp_path / "original.txt"
+ modified_file = temp_path / "modified.txt"
+
+ original_file.write_text(original_text, encoding="utf-8")
+ modified_file.write_text(modified_text, encoding="utf-8")
+
+ result = subprocess.run(
+ [
+ "git",
+ "diff",
+ "--word-diff=plain",
+ "--word-diff-regex=.",
+ "-U0",
+ "--no-index",
+ str(original_file),
+ str(modified_file),
+ ],
+ capture_output=True,
+ text=True,
+ )
+
+ if result.stdout.strip():
+ lines = result.stdout.split("\n")
+ content_lines = []
+ in_content = False
+ for line in lines:
+ if line.startswith("@@"):
+ in_content = True
+ continue
+ if in_content and line.strip():
+ content_lines.append(line)
+
+ if content_lines:
+ return "\n".join(content_lines)
+
+ result = subprocess.run(
+ [
+ "git",
+ "diff",
+ "--word-diff=plain",
+ "-U0",
+ "--no-index",
+ str(original_file),
+ str(modified_file),
+ ],
+ capture_output=True,
+ text=True,
+ )
+
+ if result.stdout.strip():
+ lines = result.stdout.split("\n")
+ content_lines = []
+ in_content = False
+ for line in lines:
+ if line.startswith("@@"):
+ in_content = True
+ continue
+ if in_content and line.strip():
+ content_lines.append(line)
+ return "\n".join(content_lines)
+
+ except (subprocess.CalledProcessError, FileNotFoundError, Exception):
+ pass
+
+ return None
+
+ def _remove_author_tracked_changes(self, root):
+ ins_tag = f"{{{self.namespaces['w']}}}ins"
+ del_tag = f"{{{self.namespaces['w']}}}del"
+ author_attr = f"{{{self.namespaces['w']}}}author"
+
+ for parent in root.iter():
+ to_remove = []
+ for child in parent:
+ if child.tag == ins_tag and child.get(author_attr) == self.author:
+ to_remove.append(child)
+ for elem in to_remove:
+ parent.remove(elem)
+
+ deltext_tag = f"{{{self.namespaces['w']}}}delText"
+ t_tag = f"{{{self.namespaces['w']}}}t"
+
+ for parent in root.iter():
+ to_process = []
+ for child in parent:
+ if child.tag == del_tag and child.get(author_attr) == self.author:
+ to_process.append((child, list(parent).index(child)))
+
+ for del_elem, del_index in reversed(to_process):
+ for elem in del_elem.iter():
+ if elem.tag == deltext_tag:
+ elem.tag = t_tag
+
+ for child in reversed(list(del_elem)):
+ parent.insert(del_index, child)
+ parent.remove(del_elem)
+
+ def _extract_text_content(self, root):
+ p_tag = f"{{{self.namespaces['w']}}}p"
+ t_tag = f"{{{self.namespaces['w']}}}t"
+
+ paragraphs = []
+ for p_elem in root.findall(f".//{p_tag}"):
+ text_parts = []
+ for t_elem in p_elem.findall(f".//{t_tag}"):
+ if t_elem.text:
+ text_parts.append(t_elem.text)
+ paragraph_text = "".join(text_parts)
+ if paragraph_text:
+ paragraphs.append(paragraph_text)
+
+ return "\n".join(paragraphs)
+
+
# Library module: import RedliningValidator instead of executing this file.
if __name__ == "__main__":
    raise RuntimeError("This module should not be run directly.")
diff --git a/.claude/skills/docx/scripts/templates/comments.xml b/.claude/skills/docx/scripts/templates/comments.xml
new file mode 100644
index 0000000..cd01a7d
--- /dev/null
+++ b/.claude/skills/docx/scripts/templates/comments.xml
@@ -0,0 +1,3 @@
+
+
+
diff --git a/.claude/skills/docx/scripts/templates/commentsExtended.xml b/.claude/skills/docx/scripts/templates/commentsExtended.xml
new file mode 100644
index 0000000..411003c
--- /dev/null
+++ b/.claude/skills/docx/scripts/templates/commentsExtended.xml
@@ -0,0 +1,3 @@
+
+
+
diff --git a/.claude/skills/docx/scripts/templates/commentsExtensible.xml b/.claude/skills/docx/scripts/templates/commentsExtensible.xml
new file mode 100644
index 0000000..f5572d7
--- /dev/null
+++ b/.claude/skills/docx/scripts/templates/commentsExtensible.xml
@@ -0,0 +1,3 @@
+
+
+
diff --git a/.claude/skills/docx/scripts/templates/commentsIds.xml b/.claude/skills/docx/scripts/templates/commentsIds.xml
new file mode 100644
index 0000000..32f1629
--- /dev/null
+++ b/.claude/skills/docx/scripts/templates/commentsIds.xml
@@ -0,0 +1,3 @@
+
+
+
diff --git a/.claude/skills/docx/scripts/templates/people.xml b/.claude/skills/docx/scripts/templates/people.xml
new file mode 100644
index 0000000..3803d2d
--- /dev/null
+++ b/.claude/skills/docx/scripts/templates/people.xml
@@ -0,0 +1,3 @@
+
+
+
diff --git a/.claude/skills/frontend-design/.openskills.json b/.claude/skills/frontend-design/.openskills.json
new file mode 100644
index 0000000..a445f48
--- /dev/null
+++ b/.claude/skills/frontend-design/.openskills.json
@@ -0,0 +1,7 @@
+{
+ "source": "anthropics/skills",
+ "sourceType": "git",
+ "repoUrl": "https://github.com/anthropics/skills",
+ "subpath": "skills\\frontend-design",
+ "installedAt": "2026-03-02T09:19:50.104Z"
+}
\ No newline at end of file
diff --git a/.claude/skills/frontend-design/LICENSE.txt b/.claude/skills/frontend-design/LICENSE.txt
new file mode 100644
index 0000000..f433b1a
--- /dev/null
+++ b/.claude/skills/frontend-design/LICENSE.txt
@@ -0,0 +1,177 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
diff --git a/.claude/skills/frontend-design/SKILL.md b/.claude/skills/frontend-design/SKILL.md
new file mode 100644
index 0000000..5be498e
--- /dev/null
+++ b/.claude/skills/frontend-design/SKILL.md
@@ -0,0 +1,42 @@
+---
+name: frontend-design
+description: Create distinctive, production-grade frontend interfaces with high design quality. Use this skill when the user asks to build web components, pages, artifacts, posters, or applications (examples include websites, landing pages, dashboards, React components, HTML/CSS layouts, or when styling/beautifying any web UI). Generates creative, polished code and UI design that avoids generic AI aesthetics.
+license: Complete terms in LICENSE.txt
+---
+
+This skill guides creation of distinctive, production-grade frontend interfaces that avoid generic "AI slop" aesthetics. Implement real working code with exceptional attention to aesthetic details and creative choices.
+
+The user provides frontend requirements: a component, page, application, or interface to build. They may include context about the purpose, audience, or technical constraints.
+
+## Design Thinking
+
+Before coding, understand the context and commit to a BOLD aesthetic direction:
+- **Purpose**: What problem does this interface solve? Who uses it?
+- **Tone**: Pick an extreme: brutally minimal, maximalist chaos, retro-futuristic, organic/natural, luxury/refined, playful/toy-like, editorial/magazine, brutalist/raw, art deco/geometric, soft/pastel, industrial/utilitarian, etc. There are so many flavors to choose from. Use these for inspiration but design one that is true to the aesthetic direction.
+- **Constraints**: Technical requirements (framework, performance, accessibility).
+- **Differentiation**: What makes this UNFORGETTABLE? What's the one thing someone will remember?
+
+**CRITICAL**: Choose a clear conceptual direction and execute it with precision. Bold maximalism and refined minimalism both work - the key is intentionality, not intensity.
+
+Then implement working code (HTML/CSS/JS, React, Vue, etc.) that is:
+- Production-grade and functional
+- Visually striking and memorable
+- Cohesive with a clear aesthetic point-of-view
+- Meticulously refined in every detail
+
+## Frontend Aesthetics Guidelines
+
+Focus on:
+- **Typography**: Choose fonts that are beautiful, unique, and interesting. Avoid generic fonts like Arial and Inter; opt instead for distinctive choices that elevate the frontend's aesthetics; unexpected, characterful font choices. Pair a distinctive display font with a refined body font.
+- **Color & Theme**: Commit to a cohesive aesthetic. Use CSS variables for consistency. Dominant colors with sharp accents outperform timid, evenly-distributed palettes.
+- **Motion**: Use animations for effects and micro-interactions. Prioritize CSS-only solutions for HTML. Use Motion library for React when available. Focus on high-impact moments: one well-orchestrated page load with staggered reveals (animation-delay) creates more delight than scattered micro-interactions. Use scroll-triggering and hover states that surprise.
+- **Spatial Composition**: Unexpected layouts. Asymmetry. Overlap. Diagonal flow. Grid-breaking elements. Generous negative space OR controlled density.
+- **Backgrounds & Visual Details**: Create atmosphere and depth rather than defaulting to solid colors. Add contextual effects and textures that match the overall aesthetic. Apply creative forms like gradient meshes, noise textures, geometric patterns, layered transparencies, dramatic shadows, decorative borders, custom cursors, and grain overlays.
+
+NEVER use generic AI-generated aesthetics like overused font families (Inter, Roboto, Arial, system fonts), cliched color schemes (particularly purple gradients on white backgrounds), predictable layouts and component patterns, and cookie-cutter design that lacks context-specific character.
+
+Interpret creatively and make unexpected choices that feel genuinely designed for the context. No design should be the same. Vary between light and dark themes, different fonts, different aesthetics. NEVER converge on common choices (Space Grotesk, for example) across generations.
+
+**IMPORTANT**: Match implementation complexity to the aesthetic vision. Maximalist designs need elaborate code with extensive animations and effects. Minimalist or refined designs need restraint, precision, and careful attention to spacing, typography, and subtle details. Elegance comes from executing the vision well.
+
+Remember: Claude is capable of extraordinary creative work. Don't hold back, show what can truly be created when thinking outside the box and committing fully to a distinctive vision.
diff --git a/.claude/skills/internal-comms/.openskills.json b/.claude/skills/internal-comms/.openskills.json
new file mode 100644
index 0000000..ae0f1ec
--- /dev/null
+++ b/.claude/skills/internal-comms/.openskills.json
@@ -0,0 +1,7 @@
+{
+ "source": "anthropics/skills",
+ "sourceType": "git",
+ "repoUrl": "https://github.com/anthropics/skills",
+ "subpath": "skills\\internal-comms",
+ "installedAt": "2026-03-02T09:19:50.108Z"
+}
\ No newline at end of file
diff --git a/.claude/skills/internal-comms/LICENSE.txt b/.claude/skills/internal-comms/LICENSE.txt
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/.claude/skills/internal-comms/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/.claude/skills/internal-comms/SKILL.md b/.claude/skills/internal-comms/SKILL.md
new file mode 100644
index 0000000..56ea935
--- /dev/null
+++ b/.claude/skills/internal-comms/SKILL.md
@@ -0,0 +1,32 @@
+---
+name: internal-comms
+description: A set of resources to help me write all kinds of internal communications, using the formats that my company likes to use. Claude should use this skill whenever asked to write some sort of internal communications (status reports, leadership updates, 3P updates, company newsletters, FAQs, incident reports, project updates, etc.).
+license: Complete terms in LICENSE.txt
+---
+
+## When to use this skill
+To write internal communications, use this skill for:
+- 3P updates (Progress, Plans, Problems)
+- Company newsletters
+- FAQ responses
+- Status reports
+- Leadership updates
+- Project updates
+- Incident reports
+
+## How to use this skill
+
+To write any internal communication:
+
+1. **Identify the communication type** from the request
+2. **Load the appropriate guideline file** from the `examples/` directory:
+ - `examples/3p-updates.md` - For Progress/Plans/Problems team updates
+ - `examples/company-newsletter.md` - For company-wide newsletters
+ - `examples/faq-answers.md` - For answering frequently asked questions
+ - `examples/general-comms.md` - For anything else that doesn't explicitly match one of the above
+3. **Follow the specific instructions** in that file for formatting, tone, and content gathering
+
+If the communication type doesn't match any existing guideline, ask for clarification or more context about the desired format.
+
+## Keywords
+3P updates, company newsletter, company comms, weekly update, faqs, common questions, updates, internal comms
diff --git a/.claude/skills/internal-comms/examples/3p-updates.md b/.claude/skills/internal-comms/examples/3p-updates.md
new file mode 100644
index 0000000..5329bfb
--- /dev/null
+++ b/.claude/skills/internal-comms/examples/3p-updates.md
@@ -0,0 +1,47 @@
+## Instructions
+You are being asked to write a 3P update. 3P updates stand for "Progress, Plans, Problems." The main audience is executives, leadership, other teammates, etc. They're meant to be very succinct and to-the-point: think something you can read in 30-60sec or less. They're also for people with some, but not a lot of context on what the team does.
+
+3Ps can cover a team of any size, ranging all the way up to the entire company. The bigger the team, the less granular the tasks should be. For example, "mobile team" might have "shipped feature" or "fixed bugs," whereas the company might have really meaty 3Ps, like "hired 20 new people" or "closed 10 new deals."
+
+They represent the work of the team across a time period, almost always one week. They include three sections:
+1) Progress: what the team has accomplished over the past time period. Focus mainly on things shipped, milestones achieved, tasks created, etc.
+2) Plans: what the team plans to do over the next time period. Focus on what things are top-of-mind, really high priority, etc. for the team.
+3) Problems: anything that is slowing the team down. This could be things like too few people, bugs or blockers that are preventing the team from moving forward, some deal that fell through, etc.
+
+Before writing them, make sure that you know the team name. If it's not specified, you can ask explicitly what the team name you're writing for is.
+
+
+## Tools Available
+Whenever possible, try to pull from available sources to get the information you need:
+- Slack: posts from team members with their updates - ideally look for posts in large channels with lots of reactions
+- Google Drive: docs written from critical team members with lots of views
+- Email: emails with lots of responses or lots of content that seems relevant
+- Calendar: non-recurring meetings that have a lot of importance, like product reviews, etc.
+
+
+Try to gather as much context as you can, focusing on the things that covered the time period you're writing for:
+- Progress: anything between a week ago and today
+- Plans: anything from today to the next week
+- Problems: anything between a week ago and today
+
+
+If you don't have access, you can ask the user for things they want to cover. They might also include these things to you directly, in which case you're mostly just formatting for this particular format.
+
+## Workflow
+
+1. **Clarify scope**: Confirm the team name and time period (usually past week for Progress/Problems, next
+week for Plans)
+2. **Gather information**: Use available tools or ask the user directly
+3. **Draft the update**: Follow the strict formatting guidelines
+4. **Review**: Ensure it's concise (30-60 seconds to read) and data-driven
+
+## Formatting
+
+The format is always the same, very strict formatting. Never use any formatting other than this. Pick an emoji that is fun and captures the vibe of the team and update.
+
+[pick an emoji] [Team Name] (Dates Covered, usually a week)
+Progress: [1-3 sentences of content]
+Plans: [1-3 sentences of content]
+Problems: [1-3 sentences of content]
+
+Each section should be no more than 1-3 sentences: clear, to the point. It should be data-driven, and generally include metrics where possible. The tone should be very matter-of-fact, not super prose-heavy.
\ No newline at end of file
diff --git a/.claude/skills/internal-comms/examples/company-newsletter.md b/.claude/skills/internal-comms/examples/company-newsletter.md
new file mode 100644
index 0000000..4997a07
--- /dev/null
+++ b/.claude/skills/internal-comms/examples/company-newsletter.md
@@ -0,0 +1,65 @@
+## Instructions
+You are being asked to write a company-wide newsletter update. You are meant to summarize the past week/month of a company in the form of a newsletter that the entire company will read. It should be maybe ~20-25 bullet points long. It will be sent via Slack and email, so make it consumable for that.
+
+Ideally it includes the following attributes:
+- Lots of links: pulling documents from Google Drive that are very relevant, linking to prominent Slack messages in announce channels and from executives, perhaps referencing emails that went company-wide, highlighting significant things that have happened in the company.
+- Short and to-the-point: each bullet should probably be no longer than ~1-2 sentences
+- Use the "we" tense, as you are part of the company. Many of the bullets should say "we did this" or "we did that"
+
+## Tools to use
+If you have access to the following tools, please try to use them. If not, you can also let the user know directly that their responses would be better if they gave them access.
+
+- Slack: look for messages in channels with lots of people, with lots of reactions or lots of responses within the thread
+- Email: look for things from executives that discuss company-wide announcements
+- Calendar: if there were meetings with large attendee lists, particularly things like All-Hands meetings, big company announcements, etc. If there were documents attached to those meetings, those are great links to include.
+- Documents: if there were new docs published in the last week or two that got a lot of attention, you can link them. These should be things like company-wide vision docs, plans for the upcoming quarter or half, things authored by critical executives, etc.
+- External press: if you see references to articles or press we've received over the past week, that could be really cool too.
+
+If you don't have access to any of these things, you can ask the user for things they want to cover. In this case, you'll mostly just be polishing up and fitting to this format more directly.
+
+## Sections
+The company is pretty big: 1000+ people. There are a variety of different teams and initiatives going on across the company. To make sure the update works well, try breaking it into sections of similar things. You might break into clusters like {product development, go to market, finance} or {recruiting, execution, vision}, or {external news, internal news} etc. Try to make sure the different areas of the company are highlighted well.
+
+## Prioritization
+Focus on:
+- Company-wide impact (not team-specific details)
+- Announcements from leadership
+- Major milestones and achievements
+- Information that affects most employees
+- External recognition or press
+
+Avoid:
+- Overly granular team updates (save those for 3Ps)
+- Information only relevant to small groups
+- Duplicate information already communicated
+
+## Example Formats
+
+:megaphone: Company Announcements
+- Announcement 1
+- Announcement 2
+- Announcement 3
+
+:dart: Progress on Priorities
+- Area 1
+ - Sub-area 1
+ - Sub-area 2
+ - Sub-area 3
+- Area 2
+ - Sub-area 1
+ - Sub-area 2
+ - Sub-area 3
+- Area 3
+ - Sub-area 1
+ - Sub-area 2
+ - Sub-area 3
+
+:pillar: Leadership Updates
+- Post 1
+- Post 2
+- Post 3
+
+:thread: Social Updates
+- Update 1
+- Update 2
+- Update 3
diff --git a/.claude/skills/internal-comms/examples/faq-answers.md b/.claude/skills/internal-comms/examples/faq-answers.md
new file mode 100644
index 0000000..395262a
--- /dev/null
+++ b/.claude/skills/internal-comms/examples/faq-answers.md
@@ -0,0 +1,30 @@
+## Instructions
+You are an assistant for answering questions that are being asked across the company. Every week, there are lots of questions that get asked across the company, and your goal is to try to summarize what those questions are. We want our company to be well-informed and on the same page, so your job is to produce a set of frequently asked questions that our employees are asking and attempt to answer them. Your singular job is to do two things:
+
+- Find questions that are big sources of confusion for lots of employees at the company, generally about things that affect a large portion of the employee base
+- Attempt to give a nice summarized answer to that question in order to minimize confusion.
+
+Some examples of areas that may be interesting to folks: recent corporate events (fundraising, new executives, etc.), upcoming launches, hiring progress, changes to vision or focus, etc.
+
+
+## Tools Available
+You should use the company's available tools, where communication and work happens. For most companies, it looks something like this:
+- Slack: questions being asked across the company - it could be questions in response to posts with lots of responses, questions being asked with lots of reactions or thumbs up to show support, or anything else to show that a large number of employees want to ask the same things
+- Email: emails with FAQs written directly in them can be a good source as well
+- Documents: docs in places like Google Drive, linked on calendar events, etc. can also be a good source of FAQs, either directly added or inferred based on the contents of the doc
+
+## Formatting
+The formatting should be pretty basic:
+
+- *Question*: [insert question - 1 sentence]
+- *Answer*: [insert answer - 1-2 sentences]
+
+## Guidance
+Make sure you're being holistic in your questions. Don't focus too much on just the user in question or the team they are a part of, but try to capture the entire company. Try to be as holistic as you can in reading all the tools available, producing responses that are relevant to all at the company.
+
+## Answer Guidelines
+- Base answers on official company communications when possible
+- If information is uncertain, indicate that clearly
+- Link to authoritative sources (docs, announcements, emails)
+- Keep tone professional but approachable
+- Flag if a question requires executive input or official response
\ No newline at end of file
diff --git a/.claude/skills/internal-comms/examples/general-comms.md b/.claude/skills/internal-comms/examples/general-comms.md
new file mode 100644
index 0000000..0ea9770
--- /dev/null
+++ b/.claude/skills/internal-comms/examples/general-comms.md
@@ -0,0 +1,16 @@
+ ## Instructions
+ You are being asked to write internal company communication that doesn't fit into the standard formats (3P
+ updates, newsletters, or FAQs).
+
+ Before proceeding:
+ 1. Ask the user about their target audience
+ 2. Understand the communication's purpose
+ 3. Clarify the desired tone (formal, casual, urgent, informational)
+ 4. Confirm any specific formatting requirements
+
+ Use these general principles:
+ - Be clear and concise
+ - Use active voice
+ - Put the most important information first
+ - Include relevant links and references
+ - Match the company's communication style
\ No newline at end of file
diff --git a/.claude/skills/mcp-builder/.openskills.json b/.claude/skills/mcp-builder/.openskills.json
new file mode 100644
index 0000000..6cf1c09
--- /dev/null
+++ b/.claude/skills/mcp-builder/.openskills.json
@@ -0,0 +1,7 @@
+{
+ "source": "anthropics/skills",
+ "sourceType": "git",
+ "repoUrl": "https://github.com/anthropics/skills",
+ "subpath": "skills\\mcp-builder",
+ "installedAt": "2026-03-02T09:19:50.113Z"
+}
\ No newline at end of file
diff --git a/.claude/skills/mcp-builder/LICENSE.txt b/.claude/skills/mcp-builder/LICENSE.txt
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/.claude/skills/mcp-builder/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/.claude/skills/mcp-builder/SKILL.md b/.claude/skills/mcp-builder/SKILL.md
new file mode 100644
index 0000000..8a1a77a
--- /dev/null
+++ b/.claude/skills/mcp-builder/SKILL.md
@@ -0,0 +1,236 @@
+---
+name: mcp-builder
+description: Guide for creating high-quality MCP (Model Context Protocol) servers that enable LLMs to interact with external services through well-designed tools. Use when building MCP servers to integrate external APIs or services, whether in Python (FastMCP) or Node/TypeScript (MCP SDK).
+license: Complete terms in LICENSE.txt
+---
+
+# MCP Server Development Guide
+
+## Overview
+
+Create MCP (Model Context Protocol) servers that enable LLMs to interact with external services through well-designed tools. The quality of an MCP server is measured by how well it enables LLMs to accomplish real-world tasks.
+
+---
+
+# Process
+
+## 🚀 High-Level Workflow
+
+Creating a high-quality MCP server involves four main phases:
+
+### Phase 1: Deep Research and Planning
+
+#### 1.1 Understand Modern MCP Design
+
+**API Coverage vs. Workflow Tools:**
+Balance comprehensive API endpoint coverage with specialized workflow tools. Workflow tools can be more convenient for specific tasks, while comprehensive coverage gives agents flexibility to compose operations. Performance varies by client—some clients benefit from code execution that combines basic tools, while others work better with higher-level workflows. When uncertain, prioritize comprehensive API coverage.
+
+**Tool Naming and Discoverability:**
+Clear, descriptive tool names help agents find the right tools quickly. Use consistent prefixes (e.g., `github_create_issue`, `github_list_repos`) and action-oriented naming.
+
+**Context Management:**
+Agents benefit from concise tool descriptions and the ability to filter/paginate results. Design tools that return focused, relevant data. Some clients support code execution which can help agents filter and process data efficiently.
+
+**Actionable Error Messages:**
+Error messages should guide agents toward solutions with specific suggestions and next steps.
+
+#### 1.2 Study MCP Protocol Documentation
+
+**Navigate the MCP specification:**
+
+Start with the sitemap to find relevant pages: `https://modelcontextprotocol.io/sitemap.xml`
+
+Then fetch specific pages with `.md` suffix for markdown format (e.g., `https://modelcontextprotocol.io/specification/draft.md`).
+
+Key pages to review:
+- Specification overview and architecture
+- Transport mechanisms (streamable HTTP, stdio)
+- Tool, resource, and prompt definitions
+
+#### 1.3 Study Framework Documentation
+
+**Recommended stack:**
+- **Language**: TypeScript (high-quality SDK support and good compatibility in many execution environments e.g. MCPB. Plus AI models are good at generating TypeScript code, benefiting from its broad usage, static typing and good linting tools)
+- **Transport**: Streamable HTTP for remote servers, using stateless JSON (simpler to scale and maintain, as opposed to stateful sessions and streaming responses). stdio for local servers.
+
+**Load framework documentation:**
+
+- **MCP Best Practices**: [📋 View Best Practices](./reference/mcp_best_practices.md) - Core guidelines
+
+**For TypeScript (recommended):**
+- **TypeScript SDK**: Use WebFetch to load `https://raw.githubusercontent.com/modelcontextprotocol/typescript-sdk/main/README.md`
+- [⚡ TypeScript Guide](./reference/node_mcp_server.md) - TypeScript patterns and examples
+
+**For Python:**
+- **Python SDK**: Use WebFetch to load `https://raw.githubusercontent.com/modelcontextprotocol/python-sdk/main/README.md`
+- [🐍 Python Guide](./reference/python_mcp_server.md) - Python patterns and examples
+
+#### 1.4 Plan Your Implementation
+
+**Understand the API:**
+Review the service's API documentation to identify key endpoints, authentication requirements, and data models. Use web search and WebFetch as needed.
+
+**Tool Selection:**
+Prioritize comprehensive API coverage. List endpoints to implement, starting with the most common operations.
+
+---
+
+### Phase 2: Implementation
+
+#### 2.1 Set Up Project Structure
+
+See language-specific guides for project setup:
+- [⚡ TypeScript Guide](./reference/node_mcp_server.md) - Project structure, package.json, tsconfig.json
+- [🐍 Python Guide](./reference/python_mcp_server.md) - Module organization, dependencies
+
+#### 2.2 Implement Core Infrastructure
+
+Create shared utilities:
+- API client with authentication
+- Error handling helpers
+- Response formatting (JSON/Markdown)
+- Pagination support
+
+#### 2.3 Implement Tools
+
+For each tool:
+
+**Input Schema:**
+- Use Zod (TypeScript) or Pydantic (Python)
+- Include constraints and clear descriptions
+- Add examples in field descriptions
+
+**Output Schema:**
+- Define `outputSchema` where possible for structured data
+- Use `structuredContent` in tool responses (TypeScript SDK feature)
+- Helps clients understand and process tool outputs
+
+**Tool Description:**
+- Concise summary of functionality
+- Parameter descriptions
+- Return type schema
+
+**Implementation:**
+- Async/await for I/O operations
+- Proper error handling with actionable messages
+- Support pagination where applicable
+- Return both text content and structured data when using modern SDKs
+
+**Annotations:**
+- `readOnlyHint`: true/false
+- `destructiveHint`: true/false
+- `idempotentHint`: true/false
+- `openWorldHint`: true/false
+
+---
+
+### Phase 3: Review and Test
+
+#### 3.1 Code Quality
+
+Review for:
+- No duplicated code (DRY principle)
+- Consistent error handling
+- Full type coverage
+- Clear tool descriptions
+
+#### 3.2 Build and Test
+
+**TypeScript:**
+- Run `npm run build` to verify compilation
+- Test with MCP Inspector: `npx @modelcontextprotocol/inspector`
+
+**Python:**
+- Verify syntax: `python -m py_compile your_server.py`
+- Test with MCP Inspector
+
+See language-specific guides for detailed testing approaches and quality checklists.
+
+---
+
+### Phase 4: Create Evaluations
+
+After implementing your MCP server, create comprehensive evaluations to test its effectiveness.
+
+**Load [✅ Evaluation Guide](./reference/evaluation.md) for complete evaluation guidelines.**
+
+#### 4.1 Understand Evaluation Purpose
+
+Use evaluations to test whether LLMs can effectively use your MCP server to answer realistic, complex questions.
+
+#### 4.2 Create 10 Evaluation Questions
+
+To create effective evaluations, follow the process outlined in the evaluation guide:
+
+1. **Tool Inspection**: List available tools and understand their capabilities
+2. **Content Exploration**: Use READ-ONLY operations to explore available data
+3. **Question Generation**: Create 10 complex, realistic questions
+4. **Answer Verification**: Solve each question yourself to verify answers
+
+#### 4.3 Evaluation Requirements
+
+Ensure each question is:
+- **Independent**: Not dependent on other questions
+- **Read-only**: Only non-destructive operations required
+- **Complex**: Requiring multiple tool calls and deep exploration
+- **Realistic**: Based on real use cases humans would care about
+- **Verifiable**: Single, clear answer that can be verified by string comparison
+- **Stable**: Answer won't change over time
+
+#### 4.4 Output Format
+
+Create an XML file with this structure:
+
+```xml
+
+
+ Find discussions about AI model launches with animal codenames. One model needed a specific safety designation that uses the format ASL-X. What number X was being determined for the model named after a spotted wild cat?
+ 3
+
+
+
+```
+
+---
+
+# Reference Files
+
+## 📚 Documentation Library
+
+Load these resources as needed during development:
+
+### Core MCP Documentation (Load First)
+- **MCP Protocol**: Start with sitemap at `https://modelcontextprotocol.io/sitemap.xml`, then fetch specific pages with `.md` suffix
+- [📋 MCP Best Practices](./reference/mcp_best_practices.md) - Universal MCP guidelines including:
+ - Server and tool naming conventions
+ - Response format guidelines (JSON vs Markdown)
+ - Pagination best practices
+ - Transport selection (streamable HTTP vs stdio)
+ - Security and error handling standards
+
+### SDK Documentation (Load During Phase 1/2)
+- **Python SDK**: Fetch from `https://raw.githubusercontent.com/modelcontextprotocol/python-sdk/main/README.md`
+- **TypeScript SDK**: Fetch from `https://raw.githubusercontent.com/modelcontextprotocol/typescript-sdk/main/README.md`
+
+### Language-Specific Implementation Guides (Load During Phase 2)
+- [🐍 Python Implementation Guide](./reference/python_mcp_server.md) - Complete Python/FastMCP guide with:
+ - Server initialization patterns
+ - Pydantic model examples
+ - Tool registration with `@mcp.tool`
+ - Complete working examples
+ - Quality checklist
+
+- [⚡ TypeScript Implementation Guide](./reference/node_mcp_server.md) - Complete TypeScript guide with:
+ - Project structure
+ - Zod schema patterns
+ - Tool registration with `server.registerTool`
+ - Complete working examples
+ - Quality checklist
+
+### Evaluation Guide (Load During Phase 4)
+- [✅ Evaluation Guide](./reference/evaluation.md) - Complete evaluation creation guide with:
+ - Question creation guidelines
+ - Answer verification strategies
+ - XML format specifications
+ - Example questions and answers
+ - Running an evaluation with the provided scripts
diff --git a/.claude/skills/mcp-builder/reference/evaluation.md b/.claude/skills/mcp-builder/reference/evaluation.md
new file mode 100644
index 0000000..87e9bb7
--- /dev/null
+++ b/.claude/skills/mcp-builder/reference/evaluation.md
@@ -0,0 +1,602 @@
+# MCP Server Evaluation Guide
+
+## Overview
+
+This document provides guidance on creating comprehensive evaluations for MCP servers. Evaluations test whether LLMs can effectively use your MCP server to answer realistic, complex questions using only the tools provided.
+
+---
+
+## Quick Reference
+
+### Evaluation Requirements
+- Create 10 human-readable questions
+- Questions must be READ-ONLY, INDEPENDENT, NON-DESTRUCTIVE
+- Each question requires multiple tool calls (potentially dozens)
+- Answers must be single, verifiable values
+- Answers must be STABLE (won't change over time)
+
+### Output Format
+```xml
+<evaluation>
+  <qa_pair>
+    <question>Your question here</question>
+    <answer>Single verifiable answer</answer>
+  </qa_pair>
+</evaluation>
+```
+
+---
+
+## Purpose of Evaluations
+
+The measure of quality of an MCP server is NOT how well or comprehensively the server implements tools, but how well these implementations (input/output schemas, docstrings/descriptions, functionality) enable LLMs with no other context and access ONLY to the MCP servers to answer realistic and difficult questions.
+
+## Evaluation Overview
+
+Create 10 human-readable questions requiring ONLY READ-ONLY, INDEPENDENT, NON-DESTRUCTIVE, and IDEMPOTENT operations to answer. Each question should be:
+- Realistic
+- Clear and concise
+- Unambiguous
+- Complex, requiring potentially dozens of tool calls or steps
+- Answerable with a single, verifiable value that you identify in advance
+
+## Question Guidelines
+
+### Core Requirements
+
+1. **Questions MUST be independent**
+ - Each question should NOT depend on the answer to any other question
+ - Should not assume prior write operations from processing another question
+
+2. **Questions MUST require ONLY NON-DESTRUCTIVE AND IDEMPOTENT tool use**
+ - Should not instruct or require modifying state to arrive at the correct answer
+
+3. **Questions must be REALISTIC, CLEAR, CONCISE, and COMPLEX**
+ - Must require another LLM to use multiple (potentially dozens of) tools or steps to answer
+
+### Complexity and Depth
+
+4. **Questions must require deep exploration**
+ - Consider multi-hop questions requiring multiple sub-questions and sequential tool calls
+ - Each step should benefit from information found in previous steps
+
+5. **Questions may require extensive paging**
+ - May need paging through multiple pages of results
+ - May require querying old data (1-2 years out-of-date) to find niche information
+ - The questions must be DIFFICULT
+
+6. **Questions must require deep understanding**
+ - Rather than surface-level knowledge
+ - May pose complex ideas as True/False questions requiring evidence
+ - May use multiple-choice format where LLM must search different hypotheses
+
+7. **Questions must not be solvable with straightforward keyword search**
+ - Do not include specific keywords from the target content
+ - Use synonyms, related concepts, or paraphrases
+ - Require multiple searches, analyzing multiple related items, extracting context, then deriving the answer
+
+### Tool Testing
+
+8. **Questions should stress-test tool return values**
+ - May elicit tools returning large JSON objects or lists, overwhelming the LLM
+ - Should require understanding multiple modalities of data:
+ - IDs and names
+ - Timestamps and datetimes (months, days, years, seconds)
+ - File IDs, names, extensions, and mimetypes
+ - URLs, GIDs, etc.
+ - Should probe the tool's ability to return all useful forms of data
+
+9. **Questions should MOSTLY reflect real human use cases**
+ - The kinds of information retrieval tasks that HUMANS assisted by an LLM would care about
+
+10. **Questions may require dozens of tool calls**
+ - This challenges LLMs with limited context
+ - Encourages MCP server tools to reduce information returned
+
+11. **Include ambiguous questions**
+ - May be ambiguous OR require difficult decisions on which tools to call
+ - Force the LLM to potentially make mistakes or misinterpret
+ - Ensure that despite AMBIGUITY, there is STILL A SINGLE VERIFIABLE ANSWER
+
+### Stability
+
+12. **Questions must be designed so the answer DOES NOT CHANGE**
+ - Do not ask questions that rely on "current state" which is dynamic
+ - For example, do not count:
+ - Number of reactions to a post
+ - Number of replies to a thread
+ - Number of members in a channel
+
+13. **DO NOT let the MCP server RESTRICT the kinds of questions you create**
+ - Create challenging and complex questions
+ - Some may not be solvable with the available MCP server tools
+ - Questions may require specific output formats (datetime vs. epoch time, JSON vs. MARKDOWN)
+ - Questions may require dozens of tool calls to complete
+
+## Answer Guidelines
+
+### Verification
+
+1. **Answers must be VERIFIABLE via direct string comparison**
+ - If the answer can be re-written in many formats, clearly specify the output format in the QUESTION
+ - Examples: "Use YYYY/MM/DD.", "Respond True or False.", "Answer A, B, C, or D and nothing else."
+ - Answer should be a single VERIFIABLE value such as:
+ - User ID, user name, display name, first name, last name
+ - Channel ID, channel name
+ - Message ID, string
+ - URL, title
+ - Numerical quantity
+ - Timestamp, datetime
+ - Boolean (for True/False questions)
+ - Email address, phone number
+ - File ID, file name, file extension
+ - Multiple choice answer
+ - Answers must not require special formatting or complex, structured output
+ - Answer will be verified using DIRECT STRING COMPARISON
+
+### Readability
+
+2. **Answers should generally prefer HUMAN-READABLE formats**
+ - Examples: names, first name, last name, datetime, file name, message string, URL, yes/no, true/false, a/b/c/d
+ - Rather than opaque IDs (though IDs are acceptable)
+ - The VAST MAJORITY of answers should be human-readable
+
+### Stability
+
+3. **Answers must be STABLE/STATIONARY**
+ - Look at old content (e.g., conversations that have ended, projects that have launched, questions answered)
+ - Create QUESTIONS based on "closed" concepts that will always return the same answer
+ - Questions may ask to consider a fixed time window to insulate from non-stationary answers
+ - Rely on context UNLIKELY to change
+ - Example: if finding a paper name, be SPECIFIC enough so answer is not confused with papers published later
+
+4. **Answers must be CLEAR and UNAMBIGUOUS**
+ - Questions must be designed so there is a single, clear answer
+ - Answer can be derived from using the MCP server tools
+
+### Diversity
+
+5. **Answers must be DIVERSE**
+ - Answer should be a single VERIFIABLE value in diverse modalities and formats
+ - User concept: user ID, user name, display name, first name, last name, email address, phone number
+ - Channel concept: channel ID, channel name, channel topic
+ - Message concept: message ID, message string, timestamp, month, day, year
+
+6. **Answers must NOT be complex structures**
+ - Not a list of values
+ - Not a complex object
+ - Not a list of IDs or strings
+ - Not natural language text
+ - UNLESS the answer can be straightforwardly verified using DIRECT STRING COMPARISON
+ - And can be realistically reproduced
+ - It should be unlikely that an LLM would return the same list in any other order or format
+
+## Evaluation Process
+
+### Step 1: Documentation Inspection
+
+Read the documentation of the target API to understand:
+- Available endpoints and functionality
+- If ambiguity exists, fetch additional information from the web
+- Parallelize this step AS MUCH AS POSSIBLE
+- Ensure each subagent is ONLY examining documentation from the file system or on the web
+
+### Step 2: Tool Inspection
+
+List the tools available in the MCP server:
+- Inspect the MCP server directly
+- Understand input/output schemas, docstrings, and descriptions
+- WITHOUT calling the tools themselves at this stage
+
+### Step 3: Developing Understanding
+
+Repeat steps 1 & 2 until you have a good understanding:
+- Iterate multiple times
+- Think about the kinds of tasks you want to create
+- Refine your understanding
+- At NO stage should you READ the code of the MCP server implementation itself
+- Use your intuition and understanding to create reasonable, realistic, but VERY challenging tasks
+
+### Step 4: Read-Only Content Inspection
+
+After understanding the API and tools, USE the MCP server tools:
+- Inspect content using READ-ONLY and NON-DESTRUCTIVE operations ONLY
+- Goal: identify specific content (e.g., users, channels, messages, projects, tasks) for creating realistic questions
+- Should NOT call any tools that modify state
+- Will NOT read the code of the MCP server implementation itself
+- Parallelize this step with individual sub-agents pursuing independent explorations
+- Ensure each subagent is only performing READ-ONLY, NON-DESTRUCTIVE, and IDEMPOTENT operations
+- BE CAREFUL: SOME TOOLS may return LOTS OF DATA which would cause you to run out of CONTEXT
+- Make INCREMENTAL, SMALL, AND TARGETED tool calls for exploration
+- In all tool call requests, use the `limit` parameter to limit results (<10)
+- Use pagination
+
+### Step 5: Task Generation
+
+After inspecting the content, create 10 human-readable questions:
+- An LLM should be able to answer these with the MCP server
+- Follow all question and answer guidelines above
+
+## Output Format
+
+Each QA pair consists of a question and an answer. The output should be an XML file with this structure:
+
+```xml
+<evaluation>
+  <qa_pair>
+    <question>Find the project created in Q2 2024 with the highest number of completed tasks. What is the project name?</question>
+    <answer>Website Redesign</answer>
+  </qa_pair>
+  <qa_pair>
+    <question>Search for issues labeled as "bug" that were closed in March 2024. Which user closed the most issues? Provide their username.</question>
+    <answer>sarah_dev</answer>
+  </qa_pair>
+  <qa_pair>
+    <question>Look for pull requests that modified files in the /api directory and were merged between January 1 and January 31, 2024. How many different contributors worked on these PRs?</question>
+    <answer>7</answer>
+  </qa_pair>
+  <qa_pair>
+    <question>Find the repository with the most stars that was created before 2023. What is the repository name?</question>
+    <answer>data-pipeline</answer>
+  </qa_pair>
+</evaluation>
+```
+
+## Evaluation Examples
+
+### Good Questions
+
+**Example 1: Multi-hop question requiring deep exploration (GitHub MCP)**
+```xml
+<qa_pair>
+  <question>Find the repository that was archived in Q3 2023 and had previously been the most forked project in the organization. What was the primary programming language used in that repository?</question>
+  <answer>Python</answer>
+</qa_pair>
+```
+
+This question is good because:
+- Requires multiple searches to find archived repositories
+- Needs to identify which had the most forks before archival
+- Requires examining repository details for the language
+- Answer is a simple, verifiable value
+- Based on historical (closed) data that won't change
+
+**Example 2: Requires understanding context without keyword matching (Project Management MCP)**
+```xml
+<qa_pair>
+  <question>Locate the initiative focused on improving customer onboarding that was completed in late 2023. The project lead created a retrospective document after completion. What was the lead's role title at that time?</question>
+  <answer>Product Manager</answer>
+</qa_pair>
+```
+
+This question is good because:
+- Doesn't use specific project name ("initiative focused on improving customer onboarding")
+- Requires finding completed projects from specific timeframe
+- Needs to identify the project lead and their role
+- Requires understanding context from retrospective documents
+- Answer is human-readable and stable
+- Based on completed work (won't change)
+
+**Example 3: Complex aggregation requiring multiple steps (Issue Tracker MCP)**
+```xml
+<qa_pair>
+  <question>Among all bugs reported in January 2024 that were marked as critical priority, which assignee resolved the highest percentage of their assigned bugs within 48 hours? Provide the assignee's username.</question>
+  <answer>alex_eng</answer>
+</qa_pair>
+```
+
+This question is good because:
+- Requires filtering bugs by date, priority, and status
+- Needs to group by assignee and calculate resolution rates
+- Requires understanding timestamps to determine 48-hour windows
+- Tests pagination (potentially many bugs to process)
+- Answer is a single username
+- Based on historical data from specific time period
+
+**Example 4: Requires synthesis across multiple data types (CRM MCP)**
+```xml
+<qa_pair>
+  <question>Find the account that upgraded from the Starter to Enterprise plan in Q4 2023 and had the highest annual contract value. What industry does this account operate in?</question>
+  <answer>Healthcare</answer>
+</qa_pair>
+```
+
+This question is good because:
+- Requires understanding subscription tier changes
+- Needs to identify upgrade events in specific timeframe
+- Requires comparing contract values
+- Must access account industry information
+- Answer is simple and verifiable
+- Based on completed historical transactions
+
+### Poor Questions
+
+**Example 1: Answer changes over time**
+```xml
+<qa_pair>
+  <question>How many open issues are currently assigned to the engineering team?</question>
+  <answer>47</answer>
+</qa_pair>
+```
+
+This question is poor because:
+- The answer will change as issues are created, closed, or reassigned
+- Not based on stable/stationary data
+- Relies on "current state" which is dynamic
+
+**Example 2: Too easy with keyword search**
+```xml
+<qa_pair>
+  <question>Find the pull request with title "Add authentication feature" and tell me who created it.</question>
+  <answer>developer123</answer>
+</qa_pair>
+```
+
+This question is poor because:
+- Can be solved with a straightforward keyword search for exact title
+- Doesn't require deep exploration or understanding
+- No synthesis or analysis needed
+
+**Example 3: Ambiguous answer format**
+```xml
+<qa_pair>
+  <question>List all the repositories that have Python as their primary language.</question>
+  <answer>repo1, repo2, repo3, data-pipeline, ml-tools</answer>
+</qa_pair>
+```
+
+This question is poor because:
+- Answer is a list that could be returned in any order
+- Difficult to verify with direct string comparison
+- LLM might format differently (JSON array, comma-separated, newline-separated)
+- Better to ask for a specific aggregate (count) or superlative (most stars)
+
+## Verification Process
+
+After creating evaluations:
+
+1. **Examine the XML file** to understand the schema
+2. **Load each task instruction** and in parallel using the MCP server and tools, identify the correct answer by attempting to solve the task YOURSELF
+3. **Flag any operations** that require WRITE or DESTRUCTIVE operations
+4. **Accumulate all CORRECT answers** and replace any incorrect answers in the document
+5. **Remove any `<qa_pair>` entries** that require WRITE or DESTRUCTIVE operations
+
+Remember to parallelize solving tasks to avoid running out of context, then accumulate all answers and make changes to the file at the end.
+
+## Tips for Creating Quality Evaluations
+
+1. **Think Hard and Plan Ahead** before generating tasks
+2. **Parallelize Where Opportunity Arises** to speed up the process and manage context
+3. **Focus on Realistic Use Cases** that humans would actually want to accomplish
+4. **Create Challenging Questions** that test the limits of the MCP server's capabilities
+5. **Ensure Stability** by using historical data and closed concepts
+6. **Verify Answers** by solving the questions yourself using the MCP server tools
+7. **Iterate and Refine** based on what you learn during the process
+
+---
+
+# Running Evaluations
+
+After creating your evaluation file, you can use the provided evaluation harness to test your MCP server.
+
+## Setup
+
+1. **Install Dependencies**
+
+ ```bash
+ pip install -r scripts/requirements.txt
+ ```
+
+ Or install manually:
+ ```bash
+ pip install anthropic mcp
+ ```
+
+2. **Set API Key**
+
+ ```bash
+ export ANTHROPIC_API_KEY=your_api_key_here
+ ```
+
+## Evaluation File Format
+
+Evaluation files use XML format with `<qa_pair>` elements:
+
+```xml
+<evaluation>
+  <qa_pair>
+    <question>Find the project created in Q2 2024 with the highest number of completed tasks. What is the project name?</question>
+    <answer>Website Redesign</answer>
+  </qa_pair>
+  <qa_pair>
+    <question>Search for issues labeled as "bug" that were closed in March 2024. Which user closed the most issues? Provide their username.</question>
+    <answer>sarah_dev</answer>
+  </qa_pair>
+</evaluation>
+```
+
+## Running Evaluations
+
+The evaluation script (`scripts/evaluation.py`) supports three transport types:
+
+**Important:**
+- **stdio transport**: The evaluation script automatically launches and manages the MCP server process for you. Do not run the server manually.
+- **sse/http transports**: You must start the MCP server separately before running the evaluation. The script connects to the already-running server at the specified URL.
+
+### 1. Local STDIO Server
+
+For locally-run MCP servers (script launches the server automatically):
+
+```bash
+python scripts/evaluation.py \
+ -t stdio \
+ -c python \
+ -a my_mcp_server.py \
+ evaluation.xml
+```
+
+With environment variables:
+```bash
+python scripts/evaluation.py \
+ -t stdio \
+ -c python \
+ -a my_mcp_server.py \
+ -e API_KEY=abc123 \
+ -e DEBUG=true \
+ evaluation.xml
+```
+
+### 2. Server-Sent Events (SSE)
+
+For SSE-based MCP servers (you must start the server first):
+
+```bash
+python scripts/evaluation.py \
+ -t sse \
+ -u https://example.com/mcp \
+ -H "Authorization: Bearer token123" \
+ -H "X-Custom-Header: value" \
+ evaluation.xml
+```
+
+### 3. HTTP (Streamable HTTP)
+
+For HTTP-based MCP servers (you must start the server first):
+
+```bash
+python scripts/evaluation.py \
+ -t http \
+ -u https://example.com/mcp \
+ -H "Authorization: Bearer token123" \
+ evaluation.xml
+```
+
+## Command-Line Options
+
+```
+usage: evaluation.py [-h] [-t {stdio,sse,http}] [-m MODEL] [-c COMMAND]
+ [-a ARGS [ARGS ...]] [-e ENV [ENV ...]] [-u URL]
+ [-H HEADERS [HEADERS ...]] [-o OUTPUT]
+ eval_file
+
+positional arguments:
+ eval_file Path to evaluation XML file
+
+optional arguments:
+ -h, --help Show help message
+ -t, --transport Transport type: stdio, sse, or http (default: stdio)
+ -m, --model Claude model to use (default: claude-3-7-sonnet-20250219)
+ -o, --output Output file for report (default: print to stdout)
+
+stdio options:
+ -c, --command Command to run MCP server (e.g., python, node)
+ -a, --args Arguments for the command (e.g., server.py)
+ -e, --env Environment variables in KEY=VALUE format
+
+sse/http options:
+ -u, --url MCP server URL
+ -H, --header HTTP headers in 'Key: Value' format
+```
+
+## Output
+
+The evaluation script generates a detailed report including:
+
+- **Summary Statistics**:
+ - Accuracy (correct/total)
+ - Average task duration
+ - Average tool calls per task
+ - Total tool calls
+
+- **Per-Task Results**:
+ - Prompt and expected response
+ - Actual response from the agent
+ - Whether the answer was correct (✅/❌)
+ - Duration and tool call details
+ - Agent's summary of its approach
+ - Agent's feedback on the tools
+
+### Save Report to File
+
+```bash
+python scripts/evaluation.py \
+ -t stdio \
+ -c python \
+ -a my_server.py \
+ -o evaluation_report.md \
+ evaluation.xml
+```
+
+## Complete Example Workflow
+
+Here's a complete example of creating and running an evaluation:
+
+1. **Create your evaluation file** (`my_evaluation.xml`):
+
+```xml
+<evaluation>
+  <qa_pair>
+    <question>Find the user who created the most issues in January 2024. What is their username?</question>
+    <answer>alice_developer</answer>
+  </qa_pair>
+  <qa_pair>
+    <question>Among all pull requests merged in Q1 2024, which repository had the highest number? Provide the repository name.</question>
+    <answer>backend-api</answer>
+  </qa_pair>
+  <qa_pair>
+    <question>Find the project that was completed in December 2023 and had the longest duration from start to finish. How many days did it take?</question>
+    <answer>127</answer>
+  </qa_pair>
+</evaluation>
+```
+
+2. **Install dependencies**:
+
+```bash
+pip install -r scripts/requirements.txt
+export ANTHROPIC_API_KEY=your_api_key
+```
+
+3. **Run evaluation**:
+
+```bash
+python scripts/evaluation.py \
+ -t stdio \
+ -c python \
+ -a github_mcp_server.py \
+ -e GITHUB_TOKEN=ghp_xxx \
+ -o github_eval_report.md \
+ my_evaluation.xml
+```
+
+4. **Review the report** in `github_eval_report.md` to:
+ - See which questions passed/failed
+ - Read the agent's feedback on your tools
+ - Identify areas for improvement
+ - Iterate on your MCP server design
+
+## Troubleshooting
+
+### Connection Errors
+
+If you get connection errors:
+- **STDIO**: Verify the command and arguments are correct
+- **SSE/HTTP**: Check the URL is accessible and headers are correct
+- Ensure any required API keys are set in environment variables or headers
+
+### Low Accuracy
+
+If many evaluations fail:
+- Review the agent's feedback for each task
+- Check if tool descriptions are clear and comprehensive
+- Verify input parameters are well-documented
+- Consider whether tools return too much or too little data
+- Ensure error messages are actionable
+
+### Timeout Issues
+
+If tasks are timing out:
+- Use a more capable model (e.g., `claude-3-7-sonnet-20250219`)
+- Check if tools are returning too much data
+- Verify pagination is working correctly
+- Consider simplifying complex questions
\ No newline at end of file
diff --git a/.claude/skills/mcp-builder/reference/mcp_best_practices.md b/.claude/skills/mcp-builder/reference/mcp_best_practices.md
new file mode 100644
index 0000000..b9d343c
--- /dev/null
+++ b/.claude/skills/mcp-builder/reference/mcp_best_practices.md
@@ -0,0 +1,249 @@
+# MCP Server Best Practices
+
+## Quick Reference
+
+### Server Naming
+- **Python**: `{service}_mcp` (e.g., `slack_mcp`)
+- **Node/TypeScript**: `{service}-mcp-server` (e.g., `slack-mcp-server`)
+
+### Tool Naming
+- Use snake_case with service prefix
+- Format: `{service}_{action}_{resource}`
+- Example: `slack_send_message`, `github_create_issue`
+
+### Response Formats
+- Support both JSON and Markdown formats
+- JSON for programmatic processing
+- Markdown for human readability
+
+### Pagination
+- Always respect `limit` parameter
+- Return `has_more`, `next_offset`, `total_count`
+- Default to 20-50 items
+
+### Transport
+- **Streamable HTTP**: For remote servers, multi-client scenarios
+- **stdio**: For local integrations, command-line tools
+- Avoid SSE (deprecated in favor of streamable HTTP)
+
+---
+
+## Server Naming Conventions
+
+Follow these standardized naming patterns:
+
+**Python**: Use format `{service}_mcp` (lowercase with underscores)
+- Examples: `slack_mcp`, `github_mcp`, `jira_mcp`
+
+**Node/TypeScript**: Use format `{service}-mcp-server` (lowercase with hyphens)
+- Examples: `slack-mcp-server`, `github-mcp-server`, `jira-mcp-server`
+
+The name should be general, descriptive of the service being integrated, easy to infer from the task description, and without version numbers.
+
+---
+
+## Tool Naming and Design
+
+### Tool Naming
+
+1. **Use snake_case**: `search_users`, `create_project`, `get_channel_info`
+2. **Include service prefix**: Anticipate that your MCP server may be used alongside other MCP servers
+ - Use `slack_send_message` instead of just `send_message`
+ - Use `github_create_issue` instead of just `create_issue`
+3. **Be action-oriented**: Start with verbs (get, list, search, create, etc.)
+4. **Be specific**: Avoid generic names that could conflict with other servers
+
+### Tool Design
+
+- Tool descriptions must narrowly and unambiguously describe functionality
+- Descriptions must precisely match actual functionality
+- Provide tool annotations (readOnlyHint, destructiveHint, idempotentHint, openWorldHint)
+- Keep tool operations focused and atomic
+
+---
+
+## Response Formats
+
+All tools that return data should support multiple formats:
+
+### JSON Format (`response_format="json"`)
+- Machine-readable structured data
+- Include all available fields and metadata
+- Consistent field names and types
+- Use for programmatic processing
+
+### Markdown Format (`response_format="markdown"`, typically default)
+- Human-readable formatted text
+- Use headers, lists, and formatting for clarity
+- Convert timestamps to human-readable format
+- Show display names with IDs in parentheses
+- Omit verbose metadata
+
+---
+
+## Pagination
+
+For tools that list resources:
+
+- **Always respect the `limit` parameter**
+- **Implement pagination**: Use `offset` or cursor-based pagination
+- **Return pagination metadata**: Include `has_more`, `next_offset`/`next_cursor`, `total_count`
+- **Never load all results into memory**: Especially important for large datasets
+- **Default to reasonable limits**: 20-50 items is typical
+
+Example pagination response:
+```json
+{
+ "total": 150,
+ "count": 20,
+ "offset": 0,
+ "items": [...],
+ "has_more": true,
+ "next_offset": 20
+}
+```
+
+---
+
+## Transport Options
+
+### Streamable HTTP
+
+**Best for**: Remote servers, web services, multi-client scenarios
+
+**Characteristics**:
+- Bidirectional communication over HTTP
+- Supports multiple simultaneous clients
+- Can be deployed as a web service
+- Enables server-to-client notifications
+
+**Use when**:
+- Serving multiple clients simultaneously
+- Deploying as a cloud service
+- Integration with web applications
+
+### stdio
+
+**Best for**: Local integrations, command-line tools
+
+**Characteristics**:
+- Standard input/output stream communication
+- Simple setup, no network configuration needed
+- Runs as a subprocess of the client
+
+**Use when**:
+- Building tools for local development environments
+- Integrating with desktop applications
+- Single-user, single-session scenarios
+
+**Note**: stdio servers should NOT log to stdout (use stderr for logging)
+
+### Transport Selection
+
+| Criterion | stdio | Streamable HTTP |
+|-----------|-------|-----------------|
+| **Deployment** | Local | Remote |
+| **Clients** | Single | Multiple |
+| **Complexity** | Low | Medium |
+| **Real-time** | No | Yes |
+
+---
+
+## Security Best Practices
+
+### Authentication and Authorization
+
+**OAuth 2.1**:
+- Use secure OAuth 2.1 with certificates from recognized authorities
+- Validate access tokens before processing requests
+- Only accept tokens specifically intended for your server
+
+**API Keys**:
+- Store API keys in environment variables, never in code
+- Validate keys on server startup
+- Provide clear error messages when authentication fails
+
+### Input Validation
+
+- Sanitize file paths to prevent directory traversal
+- Validate URLs and external identifiers
+- Check parameter sizes and ranges
+- Prevent command injection in system calls
+- Use schema validation (Pydantic/Zod) for all inputs
+
+### Error Handling
+
+- Don't expose internal errors to clients
+- Log security-relevant errors server-side
+- Provide helpful but not revealing error messages
+- Clean up resources after errors
+
+### DNS Rebinding Protection
+
+For streamable HTTP servers running locally:
+- Enable DNS rebinding protection
+- Validate the `Origin` header on all incoming connections
+- Bind to `127.0.0.1` rather than `0.0.0.0`
+
+---
+
+## Tool Annotations
+
+Provide annotations to help clients understand tool behavior:
+
+| Annotation | Type | Default | Description |
+|-----------|------|---------|-------------|
+| `readOnlyHint` | boolean | false | Tool does not modify its environment |
+| `destructiveHint` | boolean | true | Tool may perform destructive updates |
+| `idempotentHint` | boolean | false | Repeated calls with same args have no additional effect |
+| `openWorldHint` | boolean | true | Tool interacts with external entities |
+
+**Important**: Annotations are hints, not security guarantees. Clients should not make security-critical decisions based solely on annotations.
+
+---
+
+## Error Handling
+
+- Use standard JSON-RPC error codes
+- Report tool errors within result objects (not protocol-level errors)
+- Provide helpful, specific error messages with suggested next steps
+- Don't expose internal implementation details
+- Clean up resources properly on errors
+
+Example error handling:
+```typescript
+try {
+ const result = performOperation();
+ return { content: [{ type: "text", text: result }] };
+} catch (error) {
+ return {
+ isError: true,
+ content: [{
+ type: "text",
+ text: `Error: ${error.message}. Try using filter='active_only' to reduce results.`
+ }]
+ };
+}
+```
+
+---
+
+## Testing Requirements
+
+Comprehensive testing should cover:
+
+- **Functional testing**: Verify correct execution with valid/invalid inputs
+- **Integration testing**: Test interaction with external systems
+- **Security testing**: Validate auth, input sanitization, rate limiting
+- **Performance testing**: Check behavior under load, timeouts
+- **Error handling**: Ensure proper error reporting and cleanup
+
+---
+
+## Documentation Requirements
+
+- Provide clear documentation of all tools and capabilities
+- Include working examples (at least 3 per major feature)
+- Document security considerations
+- Specify required permissions and access levels
+- Document rate limits and performance characteristics
diff --git a/.claude/skills/mcp-builder/reference/node_mcp_server.md b/.claude/skills/mcp-builder/reference/node_mcp_server.md
new file mode 100644
index 0000000..f6e5df9
--- /dev/null
+++ b/.claude/skills/mcp-builder/reference/node_mcp_server.md
@@ -0,0 +1,970 @@
+# Node/TypeScript MCP Server Implementation Guide
+
+## Overview
+
+This document provides Node/TypeScript-specific best practices and examples for implementing MCP servers using the MCP TypeScript SDK. It covers project structure, server setup, tool registration patterns, input validation with Zod, error handling, and complete working examples.
+
+---
+
+## Quick Reference
+
+### Key Imports
+```typescript
+import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
+import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js";
+import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
+import express from "express";
+import { z } from "zod";
+```
+
+### Server Initialization
+```typescript
+const server = new McpServer({
+ name: "service-mcp-server",
+ version: "1.0.0"
+});
+```
+
+### Tool Registration Pattern
+```typescript
+server.registerTool(
+ "tool_name",
+ {
+ title: "Tool Display Name",
+ description: "What the tool does",
+ inputSchema: { param: z.string() },
+ outputSchema: { result: z.string() }
+ },
+ async ({ param }) => {
+ const output = { result: `Processed: ${param}` };
+ return {
+ content: [{ type: "text", text: JSON.stringify(output) }],
+ structuredContent: output // Modern pattern for structured data
+ };
+ }
+);
+```
+
+---
+
+## MCP TypeScript SDK
+
+The official MCP TypeScript SDK provides:
+- `McpServer` class for server initialization
+- `registerTool` method for tool registration
+- Zod schema integration for runtime input validation
+- Type-safe tool handler implementations
+
+**IMPORTANT - Use Modern APIs Only:**
+- **DO use**: `server.registerTool()`, `server.registerResource()`, `server.registerPrompt()`
+- **DO NOT use**: Old deprecated APIs such as `server.tool()`, `server.setRequestHandler(ListToolsRequestSchema, ...)`, or manual handler registration
+- The `register*` methods provide better type safety, automatic schema handling, and are the recommended approach
+
+See the MCP SDK documentation in the references for complete details.
+
+## Server Naming Convention
+
+Node/TypeScript MCP servers must follow this naming pattern:
+- **Format**: `{service}-mcp-server` (lowercase with hyphens)
+- **Examples**: `github-mcp-server`, `jira-mcp-server`, `stripe-mcp-server`
+
+The name should be:
+- General (not tied to specific features)
+- Descriptive of the service/API being integrated
+- Easy to infer from the task description
+- Without version numbers or dates
+
+## Project Structure
+
+Create the following structure for Node/TypeScript MCP servers:
+
+```
+{service}-mcp-server/
+├── package.json
+├── tsconfig.json
+├── README.md
+├── src/
+│ ├── index.ts # Main entry point with McpServer initialization
+│ ├── types.ts # TypeScript type definitions and interfaces
+│ ├── tools/ # Tool implementations (one file per domain)
+│ ├── services/ # API clients and shared utilities
+│ ├── schemas/ # Zod validation schemas
+│ └── constants.ts # Shared constants (API_URL, CHARACTER_LIMIT, etc.)
+└── dist/ # Built JavaScript files (entry point: dist/index.js)
+```
+
+## Tool Implementation
+
+### Tool Naming
+
+Use snake_case for tool names (e.g., "search_users", "create_project", "get_channel_info") with clear, action-oriented names.
+
+**Avoid Naming Conflicts**: Include the service context to prevent overlaps:
+- Use "slack_send_message" instead of just "send_message"
+- Use "github_create_issue" instead of just "create_issue"
+- Use "asana_list_tasks" instead of just "list_tasks"
+
+### Tool Structure
+
+Tools are registered using the `registerTool` method with the following requirements:
+- Use Zod schemas for runtime input validation and type safety
+- The `description` field must be explicitly provided - JSDoc comments are NOT automatically extracted
+- Explicitly provide `title`, `description`, `inputSchema`, and `annotations`
+- The `inputSchema` must be a Zod schema object (not a JSON schema)
+- Type all parameters and return values explicitly
+
+```typescript
+import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
+import { z } from "zod";
+
+const server = new McpServer({
+ name: "example-mcp",
+ version: "1.0.0"
+});
+
+// Zod schema for input validation
+const UserSearchInputSchema = z.object({
+ query: z.string()
+ .min(2, "Query must be at least 2 characters")
+ .max(200, "Query must not exceed 200 characters")
+ .describe("Search string to match against names/emails"),
+ limit: z.number()
+ .int()
+ .min(1)
+ .max(100)
+ .default(20)
+ .describe("Maximum results to return"),
+ offset: z.number()
+ .int()
+ .min(0)
+ .default(0)
+ .describe("Number of results to skip for pagination"),
+ response_format: z.nativeEnum(ResponseFormat)
+ .default(ResponseFormat.MARKDOWN)
+ .describe("Output format: 'markdown' for human-readable or 'json' for machine-readable")
+}).strict();
+
+// Type definition from Zod schema
+type UserSearchInput = z.infer<typeof UserSearchInputSchema>;
+
+server.registerTool(
+ "example_search_users",
+ {
+ title: "Search Example Users",
+ description: `Search for users in the Example system by name, email, or team.
+
+This tool searches across all user profiles in the Example platform, supporting partial matches and various search filters. It does NOT create or modify users, only searches existing ones.
+
+Args:
+ - query (string): Search string to match against names/emails
+ - limit (number): Maximum results to return, between 1-100 (default: 20)
+ - offset (number): Number of results to skip for pagination (default: 0)
+ - response_format ('markdown' | 'json'): Output format (default: 'markdown')
+
+Returns:
+ For JSON format: Structured data with schema:
+ {
+ "total": number, // Total number of matches found
+ "count": number, // Number of results in this response
+ "offset": number, // Current pagination offset
+ "users": [
+ {
+ "id": string, // User ID (e.g., "U123456789")
+ "name": string, // Full name (e.g., "John Doe")
+ "email": string, // Email address
+ "team": string, // Team name (optional)
+ "active": boolean // Whether user is active
+ }
+ ],
+ "has_more": boolean, // Whether more results are available
+ "next_offset": number // Offset for next page (if has_more is true)
+ }
+
+Examples:
+ - Use when: "Find all marketing team members" -> params with query="team:marketing"
+ - Use when: "Search for John's account" -> params with query="john"
+ - Don't use when: You need to create a user (use example_create_user instead)
+
+Error Handling:
+ - Returns "Error: Rate limit exceeded" if too many requests (429 status)
+  - Returns "No users found matching '<query>'" if search returns empty`,
+ inputSchema: UserSearchInputSchema,
+ annotations: {
+ readOnlyHint: true,
+ destructiveHint: false,
+ idempotentHint: true,
+ openWorldHint: true
+ }
+ },
+ async (params: UserSearchInput) => {
+ try {
+ // Input validation is handled by Zod schema
+ // Make API request using validated parameters
+ const data = await makeApiRequest(
+ "users/search",
+ "GET",
+ undefined,
+ {
+ q: params.query,
+ limit: params.limit,
+ offset: params.offset
+ }
+ );
+
+ const users = data.users || [];
+ const total = data.total || 0;
+
+ if (!users.length) {
+ return {
+ content: [{
+ type: "text",
+ text: `No users found matching '${params.query}'`
+ }]
+ };
+ }
+
+ // Prepare structured output
+ const output = {
+ total,
+ count: users.length,
+ offset: params.offset,
+ users: users.map((user: any) => ({
+ id: user.id,
+ name: user.name,
+ email: user.email,
+ ...(user.team ? { team: user.team } : {}),
+ active: user.active ?? true
+ })),
+ has_more: total > params.offset + users.length,
+ ...(total > params.offset + users.length ? {
+ next_offset: params.offset + users.length
+ } : {})
+ };
+
+ // Format text representation based on requested format
+ let textContent: string;
+ if (params.response_format === ResponseFormat.MARKDOWN) {
+ const lines = [`# User Search Results: '${params.query}'`, "",
+ `Found ${total} users (showing ${users.length})`, ""];
+ for (const user of users) {
+ lines.push(`## ${user.name} (${user.id})`);
+ lines.push(`- **Email**: ${user.email}`);
+ if (user.team) lines.push(`- **Team**: ${user.team}`);
+ lines.push("");
+ }
+ textContent = lines.join("\n");
+ } else {
+ textContent = JSON.stringify(output, null, 2);
+ }
+
+ return {
+ content: [{ type: "text", text: textContent }],
+ structuredContent: output // Modern pattern for structured data
+ };
+ } catch (error) {
+ return {
+ content: [{
+ type: "text",
+ text: handleApiError(error)
+ }]
+ };
+ }
+ }
+);
+```
+
+## Zod Schemas for Input Validation
+
+Zod provides runtime type validation:
+
+```typescript
+import { z } from "zod";
+
+// Basic schema with validation
+const CreateUserSchema = z.object({
+ name: z.string()
+ .min(1, "Name is required")
+ .max(100, "Name must not exceed 100 characters"),
+ email: z.string()
+ .email("Invalid email format"),
+ age: z.number()
+ .int("Age must be a whole number")
+ .min(0, "Age cannot be negative")
+ .max(150, "Age cannot be greater than 150")
+}).strict(); // Use .strict() to forbid extra fields
+
+// Enums
+enum ResponseFormat {
+ MARKDOWN = "markdown",
+ JSON = "json"
+}
+
+const SearchSchema = z.object({
+ response_format: z.nativeEnum(ResponseFormat)
+ .default(ResponseFormat.MARKDOWN)
+ .describe("Output format")
+});
+
+// Optional fields with defaults
+const PaginationSchema = z.object({
+ limit: z.number()
+ .int()
+ .min(1)
+ .max(100)
+ .default(20)
+ .describe("Maximum results to return"),
+ offset: z.number()
+ .int()
+ .min(0)
+ .default(0)
+ .describe("Number of results to skip")
+});
+```
+
+## Response Format Options
+
+Support multiple output formats for flexibility:
+
+```typescript
+enum ResponseFormat {
+ MARKDOWN = "markdown",
+ JSON = "json"
+}
+
+const inputSchema = z.object({
+ query: z.string(),
+ response_format: z.nativeEnum(ResponseFormat)
+ .default(ResponseFormat.MARKDOWN)
+ .describe("Output format: 'markdown' for human-readable or 'json' for machine-readable")
+});
+```
+
+**Markdown format**:
+- Use headers, lists, and formatting for clarity
+- Convert timestamps to human-readable format
+- Show display names with IDs in parentheses
+- Omit verbose metadata
+- Group related information logically
+
+**JSON format**:
+- Return complete, structured data suitable for programmatic processing
+- Include all available fields and metadata
+- Use consistent field names and types
+
+## Pagination Implementation
+
+For tools that list resources:
+
+```typescript
+const ListSchema = z.object({
+ limit: z.number().int().min(1).max(100).default(20),
+ offset: z.number().int().min(0).default(0)
+});
+
+async function listItems(params: z.infer<typeof ListSchema>) {
+ const data = await apiRequest(params.limit, params.offset);
+
+ const response = {
+ total: data.total,
+ count: data.items.length,
+ offset: params.offset,
+ items: data.items,
+ has_more: data.total > params.offset + data.items.length,
+ next_offset: data.total > params.offset + data.items.length
+ ? params.offset + data.items.length
+ : undefined
+ };
+
+ return JSON.stringify(response, null, 2);
+}
+```
+
+## Character Limits and Truncation
+
+Add a CHARACTER_LIMIT constant to prevent overwhelming responses:
+
+```typescript
+// At module level in constants.ts
+export const CHARACTER_LIMIT = 25000; // Maximum response size in characters
+
+async function searchTool(params: SearchInput) {
+ let result = generateResponse(data);
+
+ // Check character limit and truncate if needed
+ if (result.length > CHARACTER_LIMIT) {
+ const truncatedData = data.slice(0, Math.max(1, data.length / 2));
+ response.data = truncatedData;
+ response.truncated = true;
+ response.truncation_message =
+ `Response truncated from ${data.length} to ${truncatedData.length} items. ` +
+ `Use 'offset' parameter or add filters to see more results.`;
+ result = JSON.stringify(response, null, 2);
+ }
+
+ return result;
+}
+```
+
+## Error Handling
+
+Provide clear, actionable error messages:
+
+```typescript
+import axios, { AxiosError } from "axios";
+
+function handleApiError(error: unknown): string {
+ if (error instanceof AxiosError) {
+ if (error.response) {
+ switch (error.response.status) {
+ case 404:
+ return "Error: Resource not found. Please check the ID is correct.";
+ case 403:
+ return "Error: Permission denied. You don't have access to this resource.";
+ case 429:
+ return "Error: Rate limit exceeded. Please wait before making more requests.";
+ default:
+ return `Error: API request failed with status ${error.response.status}`;
+ }
+ } else if (error.code === "ECONNABORTED") {
+ return "Error: Request timed out. Please try again.";
+ }
+ }
+ return `Error: Unexpected error occurred: ${error instanceof Error ? error.message : String(error)}`;
+}
+```
+
+## Shared Utilities
+
+Extract common functionality into reusable functions:
+
+```typescript
+// Shared API request function
+async function makeApiRequest(
+ endpoint: string,
+ method: "GET" | "POST" | "PUT" | "DELETE" = "GET",
+ data?: any,
+ params?: any
+): Promise<any> {
+ try {
+ const response = await axios({
+ method,
+ url: `${API_BASE_URL}/${endpoint}`,
+ data,
+ params,
+ timeout: 30000,
+ headers: {
+ "Content-Type": "application/json",
+ "Accept": "application/json"
+ }
+ });
+ return response.data;
+ } catch (error) {
+ throw error;
+ }
+}
+```
+
+## Async/Await Best Practices
+
+Always use async/await for network requests and I/O operations:
+
+```typescript
+// Good: Async network request
+async function fetchData(resourceId: string): Promise<any> {
+ const response = await axios.get(`${API_URL}/resource/${resourceId}`);
+ return response.data;
+}
+
+// Bad: Promise chains
+function fetchData(resourceId: string): Promise<any> {
+ return axios.get(`${API_URL}/resource/${resourceId}`)
+ .then(response => response.data); // Harder to read and maintain
+}
+```
+
+## TypeScript Best Practices
+
+1. **Use Strict TypeScript**: Enable strict mode in tsconfig.json
+2. **Define Interfaces**: Create clear interface definitions for all data structures
+3. **Avoid `any`**: Use proper types or `unknown` instead of `any`
+4. **Zod for Runtime Validation**: Use Zod schemas to validate external data
+5. **Type Guards**: Create type guard functions for complex type checking
+6. **Error Handling**: Always use try-catch with proper error type checking
+7. **Null Safety**: Use optional chaining (`?.`) and nullish coalescing (`??`)
+
+```typescript
+// Good: Type-safe with Zod and interfaces
+interface UserResponse {
+ id: string;
+ name: string;
+ email: string;
+ team?: string;
+ active: boolean;
+}
+
+const UserSchema = z.object({
+ id: z.string(),
+ name: z.string(),
+ email: z.string().email(),
+ team: z.string().optional(),
+ active: z.boolean()
+});
+
+type User = z.infer<typeof UserSchema>;
+
+async function getUser(id: string): Promise<User> {
+ const data = await apiCall(`/users/${id}`);
+ return UserSchema.parse(data); // Runtime validation
+}
+
+// Bad: Using any
+async function getUser(id: string): Promise<any> {
+ return await apiCall(`/users/${id}`); // No type safety
+}
+```
+
+## Package Configuration
+
+### package.json
+
+```json
+{
+ "name": "{service}-mcp-server",
+ "version": "1.0.0",
+ "description": "MCP server for {Service} API integration",
+ "type": "module",
+ "main": "dist/index.js",
+ "scripts": {
+ "start": "node dist/index.js",
+ "dev": "tsx watch src/index.ts",
+ "build": "tsc",
+ "clean": "rm -rf dist"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "dependencies": {
+ "@modelcontextprotocol/sdk": "^1.6.1",
+ "axios": "^1.7.9",
+ "zod": "^3.23.8"
+ },
+ "devDependencies": {
+ "@types/node": "^22.10.0",
+ "tsx": "^4.19.2",
+ "typescript": "^5.7.2"
+ }
+}
+```
+
+### tsconfig.json
+
+```json
+{
+ "compilerOptions": {
+ "target": "ES2022",
+ "module": "Node16",
+ "moduleResolution": "Node16",
+ "lib": ["ES2022"],
+ "outDir": "./dist",
+ "rootDir": "./src",
+ "strict": true,
+ "esModuleInterop": true,
+ "skipLibCheck": true,
+ "forceConsistentCasingInFileNames": true,
+ "declaration": true,
+ "declarationMap": true,
+ "sourceMap": true,
+ "allowSyntheticDefaultImports": true
+ },
+ "include": ["src/**/*"],
+ "exclude": ["node_modules", "dist"]
+}
+```
+
+## Complete Example
+
+```typescript
+#!/usr/bin/env node
+/**
+ * MCP Server for Example Service.
+ *
+ * This server provides tools to interact with Example API, including user search,
+ * project management, and data export capabilities.
+ */
+
+import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
+import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
+import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js";
+import express from "express";
+import { z } from "zod";
+import axios, { AxiosError } from "axios";
+
+// Constants
+const API_BASE_URL = "https://api.example.com/v1";
+const CHARACTER_LIMIT = 25000;
+
+// Enums
+enum ResponseFormat {
+ MARKDOWN = "markdown",
+ JSON = "json"
+}
+
+// Zod schemas
+const UserSearchInputSchema = z.object({
+ query: z.string()
+ .min(2, "Query must be at least 2 characters")
+ .max(200, "Query must not exceed 200 characters")
+ .describe("Search string to match against names/emails"),
+ limit: z.number()
+ .int()
+ .min(1)
+ .max(100)
+ .default(20)
+ .describe("Maximum results to return"),
+ offset: z.number()
+ .int()
+ .min(0)
+ .default(0)
+ .describe("Number of results to skip for pagination"),
+ response_format: z.nativeEnum(ResponseFormat)
+ .default(ResponseFormat.MARKDOWN)
+ .describe("Output format: 'markdown' for human-readable or 'json' for machine-readable")
+}).strict();
+
+type UserSearchInput = z.infer<typeof UserSearchInputSchema>;
+
+// Shared utility functions
+async function makeApiRequest(
+ endpoint: string,
+ method: "GET" | "POST" | "PUT" | "DELETE" = "GET",
+ data?: any,
+ params?: any
+): Promise<any> {
+ try {
+ const response = await axios({
+ method,
+ url: `${API_BASE_URL}/${endpoint}`,
+ data,
+ params,
+ timeout: 30000,
+ headers: {
+ "Content-Type": "application/json",
+ "Accept": "application/json"
+ }
+ });
+ return response.data;
+ } catch (error) {
+ throw error;
+ }
+}
+
+function handleApiError(error: unknown): string {
+ if (error instanceof AxiosError) {
+ if (error.response) {
+ switch (error.response.status) {
+ case 404:
+ return "Error: Resource not found. Please check the ID is correct.";
+ case 403:
+ return "Error: Permission denied. You don't have access to this resource.";
+ case 429:
+ return "Error: Rate limit exceeded. Please wait before making more requests.";
+ default:
+ return `Error: API request failed with status ${error.response.status}`;
+ }
+ } else if (error.code === "ECONNABORTED") {
+ return "Error: Request timed out. Please try again.";
+ }
+ }
+ return `Error: Unexpected error occurred: ${error instanceof Error ? error.message : String(error)}`;
+}
+
+// Create MCP server instance
+const server = new McpServer({
+ name: "example-mcp",
+ version: "1.0.0"
+});
+
+// Register tools
+server.registerTool(
+ "example_search_users",
+ {
+ title: "Search Example Users",
+ description: `[Full description as shown above]`,
+ inputSchema: UserSearchInputSchema,
+ annotations: {
+ readOnlyHint: true,
+ destructiveHint: false,
+ idempotentHint: true,
+ openWorldHint: true
+ }
+ },
+ async (params: UserSearchInput) => {
+ // Implementation as shown above
+ }
+);
+
+// Main function
+// For stdio (local):
+async function runStdio() {
+ if (!process.env.EXAMPLE_API_KEY) {
+ console.error("ERROR: EXAMPLE_API_KEY environment variable is required");
+ process.exit(1);
+ }
+
+ const transport = new StdioServerTransport();
+ await server.connect(transport);
+ console.error("MCP server running via stdio");
+}
+
+// For streamable HTTP (remote):
+async function runHTTP() {
+ if (!process.env.EXAMPLE_API_KEY) {
+ console.error("ERROR: EXAMPLE_API_KEY environment variable is required");
+ process.exit(1);
+ }
+
+ const app = express();
+ app.use(express.json());
+
+ app.post('/mcp', async (req, res) => {
+ const transport = new StreamableHTTPServerTransport({
+ sessionIdGenerator: undefined,
+ enableJsonResponse: true
+ });
+ res.on('close', () => transport.close());
+ await server.connect(transport);
+ await transport.handleRequest(req, res, req.body);
+ });
+
+ const port = parseInt(process.env.PORT || '3000');
+ app.listen(port, () => {
+ console.error(`MCP server running on http://localhost:${port}/mcp`);
+ });
+}
+
+// Choose transport based on environment
+const transport = process.env.TRANSPORT || 'stdio';
+if (transport === 'http') {
+ runHTTP().catch(error => {
+ console.error("Server error:", error);
+ process.exit(1);
+ });
+} else {
+ runStdio().catch(error => {
+ console.error("Server error:", error);
+ process.exit(1);
+ });
+}
+```
+
+---
+
+## Advanced MCP Features
+
+### Resource Registration
+
+Expose data as resources for efficient, URI-based access:
+
+```typescript
+import { ResourceTemplate } from "@modelcontextprotocol/sdk/types.js";
+
+// Register a resource with URI template
+server.registerResource(
+ {
+ uri: "file://documents/{name}",
+ name: "Document Resource",
+ description: "Access documents by name",
+ mimeType: "text/plain"
+ },
+ async (uri: string) => {
+ // Extract parameter from URI
+ const match = uri.match(/^file:\/\/documents\/(.+)$/);
+ if (!match) {
+ throw new Error("Invalid URI format");
+ }
+
+ const documentName = match[1];
+ const content = await loadDocument(documentName);
+
+ return {
+ contents: [{
+ uri,
+ mimeType: "text/plain",
+ text: content
+ }]
+ };
+ }
+);
+
+// List available resources dynamically
+server.registerResourceList(async () => {
+ const documents = await getAvailableDocuments();
+ return {
+ resources: documents.map(doc => ({
+ uri: `file://documents/${doc.name}`,
+ name: doc.name,
+ mimeType: "text/plain",
+ description: doc.description
+ }))
+ };
+});
+```
+
+**When to use Resources vs Tools:**
+- **Resources**: For data access with simple URI-based parameters
+- **Tools**: For complex operations requiring validation and business logic
+- **Resources**: When data is relatively static or template-based
+- **Tools**: When operations have side effects or complex workflows
+
+### Transport Options
+
+The TypeScript SDK supports two main transport mechanisms:
+
+#### Streamable HTTP (Recommended for Remote Servers)
+
+```typescript
+import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js";
+import express from "express";
+
+const app = express();
+app.use(express.json());
+
+app.post('/mcp', async (req, res) => {
+ // Create new transport for each request (stateless, prevents request ID collisions)
+ const transport = new StreamableHTTPServerTransport({
+ sessionIdGenerator: undefined,
+ enableJsonResponse: true
+ });
+
+ res.on('close', () => transport.close());
+
+ await server.connect(transport);
+ await transport.handleRequest(req, res, req.body);
+});
+
+app.listen(3000);
+```
+
+#### stdio (For Local Integrations)
+
+```typescript
+import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
+
+const transport = new StdioServerTransport();
+await server.connect(transport);
+```
+
+**Transport selection:**
+- **Streamable HTTP**: Web services, remote access, multiple clients
+- **stdio**: Command-line tools, local development, subprocess integration
+
+### Notification Support
+
+Notify clients when server state changes:
+
+```typescript
+// Notify when tools list changes
+server.notification({
+ method: "notifications/tools/list_changed"
+});
+
+// Notify when resources change
+server.notification({
+ method: "notifications/resources/list_changed"
+});
+```
+
+Use notifications sparingly - only when server capabilities genuinely change.
+
+---
+
+## Code Best Practices
+
+### Code Composability and Reusability
+
+Your implementation MUST prioritize composability and code reuse:
+
+1. **Extract Common Functionality**:
+ - Create reusable helper functions for operations used across multiple tools
+ - Build shared API clients for HTTP requests instead of duplicating code
+ - Centralize error handling logic in utility functions
+ - Extract business logic into dedicated functions that can be composed
+ - Extract shared markdown or JSON field selection & formatting functionality
+
+2. **Avoid Duplication**:
+ - NEVER copy-paste similar code between tools
+ - If you find yourself writing similar logic twice, extract it into a function
+ - Common operations like pagination, filtering, field selection, and formatting should be shared
+ - Authentication/authorization logic should be centralized
+
+## Building and Running
+
+Always build your TypeScript code before running:
+
+```bash
+# Build the project
+npm run build
+
+# Run the server
+npm start
+
+# Development with auto-reload
+npm run dev
+```
+
+Always ensure `npm run build` completes successfully before considering the implementation complete.
+
+## Quality Checklist
+
+Before finalizing your Node/TypeScript MCP server implementation, ensure:
+
+### Strategic Design
+- [ ] Tools enable complete workflows, not just API endpoint wrappers
+- [ ] Tool names reflect natural task subdivisions
+- [ ] Response formats optimize for agent context efficiency
+- [ ] Human-readable identifiers used where appropriate
+- [ ] Error messages guide agents toward correct usage
+
+### Implementation Quality
+- [ ] FOCUSED IMPLEMENTATION: Most important and valuable tools implemented
+- [ ] All tools registered using `registerTool` with complete configuration
+- [ ] All tools include `title`, `description`, `inputSchema`, and `annotations`
+- [ ] Annotations correctly set (readOnlyHint, destructiveHint, idempotentHint, openWorldHint)
+- [ ] All tools use Zod schemas for runtime input validation with `.strict()` enforcement
+- [ ] All Zod schemas have proper constraints and descriptive error messages
+- [ ] All tools have comprehensive descriptions with explicit input/output types
+- [ ] Descriptions include return value examples and complete schema documentation
+- [ ] Error messages are clear, actionable, and educational
+
+### TypeScript Quality
+- [ ] TypeScript interfaces are defined for all data structures
+- [ ] Strict TypeScript is enabled in tsconfig.json
+- [ ] No use of `any` type - use `unknown` or proper types instead
+- [ ] All async functions have explicit Promise return types
+- [ ] Error handling uses proper type guards (e.g., `axios.isAxiosError`, `z.ZodError`)
+
+### Advanced Features (where applicable)
+- [ ] Resources registered for appropriate data endpoints
+- [ ] Appropriate transport configured (stdio or streamable HTTP)
+- [ ] Notifications implemented for dynamic server capabilities
+- [ ] Type-safe with SDK interfaces
+
+### Project Configuration
+- [ ] Package.json includes all necessary dependencies
+- [ ] Build script produces working JavaScript in dist/ directory
+- [ ] Main entry point is properly configured as dist/index.js
+- [ ] Server name follows format: `{service}-mcp-server`
+- [ ] tsconfig.json properly configured with strict mode
+
+### Code Quality
+- [ ] Pagination is properly implemented where applicable
+- [ ] Large responses check CHARACTER_LIMIT constant and truncate with clear messages
+- [ ] Filtering options are provided for potentially large result sets
+- [ ] All network operations handle timeouts and connection errors gracefully
+- [ ] Common functionality is extracted into reusable functions
+- [ ] Return types are consistent across similar operations
+
+### Testing and Build
+- [ ] `npm run build` completes successfully without errors
+- [ ] dist/index.js created and executable
+- [ ] Server runs: `node dist/index.js --help`
+- [ ] All imports resolve correctly
+- [ ] Sample tool calls work as expected
\ No newline at end of file
diff --git a/.claude/skills/mcp-builder/reference/python_mcp_server.md b/.claude/skills/mcp-builder/reference/python_mcp_server.md
new file mode 100644
index 0000000..cf7ec99
--- /dev/null
+++ b/.claude/skills/mcp-builder/reference/python_mcp_server.md
@@ -0,0 +1,719 @@
+# Python MCP Server Implementation Guide
+
+## Overview
+
+This document provides Python-specific best practices and examples for implementing MCP servers using the MCP Python SDK. It covers server setup, tool registration patterns, input validation with Pydantic, error handling, and complete working examples.
+
+---
+
+## Quick Reference
+
+### Key Imports
+```python
+from mcp.server.fastmcp import FastMCP
+from pydantic import BaseModel, Field, field_validator, ConfigDict
+from typing import Optional, List, Dict, Any
+from enum import Enum
+import httpx
+```
+
+### Server Initialization
+```python
+mcp = FastMCP("service_mcp")
+```
+
+### Tool Registration Pattern
+```python
+@mcp.tool(name="tool_name", annotations={...})
+async def tool_function(params: InputModel) -> str:
+ # Implementation
+ pass
+```
+
+---
+
+## MCP Python SDK and FastMCP
+
+The official MCP Python SDK provides FastMCP, a high-level framework for building MCP servers. It provides:
+- Automatic description and inputSchema generation from function signatures and docstrings
+- Pydantic model integration for input validation
+- Decorator-based tool registration with `@mcp.tool`
+
+**For complete SDK documentation, use WebFetch to load:**
+`https://raw.githubusercontent.com/modelcontextprotocol/python-sdk/main/README.md`
+
+## Server Naming Convention
+
+Python MCP servers must follow this naming pattern:
+- **Format**: `{service}_mcp` (lowercase with underscores)
+- **Examples**: `github_mcp`, `jira_mcp`, `stripe_mcp`
+
+The name should be:
+- General (not tied to specific features)
+- Descriptive of the service/API being integrated
+- Easy to infer from the task description
+- Without version numbers or dates
+
+## Tool Implementation
+
+### Tool Naming
+
+Use snake_case for tool names (e.g., "search_users", "create_project", "get_channel_info") with clear, action-oriented names.
+
+**Avoid Naming Conflicts**: Include the service context to prevent overlaps:
+- Use "slack_send_message" instead of just "send_message"
+- Use "github_create_issue" instead of just "create_issue"
+- Use "asana_list_tasks" instead of just "list_tasks"
+
+### Tool Structure with FastMCP
+
+Tools are defined using the `@mcp.tool` decorator with Pydantic models for input validation:
+
+```python
+from pydantic import BaseModel, Field, ConfigDict
+from mcp.server.fastmcp import FastMCP
+
+# Initialize the MCP server
+mcp = FastMCP("example_mcp")
+
+# Define Pydantic model for input validation
+class ServiceToolInput(BaseModel):
+ '''Input model for service tool operation.'''
+ model_config = ConfigDict(
+ str_strip_whitespace=True, # Auto-strip whitespace from strings
+ validate_assignment=True, # Validate on assignment
+ extra='forbid' # Forbid extra fields
+ )
+
+ param1: str = Field(..., description="First parameter description (e.g., 'user123', 'project-abc')", min_length=1, max_length=100)
+ param2: Optional[int] = Field(default=None, description="Optional integer parameter with constraints", ge=0, le=1000)
+    tags: Optional[List[str]] = Field(default_factory=list, description="List of tags to apply", max_length=10)
+
+@mcp.tool(
+ name="service_tool_name",
+ annotations={
+ "title": "Human-Readable Tool Title",
+ "readOnlyHint": True, # Tool does not modify environment
+ "destructiveHint": False, # Tool does not perform destructive operations
+ "idempotentHint": True, # Repeated calls have no additional effect
+ "openWorldHint": False # Tool does not interact with external entities
+ }
+)
+async def service_tool_name(params: ServiceToolInput) -> str:
+ '''Tool description automatically becomes the 'description' field.
+
+ This tool performs a specific operation on the service. It validates all inputs
+ using the ServiceToolInput Pydantic model before processing.
+
+ Args:
+ params (ServiceToolInput): Validated input parameters containing:
+ - param1 (str): First parameter description
+ - param2 (Optional[int]): Optional parameter with default
+ - tags (Optional[List[str]]): List of tags
+
+ Returns:
+ str: JSON-formatted response containing operation results
+ '''
+ # Implementation here
+ pass
+```
+
+## Pydantic v2 Key Features
+
+- Use `model_config` instead of nested `Config` class
+- Use `field_validator` instead of deprecated `validator`
+- Use `model_dump()` instead of deprecated `dict()`
+- Validators require `@classmethod` decorator
+- Type hints are required for validator methods
+
+```python
+from pydantic import BaseModel, Field, field_validator, ConfigDict
+
+class CreateUserInput(BaseModel):
+ model_config = ConfigDict(
+ str_strip_whitespace=True,
+ validate_assignment=True
+ )
+
+ name: str = Field(..., description="User's full name", min_length=1, max_length=100)
+ email: str = Field(..., description="User's email address", pattern=r'^[\w\.-]+@[\w\.-]+\.\w+$')
+ age: int = Field(..., description="User's age", ge=0, le=150)
+
+ @field_validator('email')
+ @classmethod
+ def validate_email(cls, v: str) -> str:
+ if not v.strip():
+ raise ValueError("Email cannot be empty")
+ return v.lower()
+```
+
+## Response Format Options
+
+Support multiple output formats for flexibility:
+
+```python
+from enum import Enum
+
+class ResponseFormat(str, Enum):
+ '''Output format for tool responses.'''
+ MARKDOWN = "markdown"
+ JSON = "json"
+
+class UserSearchInput(BaseModel):
+ query: str = Field(..., description="Search query")
+ response_format: ResponseFormat = Field(
+ default=ResponseFormat.MARKDOWN,
+ description="Output format: 'markdown' for human-readable or 'json' for machine-readable"
+ )
+```
+
+**Markdown format**:
+- Use headers, lists, and formatting for clarity
+- Convert timestamps to human-readable format (e.g., "2024-01-15 10:30:00 UTC" instead of epoch)
+- Show display names with IDs in parentheses (e.g., "@john.doe (U123456)")
+- Omit verbose metadata (e.g., show only one profile image URL, not all sizes)
+- Group related information logically
+
+**JSON format**:
+- Return complete, structured data suitable for programmatic processing
+- Include all available fields and metadata
+- Use consistent field names and types
+
+## Pagination Implementation
+
+For tools that list resources:
+
+```python
+class ListInput(BaseModel):
+ limit: Optional[int] = Field(default=20, description="Maximum results to return", ge=1, le=100)
+ offset: Optional[int] = Field(default=0, description="Number of results to skip for pagination", ge=0)
+
+async def list_items(params: ListInput) -> str:
+ # Make API request with pagination
+ data = await api_request(limit=params.limit, offset=params.offset)
+
+ # Return pagination info
+ response = {
+ "total": data["total"],
+ "count": len(data["items"]),
+ "offset": params.offset,
+ "items": data["items"],
+ "has_more": data["total"] > params.offset + len(data["items"]),
+ "next_offset": params.offset + len(data["items"]) if data["total"] > params.offset + len(data["items"]) else None
+ }
+ return json.dumps(response, indent=2)
+```
+
+## Error Handling
+
+Provide clear, actionable error messages:
+
+```python
+def _handle_api_error(e: Exception) -> str:
+ '''Consistent error formatting across all tools.'''
+ if isinstance(e, httpx.HTTPStatusError):
+ if e.response.status_code == 404:
+ return "Error: Resource not found. Please check the ID is correct."
+ elif e.response.status_code == 403:
+ return "Error: Permission denied. You don't have access to this resource."
+ elif e.response.status_code == 429:
+ return "Error: Rate limit exceeded. Please wait before making more requests."
+ return f"Error: API request failed with status {e.response.status_code}"
+ elif isinstance(e, httpx.TimeoutException):
+ return "Error: Request timed out. Please try again."
+ return f"Error: Unexpected error occurred: {type(e).__name__}"
+```
+
+## Shared Utilities
+
+Extract common functionality into reusable functions:
+
+```python
+# Shared API request function
+async def _make_api_request(endpoint: str, method: str = "GET", **kwargs) -> dict:
+ '''Reusable function for all API calls.'''
+ async with httpx.AsyncClient() as client:
+ response = await client.request(
+ method,
+ f"{API_BASE_URL}/{endpoint}",
+ timeout=30.0,
+ **kwargs
+ )
+ response.raise_for_status()
+ return response.json()
+```
+
+## Async/Await Best Practices
+
+Always use async/await for network requests and I/O operations:
+
+```python
+# Good: Async network request
+async def fetch_data(resource_id: str) -> dict:
+ async with httpx.AsyncClient() as client:
+ response = await client.get(f"{API_URL}/resource/{resource_id}")
+ response.raise_for_status()
+ return response.json()
+
+# Bad: Synchronous request
+def fetch_data(resource_id: str) -> dict:
+ response = requests.get(f"{API_URL}/resource/{resource_id}") # Blocks
+ return response.json()
+```
+
+## Type Hints
+
+Use type hints throughout:
+
+```python
+from typing import Optional, List, Dict, Any
+
+async def get_user(user_id: str) -> Dict[str, Any]:
+ data = await fetch_user(user_id)
+ return {"id": data["id"], "name": data["name"]}
+```
+
+## Tool Docstrings
+
+Every tool must have comprehensive docstrings with explicit type information:
+
+```python
+async def search_users(params: UserSearchInput) -> str:
+ '''
+ Search for users in the Example system by name, email, or team.
+
+ This tool searches across all user profiles in the Example platform,
+ supporting partial matches and various search filters. It does NOT
+ create or modify users, only searches existing ones.
+
+ Args:
+ params (UserSearchInput): Validated input parameters containing:
+ - query (str): Search string to match against names/emails (e.g., "john", "@example.com", "team:marketing")
+ - limit (Optional[int]): Maximum results to return, between 1-100 (default: 20)
+ - offset (Optional[int]): Number of results to skip for pagination (default: 0)
+
+ Returns:
+ str: JSON-formatted string containing search results with the following schema:
+
+ Success response:
+ {
+ "total": int, # Total number of matches found
+ "count": int, # Number of results in this response
+ "offset": int, # Current pagination offset
+ "users": [
+ {
+ "id": str, # User ID (e.g., "U123456789")
+ "name": str, # Full name (e.g., "John Doe")
+ "email": str, # Email address (e.g., "john@example.com")
+ "team": str # Team name (e.g., "Marketing") - optional
+ }
+ ]
+ }
+
+ Error response:
+        "Error: <error message>" or "No users found matching '<query>'"
+
+ Examples:
+ - Use when: "Find all marketing team members" -> params with query="team:marketing"
+ - Use when: "Search for John's account" -> params with query="john"
+ - Don't use when: You need to create a user (use example_create_user instead)
+ - Don't use when: You have a user ID and need full details (use example_get_user instead)
+
+ Error Handling:
+ - Input validation errors are handled by Pydantic model
+ - Returns "Error: Rate limit exceeded" if too many requests (429 status)
+ - Returns "Error: Invalid API authentication" if API key is invalid (401 status)
+ - Returns formatted list of results or "No users found matching 'query'"
+ '''
+```
+
+## Complete Example
+
+See below for a complete Python MCP server example:
+
+```python
+#!/usr/bin/env python3
+'''
+MCP Server for Example Service.
+
+This server provides tools to interact with Example API, including user search,
+project management, and data export capabilities.
+'''
+
+from typing import Optional, List, Dict, Any
+from enum import Enum
+import httpx
+from pydantic import BaseModel, Field, field_validator, ConfigDict
+from mcp.server.fastmcp import FastMCP
+
+# Initialize the MCP server
+mcp = FastMCP("example_mcp")
+
+# Constants
+API_BASE_URL = "https://api.example.com/v1"
+
+# Enums
+class ResponseFormat(str, Enum):
+ '''Output format for tool responses.'''
+ MARKDOWN = "markdown"
+ JSON = "json"
+
+# Pydantic Models for Input Validation
+class UserSearchInput(BaseModel):
+ '''Input model for user search operations.'''
+ model_config = ConfigDict(
+ str_strip_whitespace=True,
+ validate_assignment=True
+ )
+
+ query: str = Field(..., description="Search string to match against names/emails", min_length=2, max_length=200)
+ limit: Optional[int] = Field(default=20, description="Maximum results to return", ge=1, le=100)
+ offset: Optional[int] = Field(default=0, description="Number of results to skip for pagination", ge=0)
+ response_format: ResponseFormat = Field(default=ResponseFormat.MARKDOWN, description="Output format")
+
+ @field_validator('query')
+ @classmethod
+ def validate_query(cls, v: str) -> str:
+ if not v.strip():
+ raise ValueError("Query cannot be empty or whitespace only")
+ return v.strip()
+
+# Shared utility functions
+async def _make_api_request(endpoint: str, method: str = "GET", **kwargs) -> dict:
+ '''Reusable function for all API calls.'''
+ async with httpx.AsyncClient() as client:
+ response = await client.request(
+ method,
+ f"{API_BASE_URL}/{endpoint}",
+ timeout=30.0,
+ **kwargs
+ )
+ response.raise_for_status()
+ return response.json()
+
+def _handle_api_error(e: Exception) -> str:
+ '''Consistent error formatting across all tools.'''
+ if isinstance(e, httpx.HTTPStatusError):
+ if e.response.status_code == 404:
+ return "Error: Resource not found. Please check the ID is correct."
+ elif e.response.status_code == 403:
+ return "Error: Permission denied. You don't have access to this resource."
+ elif e.response.status_code == 429:
+ return "Error: Rate limit exceeded. Please wait before making more requests."
+ return f"Error: API request failed with status {e.response.status_code}"
+ elif isinstance(e, httpx.TimeoutException):
+ return "Error: Request timed out. Please try again."
+ return f"Error: Unexpected error occurred: {type(e).__name__}"
+
+# Tool definitions
+@mcp.tool(
+ name="example_search_users",
+ annotations={
+ "title": "Search Example Users",
+ "readOnlyHint": True,
+ "destructiveHint": False,
+ "idempotentHint": True,
+ "openWorldHint": True
+ }
+)
+async def example_search_users(params: UserSearchInput) -> str:
+ '''Search for users in the Example system by name, email, or team.
+
+ [Full docstring as shown above]
+ '''
+ try:
+ # Make API request using validated parameters
+ data = await _make_api_request(
+ "users/search",
+ params={
+ "q": params.query,
+ "limit": params.limit,
+ "offset": params.offset
+ }
+ )
+
+ users = data.get("users", [])
+ total = data.get("total", 0)
+
+ if not users:
+ return f"No users found matching '{params.query}'"
+
+ # Format response based on requested format
+ if params.response_format == ResponseFormat.MARKDOWN:
+ lines = [f"# User Search Results: '{params.query}'", ""]
+ lines.append(f"Found {total} users (showing {len(users)})")
+ lines.append("")
+
+ for user in users:
+ lines.append(f"## {user['name']} ({user['id']})")
+ lines.append(f"- **Email**: {user['email']}")
+ if user.get('team'):
+ lines.append(f"- **Team**: {user['team']}")
+ lines.append("")
+
+ return "\n".join(lines)
+
+ else:
+ # Machine-readable JSON format
+ import json
+ response = {
+ "total": total,
+ "count": len(users),
+ "offset": params.offset,
+ "users": users
+ }
+ return json.dumps(response, indent=2)
+
+ except Exception as e:
+ return _handle_api_error(e)
+
+if __name__ == "__main__":
+ mcp.run()
+```
+
+---
+
+## Advanced FastMCP Features
+
+### Context Parameter Injection
+
+FastMCP can automatically inject a `Context` parameter into tools for advanced capabilities like logging, progress reporting, resource reading, and user interaction:
+
+```python
+from mcp.server.fastmcp import FastMCP, Context
+
+mcp = FastMCP("example_mcp")
+
+@mcp.tool()
+async def advanced_search(query: str, ctx: Context) -> str:
+ '''Advanced tool with context access for logging and progress.'''
+
+ # Report progress for long operations
+ await ctx.report_progress(0.25, "Starting search...")
+
+ # Log information for debugging
+ await ctx.log_info("Processing query", {"query": query, "timestamp": datetime.now()})
+
+ # Perform search
+ results = await search_api(query)
+ await ctx.report_progress(0.75, "Formatting results...")
+
+ # Access server configuration
+ server_name = ctx.fastmcp.name
+
+ return format_results(results)
+
+@mcp.tool()
+async def interactive_tool(resource_id: str, ctx: Context) -> str:
+ '''Tool that can request additional input from users.'''
+
+ # Request sensitive information when needed
+ api_key = await ctx.elicit(
+ prompt="Please provide your API key:",
+ input_type="password"
+ )
+
+ # Use the provided key
+ return await api_call(resource_id, api_key)
+```
+
+**Context capabilities:**
+- `ctx.report_progress(progress, message)` - Report progress for long operations
+- `ctx.log_info(message, data)` / `ctx.log_error()` / `ctx.log_debug()` - Logging
+- `ctx.elicit(prompt, input_type)` - Request input from users
+- `ctx.fastmcp.name` - Access server configuration
+- `ctx.read_resource(uri)` - Read MCP resources
+
+### Resource Registration
+
+Expose data as resources for efficient, template-based access:
+
+```python
+@mcp.resource("file://documents/{name}")
+async def get_document(name: str) -> str:
+ '''Expose documents as MCP resources.
+
+ Resources are useful for static or semi-static data that doesn't
+ require complex parameters. They use URI templates for flexible access.
+ '''
+ document_path = f"./docs/{name}"
+ with open(document_path, "r") as f:
+ return f.read()
+
+@mcp.resource("config://settings/{key}")
+async def get_setting(key: str, ctx: Context) -> str:
+ '''Expose configuration as resources with context.'''
+ settings = await load_settings()
+ return json.dumps(settings.get(key, {}))
+```
+
+**When to use Resources vs Tools:**
+- **Resources**: For data access with simple parameters (URI templates)
+- **Tools**: For complex operations with validation and business logic
+
+### Structured Output Types
+
+FastMCP supports multiple return types beyond strings:
+
+```python
+from typing import TypedDict
+from dataclasses import dataclass
+from pydantic import BaseModel
+
+# TypedDict for structured returns
+class UserData(TypedDict):
+ id: str
+ name: str
+ email: str
+
+@mcp.tool()
+async def get_user_typed(user_id: str) -> UserData:
+ '''Returns structured data - FastMCP handles serialization.'''
+ return {"id": user_id, "name": "John Doe", "email": "john@example.com"}
+
+# Pydantic models for complex validation
+class DetailedUser(BaseModel):
+ id: str
+ name: str
+ email: str
+ created_at: datetime
+ metadata: Dict[str, Any]
+
+@mcp.tool()
+async def get_user_detailed(user_id: str) -> DetailedUser:
+ '''Returns Pydantic model - automatically generates schema.'''
+ user = await fetch_user(user_id)
+ return DetailedUser(**user)
+```
+
+### Lifespan Management
+
+Initialize resources that persist across requests:
+
+```python
+from contextlib import asynccontextmanager
+
+@asynccontextmanager
+async def app_lifespan(server: FastMCP):
+ '''Manage resources that live for the server's lifetime.'''
+ # Initialize connections, load config, etc.
+ db = await connect_to_database()
+ config = load_configuration()
+
+ # Make available to all tools
+ yield {"db": db, "config": config}
+
+ # Cleanup on shutdown
+ await db.close()
+
+mcp = FastMCP("example_mcp", lifespan=app_lifespan)
+
+@mcp.tool()
+async def query_data(query: str, ctx: Context) -> str:
+ '''Access lifespan resources through context.'''
+    db = ctx.request_context.lifespan_context["db"]
+ results = await db.query(query)
+ return format_results(results)
+```
+
+### Transport Options
+
+FastMCP supports two main transport mechanisms:
+
+```python
+# stdio transport (for local tools) - default
+if __name__ == "__main__":
+ mcp.run()
+
+# Streamable HTTP transport (for remote servers)
+if __name__ == "__main__":
+ mcp.run(transport="streamable_http", port=8000)
+```
+
+**Transport selection:**
+- **stdio**: Command-line tools, local integrations, subprocess execution
+- **Streamable HTTP**: Web services, remote access, multiple clients
+
+---
+
+## Code Best Practices
+
+### Code Composability and Reusability
+
+Your implementation MUST prioritize composability and code reuse:
+
+1. **Extract Common Functionality**:
+ - Create reusable helper functions for operations used across multiple tools
+ - Build shared API clients for HTTP requests instead of duplicating code
+ - Centralize error handling logic in utility functions
+ - Extract business logic into dedicated functions that can be composed
+ - Extract shared markdown or JSON field selection & formatting functionality
+
+2. **Avoid Duplication**:
+ - NEVER copy-paste similar code between tools
+ - If you find yourself writing similar logic twice, extract it into a function
+ - Common operations like pagination, filtering, field selection, and formatting should be shared
+ - Authentication/authorization logic should be centralized
+
+### Python-Specific Best Practices
+
+1. **Use Type Hints**: Always include type annotations for function parameters and return values
+2. **Pydantic Models**: Define clear Pydantic models for all input validation
+3. **Avoid Manual Validation**: Let Pydantic handle input validation with constraints
+4. **Proper Imports**: Group imports (standard library, third-party, local)
+5. **Error Handling**: Use specific exception types (httpx.HTTPStatusError, not generic Exception)
+6. **Async Context Managers**: Use `async with` for resources that need cleanup
+7. **Constants**: Define module-level constants in UPPER_CASE
+
+## Quality Checklist
+
+Before finalizing your Python MCP server implementation, ensure:
+
+### Strategic Design
+- [ ] Tools enable complete workflows, not just API endpoint wrappers
+- [ ] Tool names reflect natural task subdivisions
+- [ ] Response formats optimize for agent context efficiency
+- [ ] Human-readable identifiers used where appropriate
+- [ ] Error messages guide agents toward correct usage
+
+### Implementation Quality
+- [ ] FOCUSED IMPLEMENTATION: Most important and valuable tools implemented
+- [ ] All tools have descriptive names and documentation
+- [ ] Return types are consistent across similar operations
+- [ ] Error handling is implemented for all external calls
+- [ ] Server name follows format: `{service}_mcp`
+- [ ] All network operations use async/await
+- [ ] Common functionality is extracted into reusable functions
+- [ ] Error messages are clear, actionable, and educational
+- [ ] Outputs are properly validated and formatted
+
+### Tool Configuration
+- [ ] All tools implement 'name' and 'annotations' in the decorator
+- [ ] Annotations correctly set (readOnlyHint, destructiveHint, idempotentHint, openWorldHint)
+- [ ] All tools use Pydantic BaseModel for input validation with Field() definitions
+- [ ] All Pydantic Fields have explicit types and descriptions with constraints
+- [ ] All tools have comprehensive docstrings with explicit input/output types
+- [ ] Docstrings include complete schema structure for dict/JSON returns
+- [ ] Pydantic models handle input validation (no manual validation needed)
+
+### Advanced Features (where applicable)
+- [ ] Context injection used for logging, progress, or elicitation
+- [ ] Resources registered for appropriate data endpoints
+- [ ] Lifespan management implemented for persistent connections
+- [ ] Structured output types used (TypedDict, Pydantic models)
+- [ ] Appropriate transport configured (stdio or streamable HTTP)
+
+### Code Quality
+- [ ] File includes proper imports including Pydantic imports
+- [ ] Pagination is properly implemented where applicable
+- [ ] Filtering options are provided for potentially large result sets
+- [ ] All async functions are properly defined with `async def`
+- [ ] HTTP client usage follows async patterns with proper context managers
+- [ ] Type hints are used throughout the code
+- [ ] Constants are defined at module level in UPPER_CASE
+
+### Testing
+- [ ] Server runs successfully: `python your_server.py --help`
+- [ ] All imports resolve correctly
+- [ ] Sample tool calls work as expected
+- [ ] Error scenarios handled gracefully
\ No newline at end of file
diff --git a/.claude/skills/mcp-builder/scripts/connections.py b/.claude/skills/mcp-builder/scripts/connections.py
new file mode 100644
index 0000000..ffcd0da
--- /dev/null
+++ b/.claude/skills/mcp-builder/scripts/connections.py
@@ -0,0 +1,151 @@
+"""Lightweight connection handling for MCP servers."""
+
+from abc import ABC, abstractmethod
+from contextlib import AsyncExitStack
+from typing import Any
+
+from mcp import ClientSession, StdioServerParameters
+from mcp.client.sse import sse_client
+from mcp.client.stdio import stdio_client
+from mcp.client.streamable_http import streamablehttp_client
+
+
+class MCPConnection(ABC):
+ """Base class for MCP server connections."""
+
+ def __init__(self):
+ self.session = None
+ self._stack = None
+
+ @abstractmethod
+ def _create_context(self):
+ """Create the connection context based on connection type."""
+
+ async def __aenter__(self):
+ """Initialize MCP server connection."""
+ self._stack = AsyncExitStack()
+ await self._stack.__aenter__()
+
+ try:
+ ctx = self._create_context()
+ result = await self._stack.enter_async_context(ctx)
+
+ if len(result) == 2:
+ read, write = result
+ elif len(result) == 3:
+ read, write, _ = result
+ else:
+ raise ValueError(f"Unexpected context result: {result}")
+
+ session_ctx = ClientSession(read, write)
+ self.session = await self._stack.enter_async_context(session_ctx)
+ await self.session.initialize()
+ return self
+ except BaseException:
+ await self._stack.__aexit__(None, None, None)
+ raise
+
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+ """Clean up MCP server connection resources."""
+ if self._stack:
+ await self._stack.__aexit__(exc_type, exc_val, exc_tb)
+ self.session = None
+ self._stack = None
+
+ async def list_tools(self) -> list[dict[str, Any]]:
+ """Retrieve available tools from the MCP server."""
+ response = await self.session.list_tools()
+ return [
+ {
+ "name": tool.name,
+ "description": tool.description,
+ "input_schema": tool.inputSchema,
+ }
+ for tool in response.tools
+ ]
+
+ async def call_tool(self, tool_name: str, arguments: dict[str, Any]) -> Any:
+ """Call a tool on the MCP server with provided arguments."""
+ result = await self.session.call_tool(tool_name, arguments=arguments)
+ return result.content
+
+
+class MCPConnectionStdio(MCPConnection):
+ """MCP connection using standard input/output."""
+
+ def __init__(self, command: str, args: list[str] = None, env: dict[str, str] = None):
+ super().__init__()
+ self.command = command
+ self.args = args or []
+ self.env = env
+
+ def _create_context(self):
+ return stdio_client(
+ StdioServerParameters(command=self.command, args=self.args, env=self.env)
+ )
+
+
+class MCPConnectionSSE(MCPConnection):
+ """MCP connection using Server-Sent Events."""
+
+ def __init__(self, url: str, headers: dict[str, str] = None):
+ super().__init__()
+ self.url = url
+ self.headers = headers or {}
+
+ def _create_context(self):
+ return sse_client(url=self.url, headers=self.headers)
+
+
+class MCPConnectionHTTP(MCPConnection):
+ """MCP connection using Streamable HTTP."""
+
+ def __init__(self, url: str, headers: dict[str, str] = None):
+ super().__init__()
+ self.url = url
+ self.headers = headers or {}
+
+ def _create_context(self):
+ return streamablehttp_client(url=self.url, headers=self.headers)
+
+
+def create_connection(
+ transport: str,
+ command: str = None,
+ args: list[str] = None,
+ env: dict[str, str] = None,
+ url: str = None,
+ headers: dict[str, str] = None,
+) -> MCPConnection:
+ """Factory function to create the appropriate MCP connection.
+
+ Args:
+ transport: Connection type ("stdio", "sse", or "http")
+ command: Command to run (stdio only)
+ args: Command arguments (stdio only)
+ env: Environment variables (stdio only)
+ url: Server URL (sse and http only)
+ headers: HTTP headers (sse and http only)
+
+ Returns:
+ MCPConnection instance
+ """
+ transport = transport.lower()
+
+ if transport == "stdio":
+ if not command:
+ raise ValueError("Command is required for stdio transport")
+ return MCPConnectionStdio(command=command, args=args, env=env)
+
+ elif transport == "sse":
+ if not url:
+ raise ValueError("URL is required for sse transport")
+ return MCPConnectionSSE(url=url, headers=headers)
+
+ elif transport in ["http", "streamable_http", "streamable-http"]:
+ if not url:
+ raise ValueError("URL is required for http transport")
+ return MCPConnectionHTTP(url=url, headers=headers)
+
+ else:
+ raise ValueError(f"Unsupported transport type: {transport}. Use 'stdio', 'sse', or 'http'")
diff --git a/.claude/skills/mcp-builder/scripts/evaluation.py b/.claude/skills/mcp-builder/scripts/evaluation.py
new file mode 100644
index 0000000..4177856
--- /dev/null
+++ b/.claude/skills/mcp-builder/scripts/evaluation.py
@@ -0,0 +1,373 @@
+"""MCP Server Evaluation Harness
+
+This script evaluates MCP servers by running test questions against them using Claude.
+"""
+
+import argparse
+import asyncio
+import json
+import re
+import sys
+import time
+import traceback
+import xml.etree.ElementTree as ET
+from pathlib import Path
+from typing import Any
+
+from anthropic import Anthropic
+
+from connections import create_connection
+
# System prompt for the evaluation agent. The XML tag names below must match
# what extract_xml_content() is asked to pull from the final answer
# ("summary", "feedback", "response"); without them the agent has no way to
# know which tags to emit.
EVALUATION_PROMPT = """You are an AI assistant with access to tools.

When given a task, you MUST:
1. Use the available tools to complete the task
2. Provide summary of each step in your approach, wrapped in <summary></summary> tags
3. Provide feedback on the tools provided, wrapped in <feedback></feedback> tags
4. Provide your final response, wrapped in <response></response> tags

Summary Requirements:
- In your <summary></summary> tags, you must explain:
  - The steps you took to complete the task
  - Which tools you used, in what order, and why
  - The inputs you provided to each tool
  - The outputs you received from each tool
  - A summary for how you arrived at the response

Feedback Requirements:
- In your <feedback></feedback> tags, provide constructive feedback on the tools:
  - Comment on tool names: Are they clear and descriptive?
  - Comment on input parameters: Are they well-documented? Are required vs optional parameters clear?
  - Comment on descriptions: Do they accurately describe what the tool does?
  - Comment on any errors encountered during tool usage: Did the tool fail to execute? Did the tool return too many tokens?
  - Identify specific areas for improvement and explain WHY they would help
  - Be specific and actionable in your suggestions

Response Requirements:
- Your response should be concise and directly address what was asked
- Always wrap your final response in <response></response> tags
- If you cannot solve the task return <response>NOT_FOUND</response>
- For numeric responses, provide just the number
- For IDs, provide just the ID
- For names or text, provide the exact text requested
- Your response should go last"""
+
+
def parse_evaluation_file(file_path: Path) -> list[dict[str, Any]]:
    """Parse an XML evaluation file into question/answer dicts.

    The file is expected to contain qa_pair elements, each with a question
    and an answer child. Pairs missing either child are skipped.

    Args:
        file_path: Path to the evaluation XML file.

    Returns:
        A list of {"question": ..., "answer": ...} dicts with whitespace
        stripped, or an empty list if the file cannot be parsed (the error
        is printed, not raised).
    """
    try:
        root = ET.parse(file_path).getroot()
        pairs = []
        for node in root.findall(".//qa_pair"):
            question = node.find("question")
            answer = node.find("answer")
            # Explicit None checks: an empty Element is falsy, so truthiness
            # would wrongly skip tags with no children.
            if question is None or answer is None:
                continue
            pairs.append({
                "question": (question.text or "").strip(),
                "answer": (answer.text or "").strip(),
            })
        return pairs
    except Exception as exc:
        print(f"Error parsing evaluation file {file_path}: {exc}")
        return []
+
+
+def extract_xml_content(text: str, tag: str) -> str | None:
+ """Extract content from XML tags."""
+ pattern = rf"<{tag}>(.*?){tag}>"
+ matches = re.findall(pattern, text, re.DOTALL)
+ return matches[-1].strip() if matches else None
+
+
async def agent_loop(
    client: "Anthropic",
    model: str,
    question: str,
    tools: list[dict[str, Any]],
    connection: Any,
) -> tuple[str | None, dict[str, Any]]:
    """Run one agent conversation until Claude stops requesting tools.

    Args:
        client: Anthropic API client.
        model: Model name used for every turn.
        question: The task posed to the agent.
        tools: Tool definitions (Anthropic tool schema) exposed to the model.
        connection: Active MCP connection used to execute tool calls.

    Returns:
        Tuple of (text of the final assistant turn, or None if it had no
        text block; per-tool metrics mapping tool name -> {"count", "durations"}).
    """
    messages = [{"role": "user", "content": question}]

    # client.messages.create is synchronous; run it off the event loop.
    response = await asyncio.to_thread(
        client.messages.create,
        model=model,
        max_tokens=4096,
        system=EVALUATION_PROMPT,
        messages=messages,
        tools=tools,
    )
    messages.append({"role": "assistant", "content": response.content})

    tool_metrics = {}

    while response.stop_reason == "tool_use":
        # The model may request several tools in a single turn. Every
        # tool_use block needs a matching tool_result in the next user
        # message, so handle them all (the original serviced only the first,
        # which makes the follow-up request invalid on parallel tool calls).
        tool_results = []
        for block in response.content:
            if block.type != "tool_use":
                continue

            started = time.time()
            try:
                raw = await connection.call_tool(block.name, block.input)
                result_text = json.dumps(raw) if isinstance(raw, (dict, list)) else str(raw)
            except Exception as e:
                # Report the failure to the model (and its feedback section)
                # instead of aborting the whole evaluation run.
                result_text = f"Error executing tool {block.name}: {str(e)}\n"
                result_text += traceback.format_exc()
            elapsed = time.time() - started

            metrics = tool_metrics.setdefault(block.name, {"count": 0, "durations": []})
            metrics["count"] += 1
            metrics["durations"].append(elapsed)

            tool_results.append({
                "type": "tool_result",
                "tool_use_id": block.id,
                "content": result_text,
            })

        messages.append({"role": "user", "content": tool_results})

        response = await asyncio.to_thread(
            client.messages.create,
            model=model,
            max_tokens=4096,
            system=EVALUATION_PROMPT,
            messages=messages,
            tools=tools,
        )
        messages.append({"role": "assistant", "content": response.content})

    # Final turn: return its first text block, or None if there is none.
    response_text = next(
        (block.text for block in response.content if hasattr(block, "text")),
        None,
    )
    return response_text, tool_metrics
+
+
async def evaluate_single_task(
    client: "Anthropic",
    model: str,
    qa_pair: dict[str, Any],
    tools: list[dict[str, Any]],
    connection: Any,
    task_index: int,
) -> dict[str, Any]:
    """Run one QA pair through the agent loop and score the result.

    Args:
        client: Anthropic API client.
        model: Claude model name.
        qa_pair: Dict with "question" and "answer" keys.
        tools: Tool definitions exposed to the model.
        connection: Active MCP connection.
        task_index: Zero-based task number (logging only).

    Returns:
        Result dict with the question, expected/actual answers, a 0/1
        exact-string-match score, timing, per-tool metrics, and the model's
        own summary and feedback sections (None when a tag was absent).
    """
    start_time = time.time()

    print(f"Task {task_index + 1}: Running task with question: {qa_pair['question']}")
    response, tool_metrics = await agent_loop(client, model, qa_pair["question"], tools, connection)

    # agent_loop returns None when the final turn has no text block; treat
    # that as an empty answer instead of crashing the regex search.
    response = response or ""
    response_value = extract_xml_content(response, "response")
    summary = extract_xml_content(response, "summary")
    feedback = extract_xml_content(response, "feedback")

    duration_seconds = time.time() - start_time

    return {
        "question": qa_pair["question"],
        "expected": qa_pair["answer"],
        "actual": response_value,
        # Exact string comparison against the ground-truth answer.
        "score": int(response_value == qa_pair["answer"]) if response_value else 0,
        "total_duration": duration_seconds,
        "tool_calls": tool_metrics,
        "num_tool_calls": sum(len(metrics["durations"]) for metrics in tool_metrics.values()),
        "summary": summary,
        "feedback": feedback,
    }
+
+
+# Markdown skeleton for the report header; run_evaluation() fills the
+# aggregate accuracy/latency/tool-call metrics via str.format().
+REPORT_HEADER = """
+# Evaluation Report
+
+## Summary
+
+- **Accuracy**: {correct}/{total} ({accuracy:.1f}%)
+- **Average Task Duration**: {average_duration_s:.2f}s
+- **Average Tool Calls per Task**: {average_tool_calls:.2f}
+- **Total Tool Calls**: {total_tool_calls}
+
+---
+"""
+
+# Markdown skeleton for one task's section, appended once per QA pair by
+# run_evaluation() via str.format().
+TASK_TEMPLATE = """
+### Task {task_num}
+
+**Question**: {question}
+**Ground Truth Answer**: `{expected_answer}`
+**Actual Answer**: `{actual_answer}`
+**Correct**: {correct_indicator}
+**Duration**: {total_duration:.2f}s
+**Tool Calls**: {tool_calls}
+
+**Summary**
+{summary}
+
+**Feedback**
+{feedback}
+
+---
+"""
+
+
async def run_evaluation(
    eval_path: Path,
    connection: Any,
    model: str = "claude-3-7-sonnet-20250219",
) -> str:
    """Run every QA task in eval_path against the MCP server's tools.

    Args:
        eval_path: Path to the evaluation XML file.
        connection: Connected MCP connection (tools are listed from it and
            tool calls execute through it).
        model: Claude model name used for the agent.

    Returns:
        A markdown report: aggregate metrics followed by one section per task.
    """
    print("🚀 Starting Evaluation")

    client = Anthropic()

    tools = await connection.list_tools()
    print(f"📋 Loaded {len(tools)} tools from MCP server")

    qa_pairs = parse_evaluation_file(eval_path)
    print(f"📋 Loaded {len(qa_pairs)} evaluation tasks")

    results = []
    for index, pair in enumerate(qa_pairs):
        print(f"Processing task {index + 1}/{len(qa_pairs)}")
        results.append(
            await evaluate_single_task(client, model, pair, tools, connection, index)
        )

    task_count = len(results)
    correct = sum(item["score"] for item in results)
    total_tool_calls = sum(item["num_tool_calls"] for item in results)

    # All averages fall back to 0 when the evaluation file yields no tasks.
    report = REPORT_HEADER.format(
        correct=correct,
        total=task_count,
        accuracy=(correct / task_count) * 100 if task_count else 0,
        average_duration_s=(
            sum(item["total_duration"] for item in results) / task_count
            if task_count else 0
        ),
        average_tool_calls=(total_tool_calls / task_count) if task_count else 0,
        total_tool_calls=total_tool_calls,
    )

    sections = []
    for index, (pair, item) in enumerate(zip(qa_pairs, results)):
        sections.append(
            TASK_TEMPLATE.format(
                task_num=index + 1,
                question=pair["question"],
                expected_answer=pair["answer"],
                actual_answer=item["actual"] or "N/A",
                correct_indicator="✅" if item["score"] else "❌",
                total_duration=item["total_duration"],
                tool_calls=json.dumps(item["tool_calls"], indent=2),
                summary=item["summary"] or "N/A",
                feedback=item["feedback"] or "N/A",
            )
        )
    return report + "".join(sections)
+
+
def parse_headers(header_list: list[str]) -> dict[str, str]:
    """Convert 'Key: Value' strings into a header dict.

    Only the first ':' splits, so values may themselves contain colons.
    Entries without a ':' are skipped with a printed warning; a falsy input
    (None or empty list) yields an empty dict.
    """
    parsed = {}
    for raw in header_list or []:
        name, sep, value = raw.partition(":")
        if sep:
            parsed[name.strip()] = value.strip()
        else:
            print(f"Warning: Ignoring malformed header: {raw}")
    return parsed
+
+
def parse_env_vars(env_list: list[str]) -> dict[str, str]:
    """Convert 'KEY=VALUE' strings into an environment-variable dict.

    Only the first '=' splits, so values may themselves contain '='.
    Entries without an '=' are skipped with a printed warning; a falsy
    input (None or empty list) yields an empty dict.
    """
    parsed = {}
    for entry in env_list or []:
        name, sep, value = entry.partition("=")
        if sep:
            parsed[name.strip()] = value.strip()
        else:
            print(f"Warning: Ignoring malformed environment variable: {entry}")
    return parsed
+
+
async def main():
    """CLI entry point: parse arguments, connect to the MCP server, run the
    evaluation, and write or print the markdown report."""
    parser = argparse.ArgumentParser(
        description="Evaluate MCP servers using test questions",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Evaluate a local stdio MCP server
  python evaluation.py -t stdio -c python -a my_server.py eval.xml

  # Evaluate an SSE MCP server
  python evaluation.py -t sse -u https://example.com/mcp -H "Authorization: Bearer token" eval.xml

  # Evaluate an HTTP MCP server with custom model
  python evaluation.py -t http -u https://example.com/mcp -m claude-3-5-sonnet-20241022 eval.xml
        """,
    )

    parser.add_argument("eval_file", type=Path, help="Path to evaluation XML file")
    parser.add_argument("-t", "--transport", choices=["stdio", "sse", "http"], default="stdio", help="Transport type (default: stdio)")
    parser.add_argument("-m", "--model", default="claude-3-7-sonnet-20250219", help="Claude model to use (default: claude-3-7-sonnet-20250219)")

    stdio_group = parser.add_argument_group("stdio options")
    stdio_group.add_argument("-c", "--command", help="Command to run MCP server (stdio only)")
    stdio_group.add_argument("-a", "--args", nargs="+", help="Arguments for the command (stdio only)")
    stdio_group.add_argument("-e", "--env", nargs="+", help="Environment variables in KEY=VALUE format (stdio only)")

    remote_group = parser.add_argument_group("sse/http options")
    remote_group.add_argument("-u", "--url", help="MCP server URL (sse/http only)")
    remote_group.add_argument("-H", "--header", nargs="+", dest="headers", help="HTTP headers in 'Key: Value' format (sse/http only)")

    parser.add_argument("-o", "--output", type=Path, help="Output file for evaluation report (default: stdout)")

    args = parser.parse_args()

    if not args.eval_file.exists():
        print(f"Error: Evaluation file not found: {args.eval_file}")
        sys.exit(1)

    headers = parse_headers(args.headers) if args.headers else None
    env_vars = parse_env_vars(args.env) if args.env else None

    try:
        connection = create_connection(
            transport=args.transport,
            command=args.command,
            args=args.args,
            env=env_vars,
            url=args.url,
            headers=headers,
        )
    except ValueError as e:
        print(f"Error: {e}")
        sys.exit(1)

    print(f"🔗 Connecting to MCP server via {args.transport}...")

    # The connection object is an async context manager; the session closes
    # when the block exits, so the report is rendered inside it.
    async with connection:
        print("✅ Connected successfully")
        report = await run_evaluation(args.eval_file, connection, args.model)

    if args.output:
        # Force UTF-8: the report contains emoji (✅/❌), and write_text
        # without an encoding uses the locale default (e.g. cp1252 on
        # Windows), which raises UnicodeEncodeError.
        args.output.write_text(report, encoding="utf-8")
        print(f"\n✅ Report saved to {args.output}")
    else:
        print("\n" + report)


if __name__ == "__main__":
    asyncio.run(main())
diff --git a/.claude/skills/mcp-builder/scripts/example_evaluation.xml b/.claude/skills/mcp-builder/scripts/example_evaluation.xml
new file mode 100644
index 0000000..41e4459
--- /dev/null
+++ b/.claude/skills/mcp-builder/scripts/example_evaluation.xml
@@ -0,0 +1,22 @@
+<evaluation>
+  <qa_pair>
+    <question>Calculate the compound interest on $10,000 invested at 5% annual interest rate, compounded monthly for 3 years. What is the final amount in dollars (rounded to 2 decimal places)?</question>
+    <answer>11614.72</answer>
+  </qa_pair>
+  <qa_pair>
+    <question>A projectile is launched at a 45-degree angle with an initial velocity of 50 m/s. Calculate the total distance (in meters) it has traveled from the launch point after 2 seconds, assuming g=9.8 m/s². Round to 2 decimal places.</question>
+    <answer>87.25</answer>
+  </qa_pair>
+  <qa_pair>
+    <question>A sphere has a volume of 500 cubic meters. Calculate its surface area in square meters. Round to 2 decimal places.</question>
+    <answer>304.65</answer>
+  </qa_pair>
+  <qa_pair>
+    <question>Calculate the population standard deviation of this dataset: [12, 15, 18, 22, 25, 30, 35]. Round to 2 decimal places.</question>
+    <answer>7.61</answer>
+  </qa_pair>
+  <qa_pair>
+    <question>Calculate the pH of a solution with a hydrogen ion concentration of 3.5 × 10^-5 M. Round to 2 decimal places.</question>
+    <answer>4.46</answer>
+  </qa_pair>
+</evaluation>
diff --git a/.claude/skills/mcp-builder/scripts/requirements.txt b/.claude/skills/mcp-builder/scripts/requirements.txt
new file mode 100644
index 0000000..e73e5d1
--- /dev/null
+++ b/.claude/skills/mcp-builder/scripts/requirements.txt
@@ -0,0 +1,2 @@
+anthropic>=0.39.0
+mcp>=1.1.0
diff --git a/.claude/skills/pdf/.openskills.json b/.claude/skills/pdf/.openskills.json
new file mode 100644
index 0000000..a8062f6
--- /dev/null
+++ b/.claude/skills/pdf/.openskills.json
@@ -0,0 +1,7 @@
+{
+ "source": "anthropics/skills",
+ "sourceType": "git",
+ "repoUrl": "https://github.com/anthropics/skills",
+ "subpath": "skills\\pdf",
+ "installedAt": "2026-03-02T09:19:50.119Z"
+}
\ No newline at end of file
diff --git a/.claude/skills/pdf/LICENSE.txt b/.claude/skills/pdf/LICENSE.txt
new file mode 100644
index 0000000..c55ab42
--- /dev/null
+++ b/.claude/skills/pdf/LICENSE.txt
@@ -0,0 +1,30 @@
+© 2025 Anthropic, PBC. All rights reserved.
+
+LICENSE: Use of these materials (including all code, prompts, assets, files,
+and other components of this Skill) is governed by your agreement with
+Anthropic regarding use of Anthropic's services. If no separate agreement
+exists, use is governed by Anthropic's Consumer Terms of Service or
+Commercial Terms of Service, as applicable:
+https://www.anthropic.com/legal/consumer-terms
+https://www.anthropic.com/legal/commercial-terms
+Your applicable agreement is referred to as the "Agreement." "Services" are
+as defined in the Agreement.
+
+ADDITIONAL RESTRICTIONS: Notwithstanding anything in the Agreement to the
+contrary, users may not:
+
+- Extract these materials from the Services or retain copies of these
+ materials outside the Services
+- Reproduce or copy these materials, except for temporary copies created
+ automatically during authorized use of the Services
+- Create derivative works based on these materials
+- Distribute, sublicense, or transfer these materials to any third party
+- Make, offer to sell, sell, or import any inventions embodied in these
+ materials
+- Reverse engineer, decompile, or disassemble these materials
+
+The receipt, viewing, or possession of these materials does not convey or
+imply any license or right beyond those expressly granted above.
+
+Anthropic retains all right, title, and interest in these materials,
+including all copyrights, patents, and other intellectual property rights.
diff --git a/.claude/skills/pdf/SKILL.md b/.claude/skills/pdf/SKILL.md
new file mode 100644
index 0000000..d3e046a
--- /dev/null
+++ b/.claude/skills/pdf/SKILL.md
@@ -0,0 +1,314 @@
+---
+name: pdf
+description: Use this skill whenever the user wants to do anything with PDF files. This includes reading or extracting text/tables from PDFs, combining or merging multiple PDFs into one, splitting PDFs apart, rotating pages, adding watermarks, creating new PDFs, filling PDF forms, encrypting/decrypting PDFs, extracting images, and OCR on scanned PDFs to make them searchable. If the user mentions a .pdf file or asks to produce one, use this skill.
+license: Proprietary. LICENSE.txt has complete terms
+---
+
+# PDF Processing Guide
+
+## Overview
+
+This guide covers essential PDF processing operations using Python libraries and command-line tools. For advanced features, JavaScript libraries, and detailed examples, see REFERENCE.md. If you need to fill out a PDF form, read FORMS.md and follow its instructions.
+
+## Quick Start
+
+```python
+from pypdf import PdfReader, PdfWriter
+
+# Read a PDF
+reader = PdfReader("document.pdf")
+print(f"Pages: {len(reader.pages)}")
+
+# Extract text
+text = ""
+for page in reader.pages:
+ text += page.extract_text()
+```
+
+## Python Libraries
+
+### pypdf - Basic Operations
+
+#### Merge PDFs
+```python
+from pypdf import PdfWriter, PdfReader
+
+writer = PdfWriter()
+for pdf_file in ["doc1.pdf", "doc2.pdf", "doc3.pdf"]:
+ reader = PdfReader(pdf_file)
+ for page in reader.pages:
+ writer.add_page(page)
+
+with open("merged.pdf", "wb") as output:
+ writer.write(output)
+```
+
+#### Split PDF
+```python
+reader = PdfReader("input.pdf")
+for i, page in enumerate(reader.pages):
+ writer = PdfWriter()
+ writer.add_page(page)
+ with open(f"page_{i+1}.pdf", "wb") as output:
+ writer.write(output)
+```
+
+#### Extract Metadata
+```python
+reader = PdfReader("document.pdf")
+meta = reader.metadata
+print(f"Title: {meta.title}")
+print(f"Author: {meta.author}")
+print(f"Subject: {meta.subject}")
+print(f"Creator: {meta.creator}")
+```
+
+#### Rotate Pages
+```python
+reader = PdfReader("input.pdf")
+writer = PdfWriter()
+
+page = reader.pages[0]
+page.rotate(90) # Rotate 90 degrees clockwise
+writer.add_page(page)
+
+with open("rotated.pdf", "wb") as output:
+ writer.write(output)
+```
+
+### pdfplumber - Text and Table Extraction
+
+#### Extract Text with Layout
+```python
+import pdfplumber
+
+with pdfplumber.open("document.pdf") as pdf:
+ for page in pdf.pages:
+ text = page.extract_text()
+ print(text)
+```
+
+#### Extract Tables
+```python
+with pdfplumber.open("document.pdf") as pdf:
+ for i, page in enumerate(pdf.pages):
+ tables = page.extract_tables()
+ for j, table in enumerate(tables):
+ print(f"Table {j+1} on page {i+1}:")
+ for row in table:
+ print(row)
+```
+
+#### Advanced Table Extraction
+```python
+import pandas as pd
+
+with pdfplumber.open("document.pdf") as pdf:
+ all_tables = []
+ for page in pdf.pages:
+ tables = page.extract_tables()
+ for table in tables:
+ if table: # Check if table is not empty
+ df = pd.DataFrame(table[1:], columns=table[0])
+ all_tables.append(df)
+
+# Combine all tables
+if all_tables:
+ combined_df = pd.concat(all_tables, ignore_index=True)
+ combined_df.to_excel("extracted_tables.xlsx", index=False)
+```
+
+### reportlab - Create PDFs
+
+#### Basic PDF Creation
+```python
+from reportlab.lib.pagesizes import letter
+from reportlab.pdfgen import canvas
+
+c = canvas.Canvas("hello.pdf", pagesize=letter)
+width, height = letter
+
+# Add text
+c.drawString(100, height - 100, "Hello World!")
+c.drawString(100, height - 120, "This is a PDF created with reportlab")
+
+# Add a line
+c.line(100, height - 140, 400, height - 140)
+
+# Save
+c.save()
+```
+
+#### Create PDF with Multiple Pages
+```python
+from reportlab.lib.pagesizes import letter
+from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, PageBreak
+from reportlab.lib.styles import getSampleStyleSheet
+
+doc = SimpleDocTemplate("report.pdf", pagesize=letter)
+styles = getSampleStyleSheet()
+story = []
+
+# Add content
+title = Paragraph("Report Title", styles['Title'])
+story.append(title)
+story.append(Spacer(1, 12))
+
+body = Paragraph("This is the body of the report. " * 20, styles['Normal'])
+story.append(body)
+story.append(PageBreak())
+
+# Page 2
+story.append(Paragraph("Page 2", styles['Heading1']))
+story.append(Paragraph("Content for page 2", styles['Normal']))
+
+# Build PDF
+doc.build(story)
+```
+
+#### Subscripts and Superscripts
+
+**IMPORTANT**: Never use Unicode subscript/superscript characters (₀₁₂₃₄₅₆₇₈₉, ⁰¹²³⁴⁵⁶⁷⁸⁹) in ReportLab PDFs. The built-in fonts do not include these glyphs, causing them to render as solid black boxes.
+
+Instead, use ReportLab's XML markup tags in Paragraph objects:
+```python
+from reportlab.platypus import Paragraph
+from reportlab.lib.styles import getSampleStyleSheet
+
+styles = getSampleStyleSheet()
+
+# Subscripts: use the <sub> tag
+chemical = Paragraph("H<sub>2</sub>O", styles['Normal'])
+
+# Superscripts: use the <super> tag
+squared = Paragraph("x<super>2</super> + y<super>2</super>", styles['Normal'])
+```
+
+For canvas-drawn text (not Paragraph objects), manually adjust the font size and position rather than using Unicode subscripts/superscripts.
+
+## Command-Line Tools
+
+### pdftotext (poppler-utils)
+```bash
+# Extract text
+pdftotext input.pdf output.txt
+
+# Extract text preserving layout
+pdftotext -layout input.pdf output.txt
+
+# Extract specific pages
+pdftotext -f 1 -l 5 input.pdf output.txt # Pages 1-5
+```
+
+### qpdf
+```bash
+# Merge PDFs
+qpdf --empty --pages file1.pdf file2.pdf -- merged.pdf
+
+# Split pages
+qpdf input.pdf --pages . 1-5 -- pages1-5.pdf
+qpdf input.pdf --pages . 6-10 -- pages6-10.pdf
+
+# Rotate pages
+qpdf input.pdf output.pdf --rotate=+90:1 # Rotate page 1 by 90 degrees
+
+# Remove password
+qpdf --password=mypassword --decrypt encrypted.pdf decrypted.pdf
+```
+
+### pdftk (if available)
+```bash
+# Merge
+pdftk file1.pdf file2.pdf cat output merged.pdf
+
+# Split
+pdftk input.pdf burst
+
+# Rotate
+pdftk input.pdf rotate 1east output rotated.pdf
+```
+
+## Common Tasks
+
+### Extract Text from Scanned PDFs
+```python
+# Requires: pip install pytesseract pdf2image
+import pytesseract
+from pdf2image import convert_from_path
+
+# Convert PDF to images
+images = convert_from_path('scanned.pdf')
+
+# OCR each page
+text = ""
+for i, image in enumerate(images):
+ text += f"Page {i+1}:\n"
+ text += pytesseract.image_to_string(image)
+ text += "\n\n"
+
+print(text)
+```
+
+### Add Watermark
+```python
+from pypdf import PdfReader, PdfWriter
+
+# Create watermark (or load existing)
+watermark = PdfReader("watermark.pdf").pages[0]
+
+# Apply to all pages
+reader = PdfReader("document.pdf")
+writer = PdfWriter()
+
+for page in reader.pages:
+ page.merge_page(watermark)
+ writer.add_page(page)
+
+with open("watermarked.pdf", "wb") as output:
+ writer.write(output)
+```
+
+### Extract Images
+```bash
+# Using pdfimages (poppler-utils)
+pdfimages -j input.pdf output_prefix
+
+# This extracts all images as output_prefix-000.jpg, output_prefix-001.jpg, etc.
+```
+
+### Password Protection
+```python
+from pypdf import PdfReader, PdfWriter
+
+reader = PdfReader("input.pdf")
+writer = PdfWriter()
+
+for page in reader.pages:
+ writer.add_page(page)
+
+# Add password
+writer.encrypt("userpassword", "ownerpassword")
+
+with open("encrypted.pdf", "wb") as output:
+ writer.write(output)
+```
+
+## Quick Reference
+
+| Task | Best Tool | Command/Code |
+|------|-----------|--------------|
+| Merge PDFs | pypdf | `writer.add_page(page)` |
+| Split PDFs | pypdf | One page per file |
+| Extract text | pdfplumber | `page.extract_text()` |
+| Extract tables | pdfplumber | `page.extract_tables()` |
+| Create PDFs | reportlab | Canvas or Platypus |
+| Command line merge | qpdf | `qpdf --empty --pages ...` |
+| OCR scanned PDFs | pytesseract | Convert to image first |
+| Fill PDF forms | pdf-lib or pypdf (see FORMS.md) | See FORMS.md |
+
+## Next Steps
+
+- For advanced pypdfium2 usage, see REFERENCE.md
+- For JavaScript libraries (pdf-lib), see REFERENCE.md
+- If you need to fill out a PDF form, follow the instructions in FORMS.md
+- For troubleshooting guides, see REFERENCE.md
diff --git a/.claude/skills/pdf/forms.md b/.claude/skills/pdf/forms.md
new file mode 100644
index 0000000..6e7e1e0
--- /dev/null
+++ b/.claude/skills/pdf/forms.md
@@ -0,0 +1,294 @@
+**CRITICAL: You MUST complete these steps in order. Do not skip ahead to writing code.**
+
+If you need to fill out a PDF form, first check to see if the PDF has fillable form fields. Run this script from this file's directory:
+ `python scripts/check_fillable_fields.py <pdf_path>`, and depending on the result go to either the "Fillable fields" or "Non-fillable fields" section and follow those instructions.
+
+# Fillable fields
+If the PDF has fillable form fields:
+- Run this script from this file's directory: `python scripts/extract_form_field_info.py <pdf_path> <output_json_path>`. It will create a JSON file with a list of fields in this format:
+```
+[
+ {
+ "field_id": (unique ID for the field),
+ "page": (page number, 1-based),
+ "rect": ([left, bottom, right, top] bounding box in PDF coordinates, y=0 is the bottom of the page),
+ "type": ("text", "checkbox", "radio_group", or "choice"),
+ },
+ // Checkboxes have "checked_value" and "unchecked_value" properties:
+ {
+ "field_id": (unique ID for the field),
+ "page": (page number, 1-based),
+ "type": "checkbox",
+ "checked_value": (Set the field to this value to check the checkbox),
+ "unchecked_value": (Set the field to this value to uncheck the checkbox),
+ },
+ // Radio groups have a "radio_options" list with the possible choices.
+ {
+ "field_id": (unique ID for the field),
+ "page": (page number, 1-based),
+ "type": "radio_group",
+ "radio_options": [
+ {
+ "value": (set the field to this value to select this radio option),
+ "rect": (bounding box for the radio button for this option)
+ },
+ // Other radio options
+ ]
+ },
+ // Multiple choice fields have a "choice_options" list with the possible choices:
+ {
+ "field_id": (unique ID for the field),
+ "page": (page number, 1-based),
+ "type": "choice",
+ "choice_options": [
+ {
+ "value": (set the field to this value to select this option),
+ "text": (display text of the option)
+ },
+ // Other choice options
+ ],
+ }
+]
+```
+- Convert the PDF to PNGs (one image for each page) with this script (run from this file's directory):
+`python scripts/convert_pdf_to_images.py <pdf_path> <output_image_dir>`
+Then analyze the images to determine the purpose of each form field (make sure to convert the bounding box PDF coordinates to image coordinates).
+- Create a `field_values.json` file in this format with the values to be entered for each field:
+```
+[
+ {
+ "field_id": "last_name", // Must match the field_id from `extract_form_field_info.py`
+ "description": "The user's last name",
+ "page": 1, // Must match the "page" value in field_info.json
+ "value": "Simpson"
+ },
+ {
+ "field_id": "Checkbox12",
+ "description": "Checkbox to be checked if the user is 18 or over",
+ "page": 1,
+ "value": "/On" // If this is a checkbox, use its "checked_value" value to check it. If it's a radio button group, use one of the "value" values in "radio_options".
+ },
+ // more fields
+]
+```
+- Run the `fill_fillable_fields.py` script from this file's directory to create a filled-in PDF:
+`python scripts/fill_fillable_fields.py <input_pdf_path> <field_values_json_path> <output_pdf_path>`
+This script will verify that the field IDs and values you provide are valid; if it prints error messages, correct the appropriate fields and try again.
+
+# Non-fillable fields
+If the PDF doesn't have fillable form fields, you'll add text annotations. First try to extract coordinates from the PDF structure (more accurate), then fall back to visual estimation if needed.
+
+## Step 1: Try Structure Extraction First
+
+Run this script to extract text labels, lines, and checkboxes with their exact PDF coordinates:
+`python scripts/extract_form_structure.py <pdf_path> form_structure.json`
+
+This creates a JSON file containing:
+- **labels**: Every text element with exact coordinates (x0, top, x1, bottom in PDF points)
+- **lines**: Horizontal lines that define row boundaries
+- **checkboxes**: Small square rectangles that are checkboxes (with center coordinates)
+- **row_boundaries**: Row top/bottom positions calculated from horizontal lines
+
+**Check the results**: If `form_structure.json` has meaningful labels (text elements that correspond to form fields), use **Approach A: Structure-Based Coordinates**. If the PDF is scanned/image-based and has few or no labels, use **Approach B: Visual Estimation**.
+
+---
+
+## Approach A: Structure-Based Coordinates (Preferred)
+
+Use this when `extract_form_structure.py` found text labels in the PDF.
+
+### A.1: Analyze the Structure
+
+Read form_structure.json and identify:
+
+1. **Label groups**: Adjacent text elements that form a single label (e.g., "Last" + "Name")
+2. **Row structure**: Labels with similar `top` values are in the same row
+3. **Field columns**: Entry areas start after label ends (x0 = label.x1 + gap)
+4. **Checkboxes**: Use the checkbox coordinates directly from the structure
+
+**Coordinate system**: PDF coordinates where y=0 is at TOP of page, y increases downward.
+
+### A.2: Check for Missing Elements
+
+The structure extraction may not detect all form elements. Common cases:
+- **Circular checkboxes**: Only square rectangles are detected as checkboxes
+- **Complex graphics**: Decorative elements or non-standard form controls
+- **Faded or light-colored elements**: May not be extracted
+
+If you see form fields in the PDF images that aren't in form_structure.json, you'll need to use **visual analysis** for those specific fields (see "Hybrid Approach" below).
+
+### A.3: Create fields.json with PDF Coordinates
+
+For each field, calculate entry coordinates from the extracted structure:
+
+**Text fields:**
+- entry x0 = label x1 + 5 (small gap after label)
+- entry x1 = next label's x0, or row boundary
+- entry top = same as label top
+- entry bottom = row boundary line below, or label bottom + row_height
+
+**Checkboxes:**
+- Use the checkbox rectangle coordinates directly from form_structure.json
+- entry_bounding_box = [checkbox.x0, checkbox.top, checkbox.x1, checkbox.bottom]
+
+Create fields.json using `pdf_width` and `pdf_height` (signals PDF coordinates):
+```json
+{
+ "pages": [
+ {"page_number": 1, "pdf_width": 612, "pdf_height": 792}
+ ],
+ "form_fields": [
+ {
+ "page_number": 1,
+ "description": "Last name entry field",
+ "field_label": "Last Name",
+ "label_bounding_box": [43, 63, 87, 73],
+ "entry_bounding_box": [92, 63, 260, 79],
+ "entry_text": {"text": "Smith", "font_size": 10}
+ },
+ {
+ "page_number": 1,
+ "description": "US Citizen Yes checkbox",
+ "field_label": "Yes",
+ "label_bounding_box": [260, 200, 280, 210],
+ "entry_bounding_box": [285, 197, 292, 205],
+ "entry_text": {"text": "X"}
+ }
+ ]
+}
+```
+
+**Important**: Use `pdf_width`/`pdf_height` and coordinates directly from form_structure.json.
+
+### A.4: Validate Bounding Boxes
+
+Before filling, check your bounding boxes for errors:
+`python scripts/check_bounding_boxes.py fields.json`
+
+This checks for intersecting bounding boxes and entry boxes that are too small for the font size. Fix any reported errors before filling.
+
+---
+
+## Approach B: Visual Estimation (Fallback)
+
+Use this when the PDF is scanned/image-based and structure extraction found no usable text labels (e.g., all text shows as "(cid:X)" patterns).
+
+### B.1: Convert PDF to Images
+
+`python scripts/convert_pdf_to_images.py <pdf_path> <output_image_dir>`
+
+### B.2: Initial Field Identification
+
+Examine each page image to identify form sections and get **rough estimates** of field locations:
+- Form field labels and their approximate positions
+- Entry areas (lines, boxes, or blank spaces for text input)
+- Checkboxes and their approximate locations
+
+For each field, note approximate pixel coordinates (they don't need to be precise yet).
+
+### B.3: Zoom Refinement (CRITICAL for accuracy)
+
+For each field, crop a region around the estimated position to refine coordinates precisely.
+
+**Create a zoomed crop using ImageMagick:**
+```bash
+magick <input_image> -crop <width>x<height>+<x_offset>+<y_offset> +repage <output_image>
+```
+
+Where:
+- `<x_offset>`, `<y_offset>` = top-left corner of the crop region (use your rough estimate minus padding)
+- `<width>`, `<height>` = size of the crop region (field area plus ~50px padding on each side)
+**Example:** To refine a "Name" field estimated around (100, 150):
+```bash
+magick images_dir/page_1.png -crop 300x80+50+120 +repage crops/name_field.png
+```
+
+(Note: if the `magick` command isn't available, try `convert` with the same arguments).
+
+**Examine the cropped image** to determine precise coordinates:
+1. Identify the exact pixel where the entry area begins (after the label)
+2. Identify where the entry area ends (before next field or edge)
+3. Identify the top and bottom of the entry line/box
+
+**Convert crop coordinates back to full image coordinates:**
+- full_x = crop_x + crop_offset_x
+- full_y = crop_y + crop_offset_y
+
+Example: If the crop started at (50, 120) and the entry box starts at (52, 18) within the crop:
+- entry_x0 = 52 + 50 = 102
+- entry_top = 18 + 120 = 138
+
+**Repeat for each field**, grouping nearby fields into single crops when possible.
+
+### B.4: Create fields.json with Refined Coordinates
+
+Create fields.json using `image_width` and `image_height` (signals image coordinates):
+```json
+{
+ "pages": [
+ {"page_number": 1, "image_width": 1700, "image_height": 2200}
+ ],
+ "form_fields": [
+ {
+ "page_number": 1,
+ "description": "Last name entry field",
+ "field_label": "Last Name",
+ "label_bounding_box": [120, 175, 242, 198],
+ "entry_bounding_box": [255, 175, 720, 218],
+ "entry_text": {"text": "Smith", "font_size": 10}
+ }
+ ]
+}
+```
+
+**Important**: Use `image_width`/`image_height` and the refined pixel coordinates from the zoom analysis.
+
+### B.5: Validate Bounding Boxes
+
+Before filling, check your bounding boxes for errors:
+`python scripts/check_bounding_boxes.py fields.json`
+
+This checks for intersecting bounding boxes and entry boxes that are too small for the font size. Fix any reported errors before filling.
+
+---
+
+## Hybrid Approach: Structure + Visual
+
+Use this when structure extraction works for most fields but misses some elements (e.g., circular checkboxes, unusual form controls).
+
+1. **Use Approach A** for fields that were detected in form_structure.json
+2. **Convert PDF to images** for visual analysis of missing fields
+3. **Use zoom refinement** (from Approach B) for the missing fields
+4. **Combine coordinates**: For fields from structure extraction, use `pdf_width`/`pdf_height`. For visually-estimated fields, you must convert image coordinates to PDF coordinates:
+ - pdf_x = image_x * (pdf_width / image_width)
+ - pdf_y = image_y * (pdf_height / image_height)
+5. **Use a single coordinate system** in fields.json - convert all to PDF coordinates with `pdf_width`/`pdf_height`
+
+---
+
+## Step 2: Validate Before Filling
+
+**Always validate bounding boxes before filling:**
+`python scripts/check_bounding_boxes.py fields.json`
+
+This checks for:
+- Intersecting bounding boxes (which would cause overlapping text)
+- Entry boxes that are too small for the specified font size
+
+Fix any reported errors in fields.json before proceeding.
+
+## Step 3: Fill the Form
+
+The fill script auto-detects the coordinate system and handles conversion:
+`python scripts/fill_pdf_form_with_annotations.py fields.json `
+
+## Step 4: Verify Output
+
+Convert the filled PDF to images and verify text placement:
+`python scripts/convert_pdf_to_images.py <filled_pdf_path> <output_image_dir>`
+
+If text is mispositioned:
+- **Approach A**: Check that you're using PDF coordinates from form_structure.json with `pdf_width`/`pdf_height`
+- **Approach B**: Check that image dimensions match and coordinates are accurate pixels
+- **Hybrid**: Ensure coordinate conversions are correct for visually-estimated fields
diff --git a/.claude/skills/pdf/reference.md b/.claude/skills/pdf/reference.md
new file mode 100644
index 0000000..41400bf
--- /dev/null
+++ b/.claude/skills/pdf/reference.md
@@ -0,0 +1,612 @@
+# PDF Processing Advanced Reference
+
+This document contains advanced PDF processing features, detailed examples, and additional libraries not covered in the main skill instructions.
+
+## pypdfium2 Library (Apache/BSD License)
+
+### Overview
+pypdfium2 is a Python binding for PDFium (Chromium's PDF library). It's excellent for fast PDF rendering, image generation, and serves as a PyMuPDF replacement.
+
+### Render PDF to Images
+```python
+import pypdfium2 as pdfium
+from PIL import Image
+
+# Load PDF
+pdf = pdfium.PdfDocument("document.pdf")
+
+# Render page to image
+page = pdf[0] # First page
+bitmap = page.render(
+ scale=2.0, # Higher resolution
+ rotation=0 # No rotation
+)
+
+# Convert to PIL Image
+img = bitmap.to_pil()
+img.save("page_1.png", "PNG")
+
+# Process multiple pages
+for i, page in enumerate(pdf):
+ bitmap = page.render(scale=1.5)
+ img = bitmap.to_pil()
+ img.save(f"page_{i+1}.jpg", "JPEG", quality=90)
+```
+
+### Extract Text with pypdfium2
+```python
+import pypdfium2 as pdfium
+
+pdf = pdfium.PdfDocument("document.pdf")
+for i, page in enumerate(pdf):
+ text = page.get_textpage().get_text_range()
+ print(f"Page {i+1} text length: {len(text)} chars")
+```
+
+## JavaScript Libraries
+
+### pdf-lib (MIT License)
+
+pdf-lib is a powerful JavaScript library for creating and modifying PDF documents in any JavaScript environment.
+
+#### Load and Manipulate Existing PDF
+```javascript
+import { PDFDocument } from 'pdf-lib';
+import fs from 'fs';
+
+async function manipulatePDF() {
+ // Load existing PDF
+ const existingPdfBytes = fs.readFileSync('input.pdf');
+ const pdfDoc = await PDFDocument.load(existingPdfBytes);
+
+ // Get page count
+ const pageCount = pdfDoc.getPageCount();
+ console.log(`Document has ${pageCount} pages`);
+
+ // Add new page
+ const newPage = pdfDoc.addPage([600, 400]);
+ newPage.drawText('Added by pdf-lib', {
+ x: 100,
+ y: 300,
+ size: 16
+ });
+
+ // Save modified PDF
+ const pdfBytes = await pdfDoc.save();
+ fs.writeFileSync('modified.pdf', pdfBytes);
+}
+```
+
+#### Create Complex PDFs from Scratch
+```javascript
+import { PDFDocument, rgb, StandardFonts } from 'pdf-lib';
+import fs from 'fs';
+
+async function createPDF() {
+ const pdfDoc = await PDFDocument.create();
+
+ // Add fonts
+ const helveticaFont = await pdfDoc.embedFont(StandardFonts.Helvetica);
+ const helveticaBold = await pdfDoc.embedFont(StandardFonts.HelveticaBold);
+
+ // Add page
+ const page = pdfDoc.addPage([595, 842]); // A4 size
+ const { width, height } = page.getSize();
+
+ // Add text with styling
+ page.drawText('Invoice #12345', {
+ x: 50,
+ y: height - 50,
+ size: 18,
+ font: helveticaBold,
+ color: rgb(0.2, 0.2, 0.8)
+ });
+
+ // Add rectangle (header background)
+ page.drawRectangle({
+ x: 40,
+ y: height - 100,
+ width: width - 80,
+ height: 30,
+ color: rgb(0.9, 0.9, 0.9)
+ });
+
+ // Add table-like content
+ const items = [
+ ['Item', 'Qty', 'Price', 'Total'],
+ ['Widget', '2', '$50', '$100'],
+ ['Gadget', '1', '$75', '$75']
+ ];
+
+ let yPos = height - 150;
+ items.forEach(row => {
+ let xPos = 50;
+ row.forEach(cell => {
+ page.drawText(cell, {
+ x: xPos,
+ y: yPos,
+ size: 12,
+ font: helveticaFont
+ });
+ xPos += 120;
+ });
+ yPos -= 25;
+ });
+
+ const pdfBytes = await pdfDoc.save();
+ fs.writeFileSync('created.pdf', pdfBytes);
+}
+```
+
+#### Advanced Merge and Split Operations
+```javascript
+import { PDFDocument } from 'pdf-lib';
+import fs from 'fs';
+
+async function mergePDFs() {
+ // Create new document
+ const mergedPdf = await PDFDocument.create();
+
+ // Load source PDFs
+ const pdf1Bytes = fs.readFileSync('doc1.pdf');
+ const pdf2Bytes = fs.readFileSync('doc2.pdf');
+
+ const pdf1 = await PDFDocument.load(pdf1Bytes);
+ const pdf2 = await PDFDocument.load(pdf2Bytes);
+
+ // Copy pages from first PDF
+ const pdf1Pages = await mergedPdf.copyPages(pdf1, pdf1.getPageIndices());
+ pdf1Pages.forEach(page => mergedPdf.addPage(page));
+
+ // Copy specific pages from second PDF (pages 0, 2, 4)
+ const pdf2Pages = await mergedPdf.copyPages(pdf2, [0, 2, 4]);
+ pdf2Pages.forEach(page => mergedPdf.addPage(page));
+
+ const mergedPdfBytes = await mergedPdf.save();
+ fs.writeFileSync('merged.pdf', mergedPdfBytes);
+}
+```
+
+### pdfjs-dist (Apache License)
+
+PDF.js is Mozilla's JavaScript library for rendering PDFs in the browser.
+
+#### Basic PDF Loading and Rendering
+```javascript
+import * as pdfjsLib from 'pdfjs-dist';
+
+// Configure worker (important for performance)
+pdfjsLib.GlobalWorkerOptions.workerSrc = './pdf.worker.js';
+
+async function renderPDF() {
+ // Load PDF
+ const loadingTask = pdfjsLib.getDocument('document.pdf');
+ const pdf = await loadingTask.promise;
+
+ console.log(`Loaded PDF with ${pdf.numPages} pages`);
+
+ // Get first page
+ const page = await pdf.getPage(1);
+ const viewport = page.getViewport({ scale: 1.5 });
+
+ // Render to canvas
+ const canvas = document.createElement('canvas');
+ const context = canvas.getContext('2d');
+ canvas.height = viewport.height;
+ canvas.width = viewport.width;
+
+ const renderContext = {
+ canvasContext: context,
+ viewport: viewport
+ };
+
+ await page.render(renderContext).promise;
+ document.body.appendChild(canvas);
+}
+```
+
+#### Extract Text with Coordinates
+```javascript
+import * as pdfjsLib from 'pdfjs-dist';
+
+async function extractText() {
+ const loadingTask = pdfjsLib.getDocument('document.pdf');
+ const pdf = await loadingTask.promise;
+
+ let fullText = '';
+
+ // Extract text from all pages
+ for (let i = 1; i <= pdf.numPages; i++) {
+ const page = await pdf.getPage(i);
+ const textContent = await page.getTextContent();
+
+ const pageText = textContent.items
+ .map(item => item.str)
+ .join(' ');
+
+ fullText += `\n--- Page ${i} ---\n${pageText}`;
+
+ // Get text with coordinates for advanced processing
+ const textWithCoords = textContent.items.map(item => ({
+ text: item.str,
+ x: item.transform[4],
+ y: item.transform[5],
+ width: item.width,
+ height: item.height
+ }));
+ }
+
+ console.log(fullText);
+ return fullText;
+}
+```
+
+#### Extract Annotations and Forms
+```javascript
+import * as pdfjsLib from 'pdfjs-dist';
+
+async function extractAnnotations() {
+ const loadingTask = pdfjsLib.getDocument('annotated.pdf');
+ const pdf = await loadingTask.promise;
+
+ for (let i = 1; i <= pdf.numPages; i++) {
+ const page = await pdf.getPage(i);
+ const annotations = await page.getAnnotations();
+
+ annotations.forEach(annotation => {
+ console.log(`Annotation type: ${annotation.subtype}`);
+ console.log(`Content: ${annotation.contents}`);
+ console.log(`Coordinates: ${JSON.stringify(annotation.rect)}`);
+ });
+ }
+}
+```
+
+## Advanced Command-Line Operations
+
+### poppler-utils Advanced Features
+
+#### Extract Text with Bounding Box Coordinates
+```bash
+# Extract text with bounding box coordinates (essential for structured data)
+pdftotext -bbox-layout document.pdf output.xml
+
+# The XML output contains precise coordinates for each text element
+```
+
+#### Advanced Image Conversion
+```bash
+# Convert to PNG images with specific resolution
+pdftoppm -png -r 300 document.pdf output_prefix
+
+# Convert specific page range with high resolution
+pdftoppm -png -r 600 -f 1 -l 3 document.pdf high_res_pages
+
+# Convert to JPEG with quality setting
+pdftoppm -jpeg -jpegopt quality=85 -r 200 document.pdf jpeg_output
+```
+
+#### Extract Embedded Images
+```bash
+# Extract all embedded images with metadata
+pdfimages -j -p document.pdf page_images
+
+# List image info without extracting
+pdfimages -list document.pdf
+
+# Extract images in their original format
+pdfimages -all document.pdf images/img
+```
+
+### qpdf Advanced Features
+
+#### Complex Page Manipulation
+```bash
+# Split PDF into groups of pages
+qpdf --split-pages=3 input.pdf output_group_%02d.pdf
+
+# Extract specific pages with complex ranges
+qpdf input.pdf --pages input.pdf 1,3-5,8,10-end -- extracted.pdf
+
+# Merge specific pages from multiple PDFs
+qpdf --empty --pages doc1.pdf 1-3 doc2.pdf 5-7 doc3.pdf 2,4 -- combined.pdf
+```
+
+#### PDF Optimization and Repair
+```bash
+# Optimize PDF for web (linearize for streaming)
+qpdf --linearize input.pdf optimized.pdf
+
+# Remove unused objects and compress
+qpdf --compress-streams=y --object-streams=generate input.pdf compressed.pdf
+
+# Attempt to repair corrupted PDF structure
+qpdf --check input.pdf
+qpdf --fix-qdf damaged.pdf repaired.pdf
+
+# Show detailed PDF structure for debugging
+qpdf --show-pages input.pdf > structure.txt
+```
+
+#### Advanced Encryption
+```bash
+# Add password protection with specific permissions
+qpdf --encrypt user_pass owner_pass 256 --print=none --modify=none -- input.pdf encrypted.pdf
+
+# Check encryption status
+qpdf --show-encryption encrypted.pdf
+
+# Remove password protection (requires password)
+qpdf --password=secret123 --decrypt encrypted.pdf decrypted.pdf
+```
+
+## Advanced Python Techniques
+
+### pdfplumber Advanced Features
+
+#### Extract Text with Precise Coordinates
+```python
+import pdfplumber
+
+with pdfplumber.open("document.pdf") as pdf:
+ page = pdf.pages[0]
+
+ # Extract all text with coordinates
+ chars = page.chars
+ for char in chars[:10]: # First 10 characters
+ print(f"Char: '{char['text']}' at x:{char['x0']:.1f} y:{char['y0']:.1f}")
+
+ # Extract text by bounding box (left, top, right, bottom)
+ bbox_text = page.within_bbox((100, 100, 400, 200)).extract_text()
+```
+
+#### Advanced Table Extraction with Custom Settings
+```python
+import pdfplumber
+import pandas as pd
+
+with pdfplumber.open("complex_table.pdf") as pdf:
+ page = pdf.pages[0]
+
+ # Extract tables with custom settings for complex layouts
+ table_settings = {
+ "vertical_strategy": "lines",
+ "horizontal_strategy": "lines",
+ "snap_tolerance": 3,
+ "intersection_tolerance": 15
+ }
+ tables = page.extract_tables(table_settings)
+
+ # Visual debugging for table extraction
+ img = page.to_image(resolution=150)
+ img.save("debug_layout.png")
+```
+
+### reportlab Advanced Features
+
+#### Create Professional Reports with Tables
+```python
+from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph
+from reportlab.lib.styles import getSampleStyleSheet
+from reportlab.lib import colors
+
+# Sample data
+data = [
+ ['Product', 'Q1', 'Q2', 'Q3', 'Q4'],
+ ['Widgets', '120', '135', '142', '158'],
+ ['Gadgets', '85', '92', '98', '105']
+]
+
+# Create PDF with table
+doc = SimpleDocTemplate("report.pdf")
+elements = []
+
+# Add title
+styles = getSampleStyleSheet()
+title = Paragraph("Quarterly Sales Report", styles['Title'])
+elements.append(title)
+
+# Add table with advanced styling
+table = Table(data)
+table.setStyle(TableStyle([
+ ('BACKGROUND', (0, 0), (-1, 0), colors.grey),
+ ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke),
+ ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
+ ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'),
+ ('FONTSIZE', (0, 0), (-1, 0), 14),
+ ('BOTTOMPADDING', (0, 0), (-1, 0), 12),
+ ('BACKGROUND', (0, 1), (-1, -1), colors.beige),
+ ('GRID', (0, 0), (-1, -1), 1, colors.black)
+]))
+elements.append(table)
+
+doc.build(elements)
+```
+
+## Complex Workflows
+
+### Extract Figures/Images from PDF
+
+#### Method 1: Using pdfimages (fastest)
+```bash
+# Extract all images with original quality
+pdfimages -all document.pdf images/img
+```
+
+#### Method 2: Using pypdfium2 + Image Processing
+```python
+import pypdfium2 as pdfium
+from PIL import Image
+import numpy as np
+
+def extract_figures(pdf_path, output_dir):
+ pdf = pdfium.PdfDocument(pdf_path)
+
+ for page_num, page in enumerate(pdf):
+ # Render high-resolution page
+ bitmap = page.render(scale=3.0)
+ img = bitmap.to_pil()
+
+ # Convert to numpy for processing
+ img_array = np.array(img)
+
+ # Simple figure detection (non-white regions)
+ mask = np.any(img_array != [255, 255, 255], axis=2)
+
+ # Find contours and extract bounding boxes
+ # (This is simplified - real implementation would need more sophisticated detection)
+
+ # Save detected figures
+ # ... implementation depends on specific needs
+```
+
+### Batch PDF Processing with Error Handling
+```python
+import os
+import glob
+from pypdf import PdfReader, PdfWriter
+import logging
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+def batch_process_pdfs(input_dir, operation='merge'):
+ pdf_files = glob.glob(os.path.join(input_dir, "*.pdf"))
+
+ if operation == 'merge':
+ writer = PdfWriter()
+ for pdf_file in pdf_files:
+ try:
+ reader = PdfReader(pdf_file)
+ for page in reader.pages:
+ writer.add_page(page)
+ logger.info(f"Processed: {pdf_file}")
+ except Exception as e:
+ logger.error(f"Failed to process {pdf_file}: {e}")
+ continue
+
+ with open("batch_merged.pdf", "wb") as output:
+ writer.write(output)
+
+ elif operation == 'extract_text':
+ for pdf_file in pdf_files:
+ try:
+ reader = PdfReader(pdf_file)
+ text = ""
+ for page in reader.pages:
+ text += page.extract_text()
+
+ output_file = pdf_file.replace('.pdf', '.txt')
+ with open(output_file, 'w', encoding='utf-8') as f:
+ f.write(text)
+ logger.info(f"Extracted text from: {pdf_file}")
+
+ except Exception as e:
+ logger.error(f"Failed to extract text from {pdf_file}: {e}")
+ continue
+```
+
+### Advanced PDF Cropping
+```python
+from pypdf import PdfWriter, PdfReader
+
+reader = PdfReader("input.pdf")
+writer = PdfWriter()
+
+# Crop page (left, bottom, right, top in points)
+page = reader.pages[0]
+page.mediabox.left = 50
+page.mediabox.bottom = 50
+page.mediabox.right = 550
+page.mediabox.top = 750
+
+writer.add_page(page)
+with open("cropped.pdf", "wb") as output:
+ writer.write(output)
+```
+
+## Performance Optimization Tips
+
+### 1. For Large PDFs
+- Use streaming approaches instead of loading entire PDF in memory
+- Use `qpdf --split-pages` for splitting large files
+- Process pages individually with pypdfium2
+
+### 2. For Text Extraction
+- `pdftotext -bbox-layout` is fastest for plain text extraction
+- Use pdfplumber for structured data and tables
+- Avoid `pypdf.extract_text()` for very large documents
+
+### 3. For Image Extraction
+- `pdfimages` is much faster than rendering pages
+- Use low resolution for previews, high resolution for final output
+
+### 4. For Form Filling
+- pdf-lib maintains form structure better than most alternatives
+- Pre-validate form fields before processing
+
+### 5. Memory Management
+```python
+# Process PDFs in chunks
+def process_large_pdf(pdf_path, chunk_size=10):
+ reader = PdfReader(pdf_path)
+ total_pages = len(reader.pages)
+
+ for start_idx in range(0, total_pages, chunk_size):
+ end_idx = min(start_idx + chunk_size, total_pages)
+ writer = PdfWriter()
+
+ for i in range(start_idx, end_idx):
+ writer.add_page(reader.pages[i])
+
+ # Process chunk
+ with open(f"chunk_{start_idx//chunk_size}.pdf", "wb") as output:
+ writer.write(output)
+```
+
+## Troubleshooting Common Issues
+
+### Encrypted PDFs
+```python
+# Handle password-protected PDFs
+from pypdf import PdfReader
+
+try:
+ reader = PdfReader("encrypted.pdf")
+ if reader.is_encrypted:
+ reader.decrypt("password")
+except Exception as e:
+ print(f"Failed to decrypt: {e}")
+```
+
+### Corrupted PDFs
+```bash
+# Use qpdf to repair
+qpdf --check corrupted.pdf
+qpdf --replace-input corrupted.pdf
+```
+
+### Text Extraction Issues
+```python
+# Fallback to OCR for scanned PDFs
+import pytesseract
+from pdf2image import convert_from_path
+
+def extract_text_with_ocr(pdf_path):
+ images = convert_from_path(pdf_path)
+ text = ""
+ for i, image in enumerate(images):
+ text += pytesseract.image_to_string(image)
+ return text
+```
+
+## License Information
+
+- **pypdf**: BSD License
+- **pdfplumber**: MIT License
+- **pypdfium2**: Apache/BSD License
+- **reportlab**: BSD License
+- **poppler-utils**: GPL-2 License
+- **qpdf**: Apache License
+- **pdf-lib**: MIT License
+- **pdfjs-dist**: Apache License
\ No newline at end of file
diff --git a/.claude/skills/pdf/scripts/check_bounding_boxes.py b/.claude/skills/pdf/scripts/check_bounding_boxes.py
new file mode 100644
index 0000000..2cc5e34
--- /dev/null
+++ b/.claude/skills/pdf/scripts/check_bounding_boxes.py
@@ -0,0 +1,65 @@
+from dataclasses import dataclass
+import json
+import sys
+
+
+
+
+@dataclass
+class RectAndField:
+ rect: list[float]
+ rect_type: str
+ field: dict
+
+
+def get_bounding_box_messages(fields_json_stream) -> list[str]:
+ messages = []
+ fields = json.load(fields_json_stream)
+ messages.append(f"Read {len(fields['form_fields'])} fields")
+
+ def rects_intersect(r1, r2):
+ disjoint_horizontal = r1[0] >= r2[2] or r1[2] <= r2[0]
+ disjoint_vertical = r1[1] >= r2[3] or r1[3] <= r2[1]
+ return not (disjoint_horizontal or disjoint_vertical)
+
+ rects_and_fields = []
+ for f in fields["form_fields"]:
+ rects_and_fields.append(RectAndField(f["label_bounding_box"], "label", f))
+ rects_and_fields.append(RectAndField(f["entry_bounding_box"], "entry", f))
+
+ has_error = False
+ for i, ri in enumerate(rects_and_fields):
+ for j in range(i + 1, len(rects_and_fields)):
+ rj = rects_and_fields[j]
+ if ri.field["page_number"] == rj.field["page_number"] and rects_intersect(ri.rect, rj.rect):
+ has_error = True
+ if ri.field is rj.field:
+ messages.append(f"FAILURE: intersection between label and entry bounding boxes for `{ri.field['description']}` ({ri.rect}, {rj.rect})")
+ else:
+ messages.append(f"FAILURE: intersection between {ri.rect_type} bounding box for `{ri.field['description']}` ({ri.rect}) and {rj.rect_type} bounding box for `{rj.field['description']}` ({rj.rect})")
+ if len(messages) >= 20:
+ messages.append("Aborting further checks; fix bounding boxes and try again")
+ return messages
+ if ri.rect_type == "entry":
+ if "entry_text" in ri.field:
+ font_size = ri.field["entry_text"].get("font_size", 14)
+ entry_height = ri.rect[3] - ri.rect[1]
+ if entry_height < font_size:
+ has_error = True
+ messages.append(f"FAILURE: entry bounding box height ({entry_height}) for `{ri.field['description']}` is too short for the text content (font size: {font_size}). Increase the box height or decrease the font size.")
+ if len(messages) >= 20:
+ messages.append("Aborting further checks; fix bounding boxes and try again")
+ return messages
+
+ if not has_error:
+ messages.append("SUCCESS: All bounding boxes are valid")
+ return messages
+
+if __name__ == "__main__":
+ if len(sys.argv) != 2:
+ print("Usage: check_bounding_boxes.py [fields.json]")
+ sys.exit(1)
+ with open(sys.argv[1]) as f:
+ messages = get_bounding_box_messages(f)
+ for msg in messages:
+ print(msg)
diff --git a/.claude/skills/pdf/scripts/check_fillable_fields.py b/.claude/skills/pdf/scripts/check_fillable_fields.py
new file mode 100644
index 0000000..36dfb95
--- /dev/null
+++ b/.claude/skills/pdf/scripts/check_fillable_fields.py
@@ -0,0 +1,11 @@
+import sys
+from pypdf import PdfReader
+
+
+
+
+reader = PdfReader(sys.argv[1])
+if (reader.get_fields()):
+ print("This PDF has fillable form fields")
+else:
+ print("This PDF does not have fillable form fields; you will need to visually determine where to enter data")
diff --git a/.claude/skills/pdf/scripts/convert_pdf_to_images.py b/.claude/skills/pdf/scripts/convert_pdf_to_images.py
new file mode 100644
index 0000000..7939cef
--- /dev/null
+++ b/.claude/skills/pdf/scripts/convert_pdf_to_images.py
@@ -0,0 +1,33 @@
+import os
+import sys
+
+from pdf2image import convert_from_path
+
+
+
+
+def convert(pdf_path, output_dir, max_dim=1000):
+ images = convert_from_path(pdf_path, dpi=200)
+
+ for i, image in enumerate(images):
+ width, height = image.size
+ if width > max_dim or height > max_dim:
+ scale_factor = min(max_dim / width, max_dim / height)
+ new_width = int(width * scale_factor)
+ new_height = int(height * scale_factor)
+ image = image.resize((new_width, new_height))
+
+ image_path = os.path.join(output_dir, f"page_{i+1}.png")
+ image.save(image_path)
+ print(f"Saved page {i+1} as {image_path} (size: {image.size})")
+
+ print(f"Converted {len(images)} pages to PNG images")
+
+
+if __name__ == "__main__":
+ if len(sys.argv) != 3:
+ print("Usage: convert_pdf_to_images.py [input pdf] [output directory]")
+ sys.exit(1)
+ pdf_path = sys.argv[1]
+ output_directory = sys.argv[2]
+ convert(pdf_path, output_directory)
diff --git a/.claude/skills/pdf/scripts/create_validation_image.py b/.claude/skills/pdf/scripts/create_validation_image.py
new file mode 100644
index 0000000..10eadd8
--- /dev/null
+++ b/.claude/skills/pdf/scripts/create_validation_image.py
@@ -0,0 +1,37 @@
+import json
+import sys
+
+from PIL import Image, ImageDraw
+
+
+
+
+def create_validation_image(page_number, fields_json_path, input_path, output_path):
+ with open(fields_json_path, 'r') as f:
+ data = json.load(f)
+
+ img = Image.open(input_path)
+ draw = ImageDraw.Draw(img)
+ num_boxes = 0
+
+ for field in data["form_fields"]:
+ if field["page_number"] == page_number:
+ entry_box = field['entry_bounding_box']
+ label_box = field['label_bounding_box']
+ draw.rectangle(entry_box, outline='red', width=2)
+ draw.rectangle(label_box, outline='blue', width=2)
+ num_boxes += 2
+
+ img.save(output_path)
+ print(f"Created validation image at {output_path} with {num_boxes} bounding boxes")
+
+
+if __name__ == "__main__":
+ if len(sys.argv) != 5:
+ print("Usage: create_validation_image.py [page number] [fields.json file] [input image path] [output image path]")
+ sys.exit(1)
+ page_number = int(sys.argv[1])
+ fields_json_path = sys.argv[2]
+ input_image_path = sys.argv[3]
+ output_image_path = sys.argv[4]
+ create_validation_image(page_number, fields_json_path, input_image_path, output_image_path)
diff --git a/.claude/skills/pdf/scripts/extract_form_field_info.py b/.claude/skills/pdf/scripts/extract_form_field_info.py
new file mode 100644
index 0000000..64cd470
--- /dev/null
+++ b/.claude/skills/pdf/scripts/extract_form_field_info.py
@@ -0,0 +1,122 @@
+import json
+import sys
+
+from pypdf import PdfReader
+
+
+
+
+def get_full_annotation_field_id(annotation):
+ components = []
+ while annotation:
+ field_name = annotation.get('/T')
+ if field_name:
+ components.append(field_name)
+ annotation = annotation.get('/Parent')
+ return ".".join(reversed(components)) if components else None
+
+
+def make_field_dict(field, field_id):
+ field_dict = {"field_id": field_id}
+ ft = field.get('/FT')
+ if ft == "/Tx":
+ field_dict["type"] = "text"
+ elif ft == "/Btn":
+ field_dict["type"] = "checkbox"
+ states = field.get("/_States_", [])
+ if len(states) == 2:
+ if "/Off" in states:
+ field_dict["checked_value"] = states[0] if states[0] != "/Off" else states[1]
+ field_dict["unchecked_value"] = "/Off"
+ else:
+ print(f"Unexpected state values for checkbox `{field_id}`. Its checked and unchecked values may not be correct; if you're trying to check it, visually verify the results.")
+ field_dict["checked_value"] = states[0]
+ field_dict["unchecked_value"] = states[1]
+ elif ft == "/Ch":
+ field_dict["type"] = "choice"
+ states = field.get("/_States_", [])
+ field_dict["choice_options"] = [{
+ "value": state[0],
+ "text": state[1],
+ } for state in states]
+ else:
+ field_dict["type"] = f"unknown ({ft})"
+ return field_dict
+
+
+def get_field_info(reader: PdfReader):
+ fields = reader.get_fields()
+
+ field_info_by_id = {}
+ possible_radio_names = set()
+
+ for field_id, field in fields.items():
+ if field.get("/Kids"):
+ if field.get("/FT") == "/Btn":
+ possible_radio_names.add(field_id)
+ continue
+ field_info_by_id[field_id] = make_field_dict(field, field_id)
+
+
+ radio_fields_by_id = {}
+
+ for page_index, page in enumerate(reader.pages):
+ annotations = page.get('/Annots', [])
+ for ann in annotations:
+ field_id = get_full_annotation_field_id(ann)
+ if field_id in field_info_by_id:
+ field_info_by_id[field_id]["page"] = page_index + 1
+ field_info_by_id[field_id]["rect"] = ann.get('/Rect')
+ elif field_id in possible_radio_names:
+ try:
+ on_values = [v for v in ann["/AP"]["/N"] if v != "/Off"]
+ except KeyError:
+ continue
+ if len(on_values) == 1:
+ rect = ann.get("/Rect")
+ if field_id not in radio_fields_by_id:
+ radio_fields_by_id[field_id] = {
+ "field_id": field_id,
+ "type": "radio_group",
+ "page": page_index + 1,
+ "radio_options": [],
+ }
+ radio_fields_by_id[field_id]["radio_options"].append({
+ "value": on_values[0],
+ "rect": rect,
+ })
+
+ fields_with_location = []
+ for field_info in field_info_by_id.values():
+ if "page" in field_info:
+ fields_with_location.append(field_info)
+ else:
+ print(f"Unable to determine location for field id: {field_info.get('field_id')}, ignoring")
+
+ def sort_key(f):
+ if "radio_options" in f:
+ rect = f["radio_options"][0]["rect"] or [0, 0, 0, 0]
+ else:
+ rect = f.get("rect") or [0, 0, 0, 0]
+ adjusted_position = [-rect[1], rect[0]]
+ return [f.get("page"), adjusted_position]
+
+ sorted_fields = fields_with_location + list(radio_fields_by_id.values())
+ sorted_fields.sort(key=sort_key)
+
+ return sorted_fields
+
+
+def write_field_info(pdf_path: str, json_output_path: str):
+ reader = PdfReader(pdf_path)
+ field_info = get_field_info(reader)
+ with open(json_output_path, "w") as f:
+ json.dump(field_info, f, indent=2)
+ print(f"Wrote {len(field_info)} fields to {json_output_path}")
+
+
+if __name__ == "__main__":
+ if len(sys.argv) != 3:
+ print("Usage: extract_form_field_info.py [input pdf] [output json]")
+ sys.exit(1)
+ write_field_info(sys.argv[1], sys.argv[2])
diff --git a/.claude/skills/pdf/scripts/extract_form_structure.py b/.claude/skills/pdf/scripts/extract_form_structure.py
new file mode 100644
index 0000000..f219e7d
--- /dev/null
+++ b/.claude/skills/pdf/scripts/extract_form_structure.py
@@ -0,0 +1,115 @@
+"""
+Extract form structure from a non-fillable PDF.
+
+This script analyzes the PDF to find:
+- Text labels with their exact coordinates
+- Horizontal lines (row boundaries)
+- Checkboxes (small rectangles)
+
+Output: A JSON file with the form structure that can be used to generate
+accurate field coordinates for filling.
+
+Usage: python extract_form_structure.py <input.pdf> <output_structure.json>
+"""
+
+import json
+import sys
+import pdfplumber
+
+
+def extract_form_structure(pdf_path):
+ structure = {
+ "pages": [],
+ "labels": [],
+ "lines": [],
+ "checkboxes": [],
+ "row_boundaries": []
+ }
+
+ with pdfplumber.open(pdf_path) as pdf:
+ for page_num, page in enumerate(pdf.pages, 1):
+ structure["pages"].append({
+ "page_number": page_num,
+ "width": float(page.width),
+ "height": float(page.height)
+ })
+
+ words = page.extract_words()
+ for word in words:
+ structure["labels"].append({
+ "page": page_num,
+ "text": word["text"],
+ "x0": round(float(word["x0"]), 1),
+ "top": round(float(word["top"]), 1),
+ "x1": round(float(word["x1"]), 1),
+ "bottom": round(float(word["bottom"]), 1)
+ })
+
+ for line in page.lines:
+ if abs(float(line["x1"]) - float(line["x0"])) > page.width * 0.5:
+ structure["lines"].append({
+ "page": page_num,
+ "y": round(float(line["top"]), 1),
+ "x0": round(float(line["x0"]), 1),
+ "x1": round(float(line["x1"]), 1)
+ })
+
+ for rect in page.rects:
+ width = float(rect["x1"]) - float(rect["x0"])
+ height = float(rect["bottom"]) - float(rect["top"])
+ if 5 <= width <= 15 and 5 <= height <= 15 and abs(width - height) < 2:
+ structure["checkboxes"].append({
+ "page": page_num,
+ "x0": round(float(rect["x0"]), 1),
+ "top": round(float(rect["top"]), 1),
+ "x1": round(float(rect["x1"]), 1),
+ "bottom": round(float(rect["bottom"]), 1),
+ "center_x": round((float(rect["x0"]) + float(rect["x1"])) / 2, 1),
+ "center_y": round((float(rect["top"]) + float(rect["bottom"])) / 2, 1)
+ })
+
+ lines_by_page = {}
+ for line in structure["lines"]:
+ page = line["page"]
+ if page not in lines_by_page:
+ lines_by_page[page] = []
+ lines_by_page[page].append(line["y"])
+
+ for page, y_coords in lines_by_page.items():
+ y_coords = sorted(set(y_coords))
+ for i in range(len(y_coords) - 1):
+ structure["row_boundaries"].append({
+ "page": page,
+ "row_top": y_coords[i],
+ "row_bottom": y_coords[i + 1],
+ "row_height": round(y_coords[i + 1] - y_coords[i], 1)
+ })
+
+ return structure
+
+
+def main():
+ if len(sys.argv) != 3:
+ print("Usage: extract_form_structure.py <input.pdf> <output.json>")
+ sys.exit(1)
+
+ pdf_path = sys.argv[1]
+ output_path = sys.argv[2]
+
+ print(f"Extracting structure from {pdf_path}...")
+ structure = extract_form_structure(pdf_path)
+
+ with open(output_path, "w") as f:
+ json.dump(structure, f, indent=2)
+
+ print(f"Found:")
+ print(f" - {len(structure['pages'])} pages")
+ print(f" - {len(structure['labels'])} text labels")
+ print(f" - {len(structure['lines'])} horizontal lines")
+ print(f" - {len(structure['checkboxes'])} checkboxes")
+ print(f" - {len(structure['row_boundaries'])} row boundaries")
+ print(f"Saved to {output_path}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.claude/skills/pdf/scripts/fill_fillable_fields.py b/.claude/skills/pdf/scripts/fill_fillable_fields.py
new file mode 100644
index 0000000..51c2600
--- /dev/null
+++ b/.claude/skills/pdf/scripts/fill_fillable_fields.py
@@ -0,0 +1,98 @@
+import json
+import sys
+
+from pypdf import PdfReader, PdfWriter
+
+from extract_form_field_info import get_field_info
+
+
+
+
+def fill_pdf_fields(input_pdf_path: str, fields_json_path: str, output_pdf_path: str):
+ with open(fields_json_path) as f:
+ fields = json.load(f)
+ fields_by_page = {}
+ for field in fields:
+ if "value" in field:
+ field_id = field["field_id"]
+ page = field["page"]
+ if page not in fields_by_page:
+ fields_by_page[page] = {}
+ fields_by_page[page][field_id] = field["value"]
+
+ reader = PdfReader(input_pdf_path)
+
+ has_error = False
+ field_info = get_field_info(reader)
+ fields_by_ids = {f["field_id"]: f for f in field_info}
+ for field in fields:
+ existing_field = fields_by_ids.get(field["field_id"])
+ if not existing_field:
+ has_error = True
+ print(f"ERROR: `{field['field_id']}` is not a valid field ID")
+ elif field["page"] != existing_field["page"]:
+ has_error = True
+ print(f"ERROR: Incorrect page number for `{field['field_id']}` (got {field['page']}, expected {existing_field['page']})")
+ else:
+ if "value" in field:
+ err = validation_error_for_field_value(existing_field, field["value"])
+ if err:
+ print(err)
+ has_error = True
+ if has_error:
+ sys.exit(1)
+
+ writer = PdfWriter(clone_from=reader)
+ for page, field_values in fields_by_page.items():
+ writer.update_page_form_field_values(writer.pages[page - 1], field_values, auto_regenerate=False)
+
+ writer.set_need_appearances_writer(True)
+
+ with open(output_pdf_path, "wb") as f:
+ writer.write(f)
+
+
+def validation_error_for_field_value(field_info, field_value):
+ field_type = field_info["type"]
+ field_id = field_info["field_id"]
+ if field_type == "checkbox":
+ checked_val = field_info["checked_value"]
+ unchecked_val = field_info["unchecked_value"]
+ if field_value != checked_val and field_value != unchecked_val:
+ return f'ERROR: Invalid value "{field_value}" for checkbox field "{field_id}". The checked value is "{checked_val}" and the unchecked value is "{unchecked_val}"'
+ elif field_type == "radio_group":
+ option_values = [opt["value"] for opt in field_info["radio_options"]]
+ if field_value not in option_values:
+ return f'ERROR: Invalid value "{field_value}" for radio group field "{field_id}". Valid values are: {option_values}'
+ elif field_type == "choice":
+ choice_values = [opt["value"] for opt in field_info["choice_options"]]
+ if field_value not in choice_values:
+ return f'ERROR: Invalid value "{field_value}" for choice field "{field_id}". Valid values are: {choice_values}'
+ return None
+
+
+def monkeypatch_pydpf_method():
+ from pypdf.generic import DictionaryObject
+ from pypdf.constants import FieldDictionaryAttributes
+
+ original_get_inherited = DictionaryObject.get_inherited
+
+ def patched_get_inherited(self, key: str, default = None):
+ result = original_get_inherited(self, key, default)
+ if key == FieldDictionaryAttributes.Opt:
+ if isinstance(result, list) and all(isinstance(v, list) and len(v) == 2 for v in result):
+ result = [r[0] for r in result]
+ return result
+
+ DictionaryObject.get_inherited = patched_get_inherited
+
+
+if __name__ == "__main__":
+ if len(sys.argv) != 4:
+ print("Usage: fill_fillable_fields.py [input pdf] [field_values.json] [output pdf]")
+ sys.exit(1)
+ monkeypatch_pydpf_method()
+ input_pdf = sys.argv[1]
+ fields_json = sys.argv[2]
+ output_pdf = sys.argv[3]
+ fill_pdf_fields(input_pdf, fields_json, output_pdf)
diff --git a/.claude/skills/pdf/scripts/fill_pdf_form_with_annotations.py b/.claude/skills/pdf/scripts/fill_pdf_form_with_annotations.py
new file mode 100644
index 0000000..b430069
--- /dev/null
+++ b/.claude/skills/pdf/scripts/fill_pdf_form_with_annotations.py
@@ -0,0 +1,107 @@
+import json
+import sys
+
+from pypdf import PdfReader, PdfWriter
+from pypdf.annotations import FreeText
+
+
+
+
def transform_from_image_coords(bbox, image_width, image_height, pdf_width, pdf_height):
    """Map an image-space box ``[x0, y0, x1, y1]`` (origin top-left, y grows
    downward) to pypdf rectangle coordinates ``(left, bottom, right, top)``
    (origin bottom-left of the PDF page).

    The box is scaled from image pixels to PDF points and the vertical axis
    is flipped.
    """
    sx = pdf_width / image_width
    sy = pdf_height / image_height
    x0, y0, x1, y1 = bbox
    # Horizontal coordinates only scale; vertical coordinates scale and flip.
    return x0 * sx, pdf_height - y1 * sy, x1 * sx, pdf_height - y0 * sy
+
+
def transform_from_pdf_coords(bbox, pdf_height):
    """Convert a top-origin PDF-space box ``[x0, y0, x1, y1]`` into pypdf's
    bottom-origin ``(left, bottom, right, top)`` rectangle.

    Only the vertical axis needs flipping; horizontal coordinates pass
    through unchanged.
    """
    x0, y0, x1, y1 = bbox
    return x0, pdf_height - y1, x1, pdf_height - y0
+
+
def fill_pdf_form(input_pdf_path, fields_json_path, output_pdf_path):
    """Overlay FreeText annotations on a PDF using field data from a JSON file.

    The JSON is expected to contain ``form_fields`` (each with ``page_number``,
    ``entry_bounding_box`` and optional ``entry_text``) and ``pages`` (per-page
    metadata used to interpret the bounding-box coordinate system). The
    annotated PDF is written to ``output_pdf_path``.
    """

    with open(fields_json_path, "r") as f:
        fields_data = json.load(f)

    reader = PdfReader(input_pdf_path)
    writer = PdfWriter()

    # Copy all pages (and their existing content) into the writer up front.
    writer.append(reader)

    # Page sizes keyed by 1-based page number, for the coordinate transforms.
    pdf_dimensions = {}
    for i, page in enumerate(reader.pages):
        mediabox = page.mediabox
        pdf_dimensions[i + 1] = [mediabox.width, mediabox.height]

    annotations = []
    for field in fields_data["form_fields"]:
        page_num = field["page_number"]

        # NOTE(review): raises StopIteration if the JSON lacks metadata for
        # this page — presumably the generator guarantees a matching entry.
        page_info = next(p for p in fields_data["pages"] if p["page_number"] == page_num)
        pdf_width, pdf_height = pdf_dimensions[page_num]

        # Presence of "pdf_width" signals the box is already in (top-origin)
        # PDF points; otherwise it is in image pixels and must be scaled.
        if "pdf_width" in page_info:
            transformed_entry_box = transform_from_pdf_coords(
                field["entry_bounding_box"],
                float(pdf_height)
            )
        else:
            image_width = page_info["image_width"]
            image_height = page_info["image_height"]
            transformed_entry_box = transform_from_image_coords(
                field["entry_bounding_box"],
                image_width, image_height,
                float(pdf_width), float(pdf_height)
            )

        # Skip fields that have nothing to write.
        if "entry_text" not in field or "text" not in field["entry_text"]:
            continue
        entry_text = field["entry_text"]
        text = entry_text["text"]
        if not text:
            continue

        # Styling defaults applied when the JSON omits font information.
        font_name = entry_text.get("font", "Arial")
        font_size = str(entry_text.get("font_size", 14)) + "pt"
        font_color = entry_text.get("font_color", "000000")

        annotation = FreeText(
            text=text,
            rect=transformed_entry_box,
            font=font_name,
            font_size=font_size,
            font_color=font_color,
            border_color=None,  # no visible border
            background_color=None,  # transparent background
        )
        annotations.append(annotation)
        # pypdf page numbers are 0-indexed; the JSON uses 1-indexed pages.
        writer.add_annotation(page_number=page_num - 1, annotation=annotation)

    with open(output_pdf_path, "wb") as output:
        writer.write(output)

    print(f"Successfully filled PDF form and saved to {output_pdf_path}")
    print(f"Added {len(annotations)} text annotations")
+
+
if __name__ == "__main__":
    # CLI entry point: expects exactly three positional arguments.
    if len(sys.argv) != 4:
        print("Usage: fill_pdf_form_with_annotations.py [input pdf] [fields.json] [output pdf]")
        sys.exit(1)
    fill_pdf_form(sys.argv[1], sys.argv[2], sys.argv[3])
diff --git a/.claude/skills/pptx/.openskills.json b/.claude/skills/pptx/.openskills.json
new file mode 100644
index 0000000..784bccb
--- /dev/null
+++ b/.claude/skills/pptx/.openskills.json
@@ -0,0 +1,7 @@
+{
+ "source": "anthropics/skills",
+ "sourceType": "git",
+ "repoUrl": "https://github.com/anthropics/skills",
+ "subpath": "skills\\pptx",
+ "installedAt": "2026-03-02T09:19:50.145Z"
+}
\ No newline at end of file
diff --git a/.claude/skills/pptx/LICENSE.txt b/.claude/skills/pptx/LICENSE.txt
new file mode 100644
index 0000000..c55ab42
--- /dev/null
+++ b/.claude/skills/pptx/LICENSE.txt
@@ -0,0 +1,30 @@
+© 2025 Anthropic, PBC. All rights reserved.
+
+LICENSE: Use of these materials (including all code, prompts, assets, files,
+and other components of this Skill) is governed by your agreement with
+Anthropic regarding use of Anthropic's services. If no separate agreement
+exists, use is governed by Anthropic's Consumer Terms of Service or
+Commercial Terms of Service, as applicable:
+https://www.anthropic.com/legal/consumer-terms
+https://www.anthropic.com/legal/commercial-terms
+Your applicable agreement is referred to as the "Agreement." "Services" are
+as defined in the Agreement.
+
+ADDITIONAL RESTRICTIONS: Notwithstanding anything in the Agreement to the
+contrary, users may not:
+
+- Extract these materials from the Services or retain copies of these
+ materials outside the Services
+- Reproduce or copy these materials, except for temporary copies created
+ automatically during authorized use of the Services
+- Create derivative works based on these materials
+- Distribute, sublicense, or transfer these materials to any third party
+- Make, offer to sell, sell, or import any inventions embodied in these
+ materials
+- Reverse engineer, decompile, or disassemble these materials
+
+The receipt, viewing, or possession of these materials does not convey or
+imply any license or right beyond those expressly granted above.
+
+Anthropic retains all right, title, and interest in these materials,
+including all copyrights, patents, and other intellectual property rights.
diff --git a/.claude/skills/pptx/SKILL.md b/.claude/skills/pptx/SKILL.md
new file mode 100644
index 0000000..df5000e
--- /dev/null
+++ b/.claude/skills/pptx/SKILL.md
@@ -0,0 +1,232 @@
+---
+name: pptx
+description: "Use this skill any time a .pptx file is involved in any way — as input, output, or both. This includes: creating slide decks, pitch decks, or presentations; reading, parsing, or extracting text from any .pptx file (even if the extracted content will be used elsewhere, like in an email or summary); editing, modifying, or updating existing presentations; combining or splitting slide files; working with templates, layouts, speaker notes, or comments. Trigger whenever the user mentions \"deck,\" \"slides,\" \"presentation,\" or references a .pptx filename, regardless of what they plan to do with the content afterward. If a .pptx file needs to be opened, created, or touched, use this skill."
+license: Proprietary. LICENSE.txt has complete terms
+---
+
+# PPTX Skill
+
+## Quick Reference
+
+| Task | Guide |
+|------|-------|
+| Read/analyze content | `python -m markitdown presentation.pptx` |
+| Edit or create from template | Read [editing.md](editing.md) |
+| Create from scratch | Read [pptxgenjs.md](pptxgenjs.md) |
+
+---
+
+## Reading Content
+
+```bash
+# Text extraction
+python -m markitdown presentation.pptx
+
+# Visual overview
+python scripts/thumbnail.py presentation.pptx
+
+# Raw XML
+python scripts/office/unpack.py presentation.pptx unpacked/
+```
+
+---
+
+## Editing Workflow
+
+**Read [editing.md](editing.md) for full details.**
+
+1. Analyze template with `thumbnail.py`
+2. Unpack → manipulate slides → edit content → clean → pack
+
+---
+
+## Creating from Scratch
+
+**Read [pptxgenjs.md](pptxgenjs.md) for full details.**
+
+Use when no template or reference presentation is available.
+
+---
+
+## Design Ideas
+
+**Don't create boring slides.** Plain bullets on a white background won't impress anyone. Consider ideas from this list for each slide.
+
+### Before Starting
+
+- **Pick a bold, content-informed color palette**: The palette should feel designed for THIS topic. If swapping your colors into a completely different presentation would still "work," you haven't made specific enough choices.
+- **Dominance over equality**: One color should dominate (60-70% visual weight), with 1-2 supporting tones and one sharp accent. Never give all colors equal weight.
+- **Dark/light contrast**: Dark backgrounds for title + conclusion slides, light for content ("sandwich" structure). Or commit to dark throughout for a premium feel.
+- **Commit to a visual motif**: Pick ONE distinctive element and repeat it — rounded image frames, icons in colored circles, thick single-side borders. Carry it across every slide.
+
+### Color Palettes
+
+Choose colors that match your topic — don't default to generic blue. Use these palettes as inspiration:
+
+| Theme | Primary | Secondary | Accent |
+|-------|---------|-----------|--------|
+| **Midnight Executive** | `1E2761` (navy) | `CADCFC` (ice blue) | `FFFFFF` (white) |
+| **Forest & Moss** | `2C5F2D` (forest) | `97BC62` (moss) | `F5F5F5` (cream) |
+| **Coral Energy** | `F96167` (coral) | `F9E795` (gold) | `2F3C7E` (navy) |
+| **Warm Terracotta** | `B85042` (terracotta) | `E7E8D1` (sand) | `A7BEAE` (sage) |
+| **Ocean Gradient** | `065A82` (deep blue) | `1C7293` (teal) | `21295C` (midnight) |
+| **Charcoal Minimal** | `36454F` (charcoal) | `F2F2F2` (off-white) | `212121` (black) |
+| **Teal Trust** | `028090` (teal) | `00A896` (seafoam) | `02C39A` (mint) |
+| **Berry & Cream** | `6D2E46` (berry) | `A26769` (dusty rose) | `ECE2D0` (cream) |
+| **Sage Calm** | `84B59F` (sage) | `69A297` (eucalyptus) | `50808E` (slate) |
+| **Cherry Bold** | `990011` (cherry) | `FCF6F5` (off-white) | `2F3C7E` (navy) |
+
+### For Each Slide
+
+**Every slide needs a visual element** — image, chart, icon, or shape. Text-only slides are forgettable.
+
+**Layout options:**
+- Two-column (text left, illustration on right)
+- Icon + text rows (icon in colored circle, bold header, description below)
+- 2x2 or 2x3 grid (image on one side, grid of content blocks on other)
+- Half-bleed image (full left or right side) with content overlay
+
+**Data display:**
+- Large stat callouts (big numbers 60-72pt with small labels below)
+- Comparison columns (before/after, pros/cons, side-by-side options)
+- Timeline or process flow (numbered steps, arrows)
+
+**Visual polish:**
+- Icons in small colored circles next to section headers
+- Italic accent text for key stats or taglines
+
+### Typography
+
+**Choose an interesting font pairing** — don't default to Arial. Pick a header font with personality and pair it with a clean body font.
+
+| Header Font | Body Font |
+|-------------|-----------|
+| Georgia | Calibri |
+| Arial Black | Arial |
+| Calibri | Calibri Light |
+| Cambria | Calibri |
+| Trebuchet MS | Calibri |
+| Impact | Arial |
+| Palatino | Garamond |
+| Consolas | Calibri |
+
+| Element | Size |
+|---------|------|
+| Slide title | 36-44pt bold |
+| Section header | 20-24pt bold |
+| Body text | 14-16pt |
+| Captions | 10-12pt muted |
+
+### Spacing
+
+- 0.5" minimum margins
+- 0.3-0.5" between content blocks
+- Leave breathing room—don't fill every inch
+
+### Avoid (Common Mistakes)
+
+- **Don't repeat the same layout** — vary columns, cards, and callouts across slides
+- **Don't center body text** — left-align paragraphs and lists; center only titles
+- **Don't skimp on size contrast** — titles need 36pt+ to stand out from 14-16pt body
+- **Don't default to blue** — pick colors that reflect the specific topic
+- **Don't mix spacing randomly** — choose 0.3" or 0.5" gaps and use consistently
+- **Don't style one slide and leave the rest plain** — commit fully or keep it simple throughout
+- **Don't create text-only slides** — add images, icons, charts, or visual elements; avoid plain title + bullets
+- **Don't forget text box padding** — when aligning lines or shapes with text edges, set `margin: 0` on the text box or offset the shape to account for padding
+- **Don't use low-contrast elements** — icons AND text need strong contrast against the background; avoid light text on light backgrounds or dark text on dark backgrounds
+- **NEVER use accent lines under titles** — these are a hallmark of AI-generated slides; use whitespace or background color instead
+
+---
+
+## QA (Required)
+
+**Assume there are problems. Your job is to find them.**
+
+Your first render is almost never correct. Approach QA as a bug hunt, not a confirmation step. If you found zero issues on first inspection, you weren't looking hard enough.
+
+### Content QA
+
+```bash
+python -m markitdown output.pptx
+```
+
+Check for missing content, typos, wrong order.
+
+**When using templates, check for leftover placeholder text:**
+
+```bash
+python -m markitdown output.pptx | grep -iE "xxxx|lorem|ipsum|this.*(page|slide).*layout"
+```
+
+If grep returns results, fix them before declaring success.
+
+### Visual QA
+
+**⚠️ USE SUBAGENTS** — even for 2-3 slides. You've been staring at the code and will see what you expect, not what's there. Subagents have fresh eyes.
+
+Convert slides to images (see [Converting to Images](#converting-to-images)), then use this prompt:
+
+```
+Visually inspect these slides. Assume there are issues — find them.
+
+Look for:
+- Overlapping elements (text through shapes, lines through words, stacked elements)
+- Text overflow or cut off at edges/box boundaries
+- Decorative lines positioned for single-line text but title wrapped to two lines
+- Source citations or footers colliding with content above
+- Elements too close (< 0.3" gaps) or cards/sections nearly touching
+- Uneven gaps (large empty area in one place, cramped in another)
+- Insufficient margin from slide edges (< 0.5")
+- Columns or similar elements not aligned consistently
+- Low-contrast text (e.g., light gray text on cream-colored background)
+- Low-contrast icons (e.g., dark icons on dark backgrounds without a contrasting circle)
+- Text boxes too narrow causing excessive wrapping
+- Leftover placeholder content
+
+For each slide, list issues or areas of concern, even if minor.
+
+Read and analyze these images:
+1. /path/to/slide-01.jpg (Expected: [brief description])
+2. /path/to/slide-02.jpg (Expected: [brief description])
+
+Report ALL issues found, including minor ones.
+```
+
+### Verification Loop
+
+1. Generate slides → Convert to images → Inspect
+2. **List issues found** (if none found, look again more critically)
+3. Fix issues
+4. **Re-verify affected slides** — one fix often creates another problem
+5. Repeat until a full pass reveals no new issues
+
+**Do not declare success until you've completed at least one fix-and-verify cycle.**
+
+---
+
+## Converting to Images
+
+Convert presentations to individual slide images for visual inspection:
+
+```bash
+python scripts/office/soffice.py --headless --convert-to pdf output.pptx
+pdftoppm -jpeg -r 150 output.pdf slide
+```
+
+This creates `slide-01.jpg`, `slide-02.jpg`, etc.
+
+To re-render specific slides after fixes:
+
+```bash
+pdftoppm -jpeg -r 150 -f N -l N output.pdf slide-fixed
+```
+
+---
+
+## Dependencies
+
+- `pip install "markitdown[pptx]"` - text extraction
+- `pip install Pillow` - thumbnail grids
+- `npm install -g pptxgenjs` - creating from scratch
+- LibreOffice (`soffice`) - PDF conversion (auto-configured for sandboxed environments via `scripts/office/soffice.py`)
+- Poppler (`pdftoppm`) - PDF to images
diff --git a/.claude/skills/pptx/editing.md b/.claude/skills/pptx/editing.md
new file mode 100644
index 0000000..f873e8a
--- /dev/null
+++ b/.claude/skills/pptx/editing.md
@@ -0,0 +1,205 @@
+# Editing Presentations
+
+## Template-Based Workflow
+
+When using an existing presentation as a template:
+
+1. **Analyze existing slides**:
+ ```bash
+ python scripts/thumbnail.py template.pptx
+ python -m markitdown template.pptx
+ ```
+ Review `thumbnails.jpg` to see layouts, and markitdown output to see placeholder text.
+
+2. **Plan slide mapping**: For each content section, choose a template slide.
+
+ ⚠️ **USE VARIED LAYOUTS** — monotonous presentations are a common failure mode. Don't default to basic title + bullet slides. Actively seek out:
+ - Multi-column layouts (2-column, 3-column)
+ - Image + text combinations
+ - Full-bleed images with text overlay
+ - Quote or callout slides
+ - Section dividers
+ - Stat/number callouts
+ - Icon grids or icon + text rows
+
+ **Avoid:** Repeating the same text-heavy layout for every slide.
+
+ Match content type to layout style (e.g., key points → bullet slide, team info → multi-column, testimonials → quote slide).
+
+3. **Unpack**: `python scripts/office/unpack.py template.pptx unpacked/`
+
+4. **Build presentation** (do this yourself, not with subagents):
+ - Delete unwanted slides (remove from ``)
+ - Duplicate slides you want to reuse (`add_slide.py`)
+ - Reorder slides in ``
+ - **Complete all structural changes before step 5**
+
+5. **Edit content**: Update text in each `slide{N}.xml`.
+ **Use subagents here if available** — slides are separate XML files, so subagents can edit in parallel.
+
+6. **Clean**: `python scripts/clean.py unpacked/`
+
+7. **Pack**: `python scripts/office/pack.py unpacked/ output.pptx --original template.pptx`
+
+---
+
+## Scripts
+
+| Script | Purpose |
+|--------|---------|
+| `unpack.py` | Extract and pretty-print PPTX |
+| `add_slide.py` | Duplicate slide or create from layout |
+| `clean.py` | Remove orphaned files |
+| `pack.py` | Repack with validation |
+| `thumbnail.py` | Create visual grid of slides |
+
+### unpack.py
+
+```bash
+python scripts/office/unpack.py input.pptx unpacked/
+```
+
+Extracts PPTX, pretty-prints XML, escapes smart quotes.
+
+### add_slide.py
+
+```bash
+python scripts/add_slide.py unpacked/ slide2.xml # Duplicate slide
+python scripts/add_slide.py unpacked/ slideLayout2.xml # From layout
+```
+
+Prints the `<p:sldId>` element to add to `<p:sldIdLst>` in `ppt/presentation.xml` at the desired position.
+
+### clean.py
+
+```bash
+python scripts/clean.py unpacked/
+```
+
+Removes slides not listed in `<p:sldIdLst>`, unreferenced media, orphaned rels.
+
+### pack.py
+
+```bash
+python scripts/office/pack.py unpacked/ output.pptx --original input.pptx
+```
+
+Validates, repairs, condenses XML, re-encodes smart quotes.
+
+### thumbnail.py
+
+```bash
+python scripts/thumbnail.py input.pptx [output_prefix] [--cols N]
+```
+
+Creates `thumbnails.jpg` with slide filenames as labels. Default 3 columns, max 12 per grid.
+
+**Use for template analysis only** (choosing layouts). For visual QA, use `soffice` + `pdftoppm` to create full-resolution individual slide images—see SKILL.md.
+
+---
+
+## Slide Operations
+
+Slide order is in `ppt/presentation.xml` → `<p:sldIdLst>`.
+
+**Reorder**: Rearrange the `<p:sldId>` elements.
+
+**Delete**: Remove the slide's `<p:sldId>` entry, then run `clean.py`.
+
+**Add**: Use `add_slide.py`. Never manually copy slide files—the script handles notes references, Content_Types.xml, and relationship IDs that manual copying misses.
+
+---
+
+## Editing Content
+
+**Subagents:** If available, use them here (after completing step 4). Each slide is a separate XML file, so subagents can edit in parallel. In your prompt to subagents, include:
+- The slide file path(s) to edit
+- **"Use the Edit tool for all changes"**
+- The formatting rules and common pitfalls below
+
+For each slide:
+1. Read the slide's XML
+2. Identify ALL placeholder content—text, images, charts, icons, captions
+3. Replace each placeholder with final content
+
+**Use the Edit tool, not sed or Python scripts.** The Edit tool forces specificity about what to replace and where, yielding better reliability.
+
+### Formatting Rules
+
+- **Bold all headers, subheadings, and inline labels**: Use `b="1"` on the run properties (`<a:rPr>`). This includes:
+  - Slide titles
+  - Section headers within a slide
+  - Inline labels (e.g., "Status:", "Description:") at the start of a line
+- **Never use unicode bullets (•)**: Use proper list formatting with `<a:buChar>` or `<a:buAutoNum>`
+- **Bullet consistency**: Let bullets inherit from the layout. Only specify `<a:buChar>` or `<a:buNone>` when you must override the inherited bullet.
+
+---
+
+## Common Pitfalls
+
+### Template Adaptation
+
+When source content has fewer items than the template:
+- **Remove excess elements entirely** (images, shapes, text boxes), don't just clear text
+- Check for orphaned visuals after clearing text content
+- Run visual QA to catch mismatched counts
+
+When replacing text with different length content:
+- **Shorter replacements**: Usually safe
+- **Longer replacements**: May overflow or wrap unexpectedly
+- Test with visual QA after text changes
+- Consider truncating or splitting content to fit the template's design constraints
+
+**Template slots ≠ Source items**: If template has 4 team members but source has 3 users, delete the 4th member's entire group (image + text boxes), not just the text.
+
+### Multi-Item Content
+
+If source has multiple items (numbered lists, multiple sections), create separate `<a:p>` (paragraph) elements for each — **never concatenate into one string**.
+
+**❌ WRONG** — all items in one paragraph:
+```xml
+<a:p>
+  <a:r><a:t>Step 1: Do the first thing. Step 2: Do the second thing.</a:t></a:r>
+</a:p>
+```
+
+**✅ CORRECT** — separate paragraphs with bold headers:
+```xml
+<a:p>
+  <a:pPr/>
+  <a:r><a:rPr b="1"/><a:t>Step 1</a:t></a:r>
+</a:p>
+<a:p>
+  <a:pPr/>
+  <a:r><a:rPr/><a:t>Do the first thing.</a:t></a:r>
+</a:p>
+<a:p>
+  <a:pPr/>
+  <a:r><a:rPr b="1"/><a:t>Step 2</a:t></a:r>
+</a:p>
+```
+
+Copy the `<a:pPr>` from the original paragraph to preserve line spacing. Use `b="1"` on headers.
+
+### Smart Quotes
+
+Handled automatically by unpack/pack. But the Edit tool converts smart quotes to ASCII.
+
+**When adding new text with quotes, use XML entities:**
+
+```xml
+the &#8220;Agreement&#8221;
+```
+
+| Character | Name | Unicode | XML Entity |
+|-----------|------|---------|------------|
+| `“` | Left double quote | U+201C | `&#8220;` |
+| `”` | Right double quote | U+201D | `&#8221;` |
+| `‘` | Left single quote | U+2018 | `&#8216;` |
+| `’` | Right single quote | U+2019 | `&#8217;` |
+
+### Other
+
+- **Whitespace**: Use `xml:space="preserve"` on `<a:t>` elements with leading/trailing spaces
+- **XML parsing**: Use `defusedxml.minidom`, not `xml.etree.ElementTree` (corrupts namespaces)
diff --git a/.claude/skills/pptx/pptxgenjs.md b/.claude/skills/pptx/pptxgenjs.md
new file mode 100644
index 0000000..6bfed90
--- /dev/null
+++ b/.claude/skills/pptx/pptxgenjs.md
@@ -0,0 +1,420 @@
+# PptxGenJS Tutorial
+
+## Setup & Basic Structure
+
+```javascript
+const pptxgen = require("pptxgenjs");
+
+let pres = new pptxgen();
+pres.layout = 'LAYOUT_16x9'; // or 'LAYOUT_16x10', 'LAYOUT_4x3', 'LAYOUT_WIDE'
+pres.author = 'Your Name';
+pres.title = 'Presentation Title';
+
+let slide = pres.addSlide();
+slide.addText("Hello World!", { x: 0.5, y: 0.5, fontSize: 36, color: "363636" });
+
+pres.writeFile({ fileName: "Presentation.pptx" });
+```
+
+## Layout Dimensions
+
+Slide dimensions (coordinates in inches):
+- `LAYOUT_16x9`: 10" × 5.625" (default)
+- `LAYOUT_16x10`: 10" × 6.25"
+- `LAYOUT_4x3`: 10" × 7.5"
+- `LAYOUT_WIDE`: 13.3" × 7.5"
+
+---
+
+## Text & Formatting
+
+```javascript
+// Basic text
+slide.addText("Simple Text", {
+ x: 1, y: 1, w: 8, h: 2, fontSize: 24, fontFace: "Arial",
+ color: "363636", bold: true, align: "center", valign: "middle"
+});
+
+// Character spacing (use charSpacing, not letterSpacing which is silently ignored)
+slide.addText("SPACED TEXT", { x: 1, y: 1, w: 8, h: 1, charSpacing: 6 });
+
+// Rich text arrays
+slide.addText([
+ { text: "Bold ", options: { bold: true } },
+ { text: "Italic ", options: { italic: true } }
+], { x: 1, y: 3, w: 8, h: 1 });
+
+// Multi-line text (requires breakLine: true)
+slide.addText([
+ { text: "Line 1", options: { breakLine: true } },
+ { text: "Line 2", options: { breakLine: true } },
+ { text: "Line 3" } // Last item doesn't need breakLine
+], { x: 0.5, y: 0.5, w: 8, h: 2 });
+
+// Text box margin (internal padding)
+slide.addText("Title", {
+ x: 0.5, y: 0.3, w: 9, h: 0.6,
+ margin: 0 // Use 0 when aligning text with other elements like shapes or icons
+});
+```
+
+**Tip:** Text boxes have internal margin by default. Set `margin: 0` when you need text to align precisely with shapes, lines, or icons at the same x-position.
+
+---
+
+## Lists & Bullets
+
+```javascript
+// ✅ CORRECT: Multiple bullets
+slide.addText([
+ { text: "First item", options: { bullet: true, breakLine: true } },
+ { text: "Second item", options: { bullet: true, breakLine: true } },
+ { text: "Third item", options: { bullet: true } }
+], { x: 0.5, y: 0.5, w: 8, h: 3 });
+
+// ❌ WRONG: Never use unicode bullets
+slide.addText("• First item", { ... }); // Creates double bullets
+
+// Sub-items and numbered lists
+{ text: "Sub-item", options: { bullet: true, indentLevel: 1 } }
+{ text: "First", options: { bullet: { type: "number" }, breakLine: true } }
+```
+
+---
+
+## Shapes
+
+```javascript
+slide.addShape(pres.shapes.RECTANGLE, {
+ x: 0.5, y: 0.8, w: 1.5, h: 3.0,
+ fill: { color: "FF0000" }, line: { color: "000000", width: 2 }
+});
+
+slide.addShape(pres.shapes.OVAL, { x: 4, y: 1, w: 2, h: 2, fill: { color: "0000FF" } });
+
+slide.addShape(pres.shapes.LINE, {
+ x: 1, y: 3, w: 5, h: 0, line: { color: "FF0000", width: 3, dashType: "dash" }
+});
+
+// With transparency
+slide.addShape(pres.shapes.RECTANGLE, {
+ x: 1, y: 1, w: 3, h: 2,
+ fill: { color: "0088CC", transparency: 50 }
+});
+
+// Rounded rectangle (rectRadius only works with ROUNDED_RECTANGLE, not RECTANGLE)
+// ⚠️ Don't pair with rectangular accent overlays — they won't cover rounded corners. Use RECTANGLE instead.
+slide.addShape(pres.shapes.ROUNDED_RECTANGLE, {
+ x: 1, y: 1, w: 3, h: 2,
+ fill: { color: "FFFFFF" }, rectRadius: 0.1
+});
+
+// With shadow
+slide.addShape(pres.shapes.RECTANGLE, {
+ x: 1, y: 1, w: 3, h: 2,
+ fill: { color: "FFFFFF" },
+ shadow: { type: "outer", color: "000000", blur: 6, offset: 2, angle: 135, opacity: 0.15 }
+});
+```
+
+Shadow options:
+
+| Property | Type | Range | Notes |
+|----------|------|-------|-------|
+| `type` | string | `"outer"`, `"inner"` | |
+| `color` | string | 6-char hex (e.g. `"000000"`) | No `#` prefix, no 8-char hex — see Common Pitfalls |
+| `blur` | number | 0-100 pt | |
+| `offset` | number | 0-200 pt | **Must be non-negative** — negative values corrupt the file |
+| `angle` | number | 0-359 degrees | Direction the shadow falls (135 = bottom-right, 270 = upward) |
+| `opacity` | number | 0.0-1.0 | Use this for transparency, never encode in color string |
+
+To cast a shadow upward (e.g. on a footer bar), use `angle: 270` with a positive offset — do **not** use a negative offset.
+
+**Note**: Gradient fills are not natively supported. Use a gradient image as a background instead.
+
+---
+
+## Images
+
+### Image Sources
+
+```javascript
+// From file path
+slide.addImage({ path: "images/chart.png", x: 1, y: 1, w: 5, h: 3 });
+
+// From URL
+slide.addImage({ path: "https://example.com/image.jpg", x: 1, y: 1, w: 5, h: 3 });
+
+// From base64 (faster, no file I/O)
+slide.addImage({ data: "image/png;base64,iVBORw0KGgo...", x: 1, y: 1, w: 5, h: 3 });
+```
+
+### Image Options
+
+```javascript
+slide.addImage({
+ path: "image.png",
+ x: 1, y: 1, w: 5, h: 3,
+ rotate: 45, // 0-359 degrees
+ rounding: true, // Circular crop
+ transparency: 50, // 0-100
+ flipH: true, // Horizontal flip
+ flipV: false, // Vertical flip
+ altText: "Description", // Accessibility
+ hyperlink: { url: "https://example.com" }
+});
+```
+
+### Image Sizing Modes
+
+```javascript
+// Contain - fit inside, preserve ratio
+{ sizing: { type: 'contain', w: 4, h: 3 } }
+
+// Cover - fill area, preserve ratio (may crop)
+{ sizing: { type: 'cover', w: 4, h: 3 } }
+
+// Crop - cut specific portion
+{ sizing: { type: 'crop', x: 0.5, y: 0.5, w: 2, h: 2 } }
+```
+
+### Calculate Dimensions (preserve aspect ratio)
+
+```javascript
+const origWidth = 1978, origHeight = 923, maxHeight = 3.0;
+const calcWidth = maxHeight * (origWidth / origHeight);
+const centerX = (10 - calcWidth) / 2;
+
+slide.addImage({ path: "image.png", x: centerX, y: 1.2, w: calcWidth, h: maxHeight });
+```
+
+### Supported Formats
+
+- **Standard**: PNG, JPG, GIF (animated GIFs work in Microsoft 365)
+- **SVG**: Works in modern PowerPoint/Microsoft 365
+
+---
+
+## Icons
+
+Use react-icons to generate SVG icons, then rasterize to PNG for universal compatibility.
+
+### Setup
+
+```javascript
+const React = require("react");
+const ReactDOMServer = require("react-dom/server");
+const sharp = require("sharp");
+const { FaCheckCircle, FaChartLine } = require("react-icons/fa");
+
+function renderIconSvg(IconComponent, color = "#000000", size = 256) {
+ return ReactDOMServer.renderToStaticMarkup(
+ React.createElement(IconComponent, { color, size: String(size) })
+ );
+}
+
+async function iconToBase64Png(IconComponent, color, size = 256) {
+ const svg = renderIconSvg(IconComponent, color, size);
+ const pngBuffer = await sharp(Buffer.from(svg)).png().toBuffer();
+ return "image/png;base64," + pngBuffer.toString("base64");
+}
+```
+
+### Add Icon to Slide
+
+```javascript
+const iconData = await iconToBase64Png(FaCheckCircle, "#4472C4", 256);
+
+slide.addImage({
+ data: iconData,
+ x: 1, y: 1, w: 0.5, h: 0.5 // Size in inches
+});
+```
+
+**Note**: Use size 256 or higher for crisp icons. The size parameter controls the rasterization resolution, not the display size on the slide (which is set by `w` and `h` in inches).
+
+### Icon Libraries
+
+Install: `npm install -g react-icons react react-dom sharp`
+
+Popular icon sets in react-icons:
+- `react-icons/fa` - Font Awesome
+- `react-icons/md` - Material Design
+- `react-icons/hi` - Heroicons
+- `react-icons/bi` - Bootstrap Icons
+
+---
+
+## Slide Backgrounds
+
+```javascript
+// Solid color
+slide.background = { color: "F1F1F1" };
+
+// Color with transparency
+slide.background = { color: "FF3399", transparency: 50 };
+
+// Image from URL
+slide.background = { path: "https://example.com/bg.jpg" };
+
+// Image from base64
+slide.background = { data: "image/png;base64,iVBORw0KGgo..." };
+```
+
+---
+
+## Tables
+
+```javascript
+slide.addTable([
+ ["Header 1", "Header 2"],
+ ["Cell 1", "Cell 2"]
+], {
+ x: 1, y: 1, w: 8, h: 2,
+ border: { pt: 1, color: "999999" }, fill: { color: "F1F1F1" }
+});
+
+// Advanced with merged cells
+let tableData = [
+ [{ text: "Header", options: { fill: { color: "6699CC" }, color: "FFFFFF", bold: true } }, "Cell"],
+ [{ text: "Merged", options: { colspan: 2 } }]
+];
+slide.addTable(tableData, { x: 1, y: 3.5, w: 8, colW: [4, 4] });
+```
+
+---
+
+## Charts
+
+```javascript
+// Bar chart
+slide.addChart(pres.charts.BAR, [{
+ name: "Sales", labels: ["Q1", "Q2", "Q3", "Q4"], values: [4500, 5500, 6200, 7100]
+}], {
+ x: 0.5, y: 0.6, w: 6, h: 3, barDir: 'col',
+ showTitle: true, title: 'Quarterly Sales'
+});
+
+// Line chart
+slide.addChart(pres.charts.LINE, [{
+ name: "Temp", labels: ["Jan", "Feb", "Mar"], values: [32, 35, 42]
+}], { x: 0.5, y: 4, w: 6, h: 3, lineSize: 3, lineSmooth: true });
+
+// Pie chart
+slide.addChart(pres.charts.PIE, [{
+ name: "Share", labels: ["A", "B", "Other"], values: [35, 45, 20]
+}], { x: 7, y: 1, w: 5, h: 4, showPercent: true });
+```
+
+### Better-Looking Charts
+
+Default charts look dated. Apply these options for a modern, clean appearance:
+
+```javascript
+slide.addChart(pres.charts.BAR, chartData, {
+ x: 0.5, y: 1, w: 9, h: 4, barDir: "col",
+
+ // Custom colors (match your presentation palette)
+ chartColors: ["0D9488", "14B8A6", "5EEAD4"],
+
+ // Clean background
+ chartArea: { fill: { color: "FFFFFF" }, roundedCorners: true },
+
+ // Muted axis labels
+ catAxisLabelColor: "64748B",
+ valAxisLabelColor: "64748B",
+
+ // Subtle grid (value axis only)
+ valGridLine: { color: "E2E8F0", size: 0.5 },
+ catGridLine: { style: "none" },
+
+ // Data labels on bars
+ showValue: true,
+ dataLabelPosition: "outEnd",
+ dataLabelColor: "1E293B",
+
+ // Hide legend for single series
+ showLegend: false,
+});
+```
+
+**Key styling options:**
+- `chartColors: [...]` - hex colors for series/segments
+- `chartArea: { fill, border, roundedCorners }` - chart background
+- `catGridLine/valGridLine: { color, style, size }` - grid lines (`style: "none"` to hide)
+- `lineSmooth: true` - curved lines (line charts)
+- `legendPos: "r"` - legend position: "b", "t", "l", "r", "tr"
+
+---
+
+## Slide Masters
+
+```javascript
+pres.defineSlideMaster({
+ title: 'TITLE_SLIDE', background: { color: '283A5E' },
+ objects: [{
+ placeholder: { options: { name: 'title', type: 'title', x: 1, y: 2, w: 8, h: 2 } }
+ }]
+});
+
+let titleSlide = pres.addSlide({ masterName: "TITLE_SLIDE" });
+titleSlide.addText("My Title", { placeholder: "title" });
+```
+
+---
+
+## Common Pitfalls
+
+⚠️ These issues cause file corruption, visual bugs, or broken output. Avoid them.
+
+1. **NEVER use "#" with hex colors** - causes file corruption
+ ```javascript
+ color: "FF0000" // ✅ CORRECT
+ color: "#FF0000" // ❌ WRONG
+ ```
+
+2. **NEVER encode opacity in hex color strings** - 8-char colors (e.g., `"00000020"`) corrupt the file. Use the `opacity` property instead.
+ ```javascript
+ shadow: { type: "outer", blur: 6, offset: 2, color: "00000020" } // ❌ CORRUPTS FILE
+ shadow: { type: "outer", blur: 6, offset: 2, color: "000000", opacity: 0.12 } // ✅ CORRECT
+ ```
+
+3. **Use `bullet: true`** - NEVER unicode symbols like "•" (creates double bullets)
+
+4. **Use `breakLine: true`** between array items — without it, consecutive text runs are joined together on one line
+
+5. **Avoid `lineSpacing` with bullets** - causes excessive gaps; use `paraSpaceAfter` instead
+
+6. **Each presentation needs fresh instance** - don't reuse `pptxgen()` objects
+
+7. **NEVER reuse option objects across calls** - PptxGenJS mutates objects in-place (e.g. converting shadow values to EMU). Sharing one object between multiple calls corrupts the second shape.
+ ```javascript
+ const shadow = { type: "outer", blur: 6, offset: 2, color: "000000", opacity: 0.15 };
+ slide.addShape(pres.shapes.RECTANGLE, { shadow, ... }); // ❌ second call gets already-converted values
+ slide.addShape(pres.shapes.RECTANGLE, { shadow, ... });
+
+ const makeShadow = () => ({ type: "outer", blur: 6, offset: 2, color: "000000", opacity: 0.15 });
+ slide.addShape(pres.shapes.RECTANGLE, { shadow: makeShadow(), ... }); // ✅ fresh object each time
+ slide.addShape(pres.shapes.RECTANGLE, { shadow: makeShadow(), ... });
+ ```
+
+8. **Don't use `ROUNDED_RECTANGLE` with accent borders** - rectangular overlay bars won't cover rounded corners. Use `RECTANGLE` instead.
+ ```javascript
+ // ❌ WRONG: Accent bar doesn't cover rounded corners
+ slide.addShape(pres.shapes.ROUNDED_RECTANGLE, { x: 1, y: 1, w: 3, h: 1.5, fill: { color: "FFFFFF" } });
+ slide.addShape(pres.shapes.RECTANGLE, { x: 1, y: 1, w: 0.08, h: 1.5, fill: { color: "0891B2" } });
+
+ // ✅ CORRECT: Use RECTANGLE for clean alignment
+ slide.addShape(pres.shapes.RECTANGLE, { x: 1, y: 1, w: 3, h: 1.5, fill: { color: "FFFFFF" } });
+ slide.addShape(pres.shapes.RECTANGLE, { x: 1, y: 1, w: 0.08, h: 1.5, fill: { color: "0891B2" } });
+ ```
+
+---
+
+## Quick Reference
+
+- **Shapes**: RECTANGLE, OVAL, LINE, ROUNDED_RECTANGLE
+- **Charts**: BAR, LINE, PIE, DOUGHNUT, SCATTER, BUBBLE, RADAR
+- **Layouts**: LAYOUT_16x9 (10"×5.625"), LAYOUT_16x10, LAYOUT_4x3, LAYOUT_WIDE
+- **Alignment**: "left", "center", "right"
+- **Chart data labels**: "outEnd", "inEnd", "center"
diff --git a/.claude/skills/pptx/scripts/__init__.py b/.claude/skills/pptx/scripts/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/.claude/skills/pptx/scripts/add_slide.py b/.claude/skills/pptx/scripts/add_slide.py
new file mode 100644
index 0000000..13700df
--- /dev/null
+++ b/.claude/skills/pptx/scripts/add_slide.py
@@ -0,0 +1,195 @@
+"""Add a new slide to an unpacked PPTX directory.
+
+Usage: python add_slide.py <unpacked_dir> <source>
+
+The source can be:
+ - A slide file (e.g., slide2.xml) - duplicates the slide
+ - A layout file (e.g., slideLayout2.xml) - creates from layout
+
+Examples:
+ python add_slide.py unpacked/ slide2.xml
+ # Duplicates slide2, creates slide5.xml
+
+ python add_slide.py unpacked/ slideLayout2.xml
+ # Creates slide5.xml from slideLayout2.xml
+
+To see available layouts: ls unpacked/ppt/slideLayouts/
+
+Prints the element to add to presentation.xml.
+"""
+
+import re
+import shutil
+import sys
+from pathlib import Path
+
+
+def get_next_slide_number(slides_dir: Path) -> int:
+ existing = [int(m.group(1)) for f in slides_dir.glob("slide*.xml")
+ if (m := re.match(r"slide(\d+)\.xml", f.name))]
+ return max(existing) + 1 if existing else 1
+
+
+def create_slide_from_layout(unpacked_dir: Path, layout_file: str) -> None:
+ slides_dir = unpacked_dir / "ppt" / "slides"
+ rels_dir = slides_dir / "_rels"
+ layouts_dir = unpacked_dir / "ppt" / "slideLayouts"
+
+ layout_path = layouts_dir / layout_file
+ if not layout_path.exists():
+ print(f"Error: {layout_path} not found", file=sys.stderr)
+ sys.exit(1)
+
+ next_num = get_next_slide_number(slides_dir)
+ dest = f"slide{next_num}.xml"
+ dest_slide = slides_dir / dest
+ dest_rels = rels_dir / f"{dest}.rels"
+
+ slide_xml = '''
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ '''
+ dest_slide.write_text(slide_xml, encoding="utf-8")
+
+ rels_dir.mkdir(exist_ok=True)
+ rels_xml = f'''
+
+
+ '''
+ dest_rels.write_text(rels_xml, encoding="utf-8")
+
+ _add_to_content_types(unpacked_dir, dest)
+
+ rid = _add_to_presentation_rels(unpacked_dir, dest)
+
+ next_slide_id = _get_next_slide_id(unpacked_dir)
+
+ print(f"Created {dest} from {layout_file}")
+    print(f'Add to presentation.xml <p:sldIdLst>: <p:sldId id="{next_slide_id}" r:id="{rid}"/>')
+
+
+def duplicate_slide(unpacked_dir: Path, source: str) -> None:
+ slides_dir = unpacked_dir / "ppt" / "slides"
+ rels_dir = slides_dir / "_rels"
+
+ source_slide = slides_dir / source
+
+ if not source_slide.exists():
+ print(f"Error: {source_slide} not found", file=sys.stderr)
+ sys.exit(1)
+
+ next_num = get_next_slide_number(slides_dir)
+ dest = f"slide{next_num}.xml"
+ dest_slide = slides_dir / dest
+
+ source_rels = rels_dir / f"{source}.rels"
+ dest_rels = rels_dir / f"{dest}.rels"
+
+ shutil.copy2(source_slide, dest_slide)
+
+ if source_rels.exists():
+ shutil.copy2(source_rels, dest_rels)
+
+ rels_content = dest_rels.read_text(encoding="utf-8")
+ rels_content = re.sub(
+            r'\s*<Relationship [^>]*Type="[^"]*notesSlide"[^>]*/>\s*',
+ "\n",
+ rels_content,
+ )
+ dest_rels.write_text(rels_content, encoding="utf-8")
+
+ _add_to_content_types(unpacked_dir, dest)
+
+ rid = _add_to_presentation_rels(unpacked_dir, dest)
+
+ next_slide_id = _get_next_slide_id(unpacked_dir)
+
+ print(f"Created {dest} from {source}")
+    print(f'Add to presentation.xml <p:sldIdLst>: <p:sldId id="{next_slide_id}" r:id="{rid}"/>')
+
+
+def _add_to_content_types(unpacked_dir: Path, dest: str) -> None:
+ content_types_path = unpacked_dir / "[Content_Types].xml"
+ content_types = content_types_path.read_text(encoding="utf-8")
+
+    new_override = f'<Override PartName="/ppt/slides/{dest}" ContentType="application/vnd.openxmlformats-officedocument.presentationml.slide+xml"/>'
+
+ if f"/ppt/slides/{dest}" not in content_types:
+ content_types = content_types.replace("", f" {new_override}\n")
+ content_types_path.write_text(content_types, encoding="utf-8")
+
+
+def _add_to_presentation_rels(unpacked_dir: Path, dest: str) -> str:
+ pres_rels_path = unpacked_dir / "ppt" / "_rels" / "presentation.xml.rels"
+ pres_rels = pres_rels_path.read_text(encoding="utf-8")
+
+ rids = [int(m) for m in re.findall(r'Id="rId(\d+)"', pres_rels)]
+ next_rid = max(rids) + 1 if rids else 1
+ rid = f"rId{next_rid}"
+
+    new_rel = f'<Relationship Id="{rid}" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/slide" Target="slides/{dest}"/>'
+
+ if f"slides/{dest}" not in pres_rels:
+ pres_rels = pres_rels.replace("", f" {new_rel}\n")
+ pres_rels_path.write_text(pres_rels, encoding="utf-8")
+
+ return rid
+
+
+def _get_next_slide_id(unpacked_dir: Path) -> int:
+ pres_path = unpacked_dir / "ppt" / "presentation.xml"
+ pres_content = pres_path.read_text(encoding="utf-8")
+    slide_ids = [int(m) for m in re.findall(r'<p:sldId [^>]*id="(\d+)"', pres_content)]
+ return max(slide_ids) + 1 if slide_ids else 256
+
+
+def parse_source(source: str) -> tuple[str, str | None]:
+ if source.startswith("slideLayout") and source.endswith(".xml"):
+ return ("layout", source)
+
+ return ("slide", None)
+
+
+if __name__ == "__main__":
+ if len(sys.argv) != 3:
+ print("Usage: python add_slide.py ", file=sys.stderr)
+ print("", file=sys.stderr)
+ print("Source can be:", file=sys.stderr)
+ print(" slide2.xml - duplicate an existing slide", file=sys.stderr)
+ print(" slideLayout2.xml - create from a layout template", file=sys.stderr)
+ print("", file=sys.stderr)
+ print("To see available layouts: ls /ppt/slideLayouts/", file=sys.stderr)
+ sys.exit(1)
+
+ unpacked_dir = Path(sys.argv[1])
+ source = sys.argv[2]
+
+ if not unpacked_dir.exists():
+ print(f"Error: {unpacked_dir} not found", file=sys.stderr)
+ sys.exit(1)
+
+ source_type, layout_file = parse_source(source)
+
+ if source_type == "layout" and layout_file is not None:
+ create_slide_from_layout(unpacked_dir, layout_file)
+ else:
+ duplicate_slide(unpacked_dir, source)
diff --git a/.claude/skills/pptx/scripts/clean.py b/.claude/skills/pptx/scripts/clean.py
new file mode 100644
index 0000000..3d13994
--- /dev/null
+++ b/.claude/skills/pptx/scripts/clean.py
@@ -0,0 +1,286 @@
+"""Remove unreferenced files from an unpacked PPTX directory.
+
+Usage: python clean.py <unpacked_dir>
+
+Example:
+ python clean.py unpacked/
+
+This script removes:
+- Orphaned slides (not in sldIdLst) and their relationships
+- [trash] directory (unreferenced files)
+- Orphaned .rels files for deleted resources
+- Unreferenced media, embeddings, charts, diagrams, drawings, ink files
+- Unreferenced theme files
+- Unreferenced notes slides
+- Content-Type overrides for deleted files
+"""
+
+import sys
+from pathlib import Path
+
+import defusedxml.minidom
+
+
+import re
+
+
+def get_slides_in_sldidlst(unpacked_dir: Path) -> set[str]:
+ pres_path = unpacked_dir / "ppt" / "presentation.xml"
+ pres_rels_path = unpacked_dir / "ppt" / "_rels" / "presentation.xml.rels"
+
+ if not pres_path.exists() or not pres_rels_path.exists():
+ return set()
+
+ rels_dom = defusedxml.minidom.parse(str(pres_rels_path))
+ rid_to_slide = {}
+ for rel in rels_dom.getElementsByTagName("Relationship"):
+ rid = rel.getAttribute("Id")
+ target = rel.getAttribute("Target")
+ rel_type = rel.getAttribute("Type")
+ if "slide" in rel_type and target.startswith("slides/"):
+ rid_to_slide[rid] = target.replace("slides/", "")
+
+ pres_content = pres_path.read_text(encoding="utf-8")
+    referenced_rids = set(re.findall(r'<p:sldId [^>]*r:id="([^"]+)"', pres_content))
+
+ return {rid_to_slide[rid] for rid in referenced_rids if rid in rid_to_slide}
+
+
+def remove_orphaned_slides(unpacked_dir: Path) -> list[str]:
+ slides_dir = unpacked_dir / "ppt" / "slides"
+ slides_rels_dir = slides_dir / "_rels"
+ pres_rels_path = unpacked_dir / "ppt" / "_rels" / "presentation.xml.rels"
+
+ if not slides_dir.exists():
+ return []
+
+ referenced_slides = get_slides_in_sldidlst(unpacked_dir)
+ removed = []
+
+ for slide_file in slides_dir.glob("slide*.xml"):
+ if slide_file.name not in referenced_slides:
+ rel_path = slide_file.relative_to(unpacked_dir)
+ slide_file.unlink()
+ removed.append(str(rel_path))
+
+ rels_file = slides_rels_dir / f"{slide_file.name}.rels"
+ if rels_file.exists():
+ rels_file.unlink()
+ removed.append(str(rels_file.relative_to(unpacked_dir)))
+
+ if removed and pres_rels_path.exists():
+ rels_dom = defusedxml.minidom.parse(str(pres_rels_path))
+ changed = False
+
+ for rel in list(rels_dom.getElementsByTagName("Relationship")):
+ target = rel.getAttribute("Target")
+ if target.startswith("slides/"):
+ slide_name = target.replace("slides/", "")
+ if slide_name not in referenced_slides:
+ if rel.parentNode:
+ rel.parentNode.removeChild(rel)
+ changed = True
+
+ if changed:
+ with open(pres_rels_path, "wb") as f:
+ f.write(rels_dom.toxml(encoding="utf-8"))
+
+ return removed
+
+
+def remove_trash_directory(unpacked_dir: Path) -> list[str]:
+ trash_dir = unpacked_dir / "[trash]"
+ removed = []
+
+ if trash_dir.exists() and trash_dir.is_dir():
+ for file_path in trash_dir.iterdir():
+ if file_path.is_file():
+ rel_path = file_path.relative_to(unpacked_dir)
+ removed.append(str(rel_path))
+ file_path.unlink()
+ trash_dir.rmdir()
+
+ return removed
+
+
+def get_slide_referenced_files(unpacked_dir: Path) -> set:
+ referenced = set()
+ slides_rels_dir = unpacked_dir / "ppt" / "slides" / "_rels"
+
+ if not slides_rels_dir.exists():
+ return referenced
+
+ for rels_file in slides_rels_dir.glob("*.rels"):
+ dom = defusedxml.minidom.parse(str(rels_file))
+ for rel in dom.getElementsByTagName("Relationship"):
+ target = rel.getAttribute("Target")
+ if not target:
+ continue
+ target_path = (rels_file.parent.parent / target).resolve()
+ try:
+ referenced.add(target_path.relative_to(unpacked_dir.resolve()))
+ except ValueError:
+ pass
+
+ return referenced
+
+
+def remove_orphaned_rels_files(unpacked_dir: Path) -> list[str]:
+ resource_dirs = ["charts", "diagrams", "drawings"]
+ removed = []
+ slide_referenced = get_slide_referenced_files(unpacked_dir)
+
+ for dir_name in resource_dirs:
+ rels_dir = unpacked_dir / "ppt" / dir_name / "_rels"
+ if not rels_dir.exists():
+ continue
+
+ for rels_file in rels_dir.glob("*.rels"):
+ resource_file = rels_dir.parent / rels_file.name.replace(".rels", "")
+ try:
+ resource_rel_path = resource_file.resolve().relative_to(unpacked_dir.resolve())
+ except ValueError:
+ continue
+
+ if not resource_file.exists() or resource_rel_path not in slide_referenced:
+ rels_file.unlink()
+ rel_path = rels_file.relative_to(unpacked_dir)
+ removed.append(str(rel_path))
+
+ return removed
+
+
+def get_referenced_files(unpacked_dir: Path) -> set:
+ referenced = set()
+
+ for rels_file in unpacked_dir.rglob("*.rels"):
+ dom = defusedxml.minidom.parse(str(rels_file))
+ for rel in dom.getElementsByTagName("Relationship"):
+ target = rel.getAttribute("Target")
+ if not target:
+ continue
+ target_path = (rels_file.parent.parent / target).resolve()
+ try:
+ referenced.add(target_path.relative_to(unpacked_dir.resolve()))
+ except ValueError:
+ pass
+
+ return referenced
+
+
+def remove_orphaned_files(unpacked_dir: Path, referenced: set) -> list[str]:
+ resource_dirs = ["media", "embeddings", "charts", "diagrams", "tags", "drawings", "ink"]
+ removed = []
+
+ for dir_name in resource_dirs:
+ dir_path = unpacked_dir / "ppt" / dir_name
+ if not dir_path.exists():
+ continue
+
+ for file_path in dir_path.glob("*"):
+ if not file_path.is_file():
+ continue
+ rel_path = file_path.relative_to(unpacked_dir)
+ if rel_path not in referenced:
+ file_path.unlink()
+ removed.append(str(rel_path))
+
+ theme_dir = unpacked_dir / "ppt" / "theme"
+ if theme_dir.exists():
+ for file_path in theme_dir.glob("theme*.xml"):
+ rel_path = file_path.relative_to(unpacked_dir)
+ if rel_path not in referenced:
+ file_path.unlink()
+ removed.append(str(rel_path))
+ theme_rels = theme_dir / "_rels" / f"{file_path.name}.rels"
+ if theme_rels.exists():
+ theme_rels.unlink()
+ removed.append(str(theme_rels.relative_to(unpacked_dir)))
+
+ notes_dir = unpacked_dir / "ppt" / "notesSlides"
+ if notes_dir.exists():
+ for file_path in notes_dir.glob("*.xml"):
+ if not file_path.is_file():
+ continue
+ rel_path = file_path.relative_to(unpacked_dir)
+ if rel_path not in referenced:
+ file_path.unlink()
+ removed.append(str(rel_path))
+
+ notes_rels_dir = notes_dir / "_rels"
+ if notes_rels_dir.exists():
+ for file_path in notes_rels_dir.glob("*.rels"):
+ notes_file = notes_dir / file_path.name.replace(".rels", "")
+ if not notes_file.exists():
+ file_path.unlink()
+ removed.append(str(file_path.relative_to(unpacked_dir)))
+
+ return removed
+
+
+def update_content_types(unpacked_dir: Path, removed_files: list[str]) -> None:
+ ct_path = unpacked_dir / "[Content_Types].xml"
+ if not ct_path.exists():
+ return
+
+ dom = defusedxml.minidom.parse(str(ct_path))
+ changed = False
+
+ for override in list(dom.getElementsByTagName("Override")):
+ part_name = override.getAttribute("PartName").lstrip("/")
+ if part_name in removed_files:
+ if override.parentNode:
+ override.parentNode.removeChild(override)
+ changed = True
+
+ if changed:
+ with open(ct_path, "wb") as f:
+ f.write(dom.toxml(encoding="utf-8"))
+
+
+def clean_unused_files(unpacked_dir: Path) -> list[str]:
+ all_removed = []
+
+ slides_removed = remove_orphaned_slides(unpacked_dir)
+ all_removed.extend(slides_removed)
+
+ trash_removed = remove_trash_directory(unpacked_dir)
+ all_removed.extend(trash_removed)
+
+ while True:
+ removed_rels = remove_orphaned_rels_files(unpacked_dir)
+ referenced = get_referenced_files(unpacked_dir)
+ removed_files = remove_orphaned_files(unpacked_dir, referenced)
+
+ total_removed = removed_rels + removed_files
+ if not total_removed:
+ break
+
+ all_removed.extend(total_removed)
+
+ if all_removed:
+ update_content_types(unpacked_dir, all_removed)
+
+ return all_removed
+
+
+if __name__ == "__main__":
+ if len(sys.argv) != 2:
+ print("Usage: python clean.py ", file=sys.stderr)
+ print("Example: python clean.py unpacked/", file=sys.stderr)
+ sys.exit(1)
+
+ unpacked_dir = Path(sys.argv[1])
+
+ if not unpacked_dir.exists():
+ print(f"Error: {unpacked_dir} not found", file=sys.stderr)
+ sys.exit(1)
+
+ removed = clean_unused_files(unpacked_dir)
+
+ if removed:
+ print(f"Removed {len(removed)} unreferenced files:")
+ for f in removed:
+ print(f" {f}")
+ else:
+ print("No unreferenced files found")
diff --git a/.claude/skills/pptx/scripts/office/helpers/__init__.py b/.claude/skills/pptx/scripts/office/helpers/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/.claude/skills/pptx/scripts/office/helpers/merge_runs.py b/.claude/skills/pptx/scripts/office/helpers/merge_runs.py
new file mode 100644
index 0000000..ad7c25e
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/helpers/merge_runs.py
@@ -0,0 +1,199 @@
+"""Merge adjacent runs with identical formatting in DOCX.
+
+Merges adjacent <w:r> elements that have identical <w:rPr> properties.
+Works on runs in paragraphs and inside tracked changes (<w:ins>, <w:del>).
+
+Also:
+- Removes rsid attributes from runs (revision metadata that doesn't affect rendering)
+- Removes <w:proofErr> elements (spell/grammar markers that block merging)
+"""
+
+from pathlib import Path
+
+import defusedxml.minidom
+
+
+def merge_runs(input_dir: str) -> tuple[int, str]:
+ doc_xml = Path(input_dir) / "word" / "document.xml"
+
+ if not doc_xml.exists():
+ return 0, f"Error: {doc_xml} not found"
+
+ try:
+ dom = defusedxml.minidom.parseString(doc_xml.read_text(encoding="utf-8"))
+ root = dom.documentElement
+
+ _remove_elements(root, "proofErr")
+ _strip_run_rsid_attrs(root)
+
+ containers = {run.parentNode for run in _find_elements(root, "r")}
+
+ merge_count = 0
+ for container in containers:
+ merge_count += _merge_runs_in(container)
+
+ doc_xml.write_bytes(dom.toxml(encoding="UTF-8"))
+ return merge_count, f"Merged {merge_count} runs"
+
+ except Exception as e:
+ return 0, f"Error: {e}"
+
+
+
+
+def _find_elements(root, tag: str) -> list:
+ results = []
+
+ def traverse(node):
+ if node.nodeType == node.ELEMENT_NODE:
+ name = node.localName or node.tagName
+ if name == tag or name.endswith(f":{tag}"):
+ results.append(node)
+ for child in node.childNodes:
+ traverse(child)
+
+ traverse(root)
+ return results
+
+
+def _get_child(parent, tag: str):
+ for child in parent.childNodes:
+ if child.nodeType == child.ELEMENT_NODE:
+ name = child.localName or child.tagName
+ if name == tag or name.endswith(f":{tag}"):
+ return child
+ return None
+
+
+def _get_children(parent, tag: str) -> list:
+ results = []
+ for child in parent.childNodes:
+ if child.nodeType == child.ELEMENT_NODE:
+ name = child.localName or child.tagName
+ if name == tag or name.endswith(f":{tag}"):
+ results.append(child)
+ return results
+
+
+def _is_adjacent(elem1, elem2) -> bool:
+ node = elem1.nextSibling
+ while node:
+ if node == elem2:
+ return True
+ if node.nodeType == node.ELEMENT_NODE:
+ return False
+ if node.nodeType == node.TEXT_NODE and node.data.strip():
+ return False
+ node = node.nextSibling
+ return False
+
+
+
+
+def _remove_elements(root, tag: str):
+ for elem in _find_elements(root, tag):
+ if elem.parentNode:
+ elem.parentNode.removeChild(elem)
+
+
+def _strip_run_rsid_attrs(root):
+ for run in _find_elements(root, "r"):
+ for attr in list(run.attributes.values()):
+ if "rsid" in attr.name.lower():
+ run.removeAttribute(attr.name)
+
+
+
+
+def _merge_runs_in(container) -> int:
+ merge_count = 0
+ run = _first_child_run(container)
+
+ while run:
+ while True:
+ next_elem = _next_element_sibling(run)
+ if next_elem and _is_run(next_elem) and _can_merge(run, next_elem):
+ _merge_run_content(run, next_elem)
+ container.removeChild(next_elem)
+ merge_count += 1
+ else:
+ break
+
+ _consolidate_text(run)
+ run = _next_sibling_run(run)
+
+ return merge_count
+
+
+def _first_child_run(container):
+ for child in container.childNodes:
+ if child.nodeType == child.ELEMENT_NODE and _is_run(child):
+ return child
+ return None
+
+
+def _next_element_sibling(node):
+ sibling = node.nextSibling
+ while sibling:
+ if sibling.nodeType == sibling.ELEMENT_NODE:
+ return sibling
+ sibling = sibling.nextSibling
+ return None
+
+
+def _next_sibling_run(node):
+ sibling = node.nextSibling
+ while sibling:
+ if sibling.nodeType == sibling.ELEMENT_NODE:
+ if _is_run(sibling):
+ return sibling
+ sibling = sibling.nextSibling
+ return None
+
+
+def _is_run(node) -> bool:
+ name = node.localName or node.tagName
+ return name == "r" or name.endswith(":r")
+
+
+def _can_merge(run1, run2) -> bool:
+ rpr1 = _get_child(run1, "rPr")
+ rpr2 = _get_child(run2, "rPr")
+
+ if (rpr1 is None) != (rpr2 is None):
+ return False
+ if rpr1 is None:
+ return True
+ return rpr1.toxml() == rpr2.toxml()
+
+
+def _merge_run_content(target, source):
+ for child in list(source.childNodes):
+ if child.nodeType == child.ELEMENT_NODE:
+ name = child.localName or child.tagName
+ if name != "rPr" and not name.endswith(":rPr"):
+ target.appendChild(child)
+
+
+def _consolidate_text(run):
+ t_elements = _get_children(run, "t")
+
+ for i in range(len(t_elements) - 1, 0, -1):
+ curr, prev = t_elements[i], t_elements[i - 1]
+
+ if _is_adjacent(prev, curr):
+ prev_text = prev.firstChild.data if prev.firstChild else ""
+ curr_text = curr.firstChild.data if curr.firstChild else ""
+ merged = prev_text + curr_text
+
+ if prev.firstChild:
+ prev.firstChild.data = merged
+ else:
+ prev.appendChild(run.ownerDocument.createTextNode(merged))
+
+ if merged.startswith(" ") or merged.endswith(" "):
+ prev.setAttribute("xml:space", "preserve")
+ elif prev.hasAttribute("xml:space"):
+ prev.removeAttribute("xml:space")
+
+ run.removeChild(curr)
diff --git a/.claude/skills/pptx/scripts/office/helpers/simplify_redlines.py b/.claude/skills/pptx/scripts/office/helpers/simplify_redlines.py
new file mode 100644
index 0000000..db963bb
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/helpers/simplify_redlines.py
@@ -0,0 +1,197 @@
+"""Simplify tracked changes by merging adjacent w:ins or w:del elements.
+
+Merges adjacent <w:ins> elements from the same author into a single element.
+Same for <w:del> elements. This makes heavily-redlined documents easier to
+work with by reducing the number of tracked change wrappers.
+
+Rules:
+- Only merges w:ins with w:ins, w:del with w:del (same element type)
+- Only merges if same author (ignores timestamp differences)
+- Only merges if truly adjacent (only whitespace between them)
+"""
+
+import xml.etree.ElementTree as ET
+import zipfile
+from pathlib import Path
+
+import defusedxml.minidom
+
+WORD_NS = "http://schemas.openxmlformats.org/wordprocessingml/2006/main"
+
+
+def simplify_redlines(input_dir: str) -> tuple[int, str]:
+ doc_xml = Path(input_dir) / "word" / "document.xml"
+
+ if not doc_xml.exists():
+ return 0, f"Error: {doc_xml} not found"
+
+ try:
+ dom = defusedxml.minidom.parseString(doc_xml.read_text(encoding="utf-8"))
+ root = dom.documentElement
+
+ merge_count = 0
+
+ containers = _find_elements(root, "p") + _find_elements(root, "tc")
+
+ for container in containers:
+ merge_count += _merge_tracked_changes_in(container, "ins")
+ merge_count += _merge_tracked_changes_in(container, "del")
+
+ doc_xml.write_bytes(dom.toxml(encoding="UTF-8"))
+ return merge_count, f"Simplified {merge_count} tracked changes"
+
+ except Exception as e:
+ return 0, f"Error: {e}"
+
+
+def _merge_tracked_changes_in(container, tag: str) -> int:
+ merge_count = 0
+
+ tracked = [
+ child
+ for child in container.childNodes
+ if child.nodeType == child.ELEMENT_NODE and _is_element(child, tag)
+ ]
+
+ if len(tracked) < 2:
+ return 0
+
+ i = 0
+ while i < len(tracked) - 1:
+ curr = tracked[i]
+ next_elem = tracked[i + 1]
+
+ if _can_merge_tracked(curr, next_elem):
+ _merge_tracked_content(curr, next_elem)
+ container.removeChild(next_elem)
+ tracked.pop(i + 1)
+ merge_count += 1
+ else:
+ i += 1
+
+ return merge_count
+
+
+def _is_element(node, tag: str) -> bool:
+ name = node.localName or node.tagName
+ return name == tag or name.endswith(f":{tag}")
+
+
+def _get_author(elem) -> str:
+ author = elem.getAttribute("w:author")
+ if not author:
+ for attr in elem.attributes.values():
+ if attr.localName == "author" or attr.name.endswith(":author"):
+ return attr.value
+ return author
+
+
+def _can_merge_tracked(elem1, elem2) -> bool:
+ if _get_author(elem1) != _get_author(elem2):
+ return False
+
+ node = elem1.nextSibling
+ while node and node != elem2:
+ if node.nodeType == node.ELEMENT_NODE:
+ return False
+ if node.nodeType == node.TEXT_NODE and node.data.strip():
+ return False
+ node = node.nextSibling
+
+ return True
+
+
+def _merge_tracked_content(target, source):
+ while source.firstChild:
+ child = source.firstChild
+ source.removeChild(child)
+ target.appendChild(child)
+
+
+def _find_elements(root, tag: str) -> list:
+ results = []
+
+ def traverse(node):
+ if node.nodeType == node.ELEMENT_NODE:
+ name = node.localName or node.tagName
+ if name == tag or name.endswith(f":{tag}"):
+ results.append(node)
+ for child in node.childNodes:
+ traverse(child)
+
+ traverse(root)
+ return results
+
+
+def get_tracked_change_authors(doc_xml_path: Path) -> dict[str, int]:
+ if not doc_xml_path.exists():
+ return {}
+
+ try:
+ tree = ET.parse(doc_xml_path)
+ root = tree.getroot()
+ except ET.ParseError:
+ return {}
+
+ namespaces = {"w": WORD_NS}
+ author_attr = f"{{{WORD_NS}}}author"
+
+ authors: dict[str, int] = {}
+ for tag in ["ins", "del"]:
+ for elem in root.findall(f".//w:{tag}", namespaces):
+ author = elem.get(author_attr)
+ if author:
+ authors[author] = authors.get(author, 0) + 1
+
+ return authors
+
+
+def _get_authors_from_docx(docx_path: Path) -> dict[str, int]:
+ try:
+ with zipfile.ZipFile(docx_path, "r") as zf:
+ if "word/document.xml" not in zf.namelist():
+ return {}
+ with zf.open("word/document.xml") as f:
+ tree = ET.parse(f)
+ root = tree.getroot()
+
+ namespaces = {"w": WORD_NS}
+ author_attr = f"{{{WORD_NS}}}author"
+
+ authors: dict[str, int] = {}
+ for tag in ["ins", "del"]:
+ for elem in root.findall(f".//w:{tag}", namespaces):
+ author = elem.get(author_attr)
+ if author:
+ authors[author] = authors.get(author, 0) + 1
+ return authors
+ except (zipfile.BadZipFile, ET.ParseError):
+ return {}
+
+
+def infer_author(modified_dir: Path, original_docx: Path, default: str = "Claude") -> str:
+ modified_xml = modified_dir / "word" / "document.xml"
+ modified_authors = get_tracked_change_authors(modified_xml)
+
+ if not modified_authors:
+ return default
+
+ original_authors = _get_authors_from_docx(original_docx)
+
+ new_changes: dict[str, int] = {}
+ for author, count in modified_authors.items():
+ original_count = original_authors.get(author, 0)
+ diff = count - original_count
+ if diff > 0:
+ new_changes[author] = diff
+
+ if not new_changes:
+ return default
+
+ if len(new_changes) == 1:
+ return next(iter(new_changes))
+
+ raise ValueError(
+ f"Multiple authors added new changes: {new_changes}. "
+ "Cannot infer which author to validate."
+ )
diff --git a/.claude/skills/pptx/scripts/office/pack.py b/.claude/skills/pptx/scripts/office/pack.py
new file mode 100644
index 0000000..db29ed8
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/pack.py
@@ -0,0 +1,159 @@
+"""Pack a directory into a DOCX, PPTX, or XLSX file.
+
+Validates with auto-repair, condenses XML formatting, and creates the Office file.
+
+Usage:
+    python pack.py <input_directory> <output_file> [--original <original_file>] [--validate true|false]
+
+Examples:
+ python pack.py unpacked/ output.docx --original input.docx
+ python pack.py unpacked/ output.pptx --validate false
+"""
+
+import argparse
+import sys
+import shutil
+import tempfile
+import zipfile
+from pathlib import Path
+
+import defusedxml.minidom
+
+from validators import DOCXSchemaValidator, PPTXSchemaValidator, RedliningValidator
+
+def pack(
+ input_directory: str,
+ output_file: str,
+ original_file: str | None = None,
+ validate: bool = True,
+ infer_author_func=None,
+) -> tuple[None, str]:
+ input_dir = Path(input_directory)
+ output_path = Path(output_file)
+ suffix = output_path.suffix.lower()
+
+ if not input_dir.is_dir():
+ return None, f"Error: {input_dir} is not a directory"
+
+ if suffix not in {".docx", ".pptx", ".xlsx"}:
+ return None, f"Error: {output_file} must be a .docx, .pptx, or .xlsx file"
+
+ if validate and original_file:
+ original_path = Path(original_file)
+ if original_path.exists():
+ success, output = _run_validation(
+ input_dir, original_path, suffix, infer_author_func
+ )
+ if output:
+ print(output)
+ if not success:
+ return None, f"Error: Validation failed for {input_dir}"
+
+ with tempfile.TemporaryDirectory() as temp_dir:
+ temp_content_dir = Path(temp_dir) / "content"
+ shutil.copytree(input_dir, temp_content_dir)
+
+ for pattern in ["*.xml", "*.rels"]:
+ for xml_file in temp_content_dir.rglob(pattern):
+ _condense_xml(xml_file)
+
+ output_path.parent.mkdir(parents=True, exist_ok=True)
+ with zipfile.ZipFile(output_path, "w", zipfile.ZIP_DEFLATED) as zf:
+ for f in temp_content_dir.rglob("*"):
+ if f.is_file():
+ zf.write(f, f.relative_to(temp_content_dir))
+
+ return None, f"Successfully packed {input_dir} to {output_file}"
+
+
+def _run_validation(
+ unpacked_dir: Path,
+ original_file: Path,
+ suffix: str,
+ infer_author_func=None,
+) -> tuple[bool, str | None]:
+ output_lines = []
+ validators = []
+
+ if suffix == ".docx":
+ author = "Claude"
+ if infer_author_func:
+ try:
+ author = infer_author_func(unpacked_dir, original_file)
+ except ValueError as e:
+ print(f"Warning: {e} Using default author 'Claude'.", file=sys.stderr)
+
+ validators = [
+ DOCXSchemaValidator(unpacked_dir, original_file),
+ RedliningValidator(unpacked_dir, original_file, author=author),
+ ]
+ elif suffix == ".pptx":
+ validators = [PPTXSchemaValidator(unpacked_dir, original_file)]
+
+ if not validators:
+ return True, None
+
+ total_repairs = sum(v.repair() for v in validators)
+ if total_repairs:
+ output_lines.append(f"Auto-repaired {total_repairs} issue(s)")
+
+ success = all(v.validate() for v in validators)
+
+ if success:
+ output_lines.append("All validations PASSED!")
+
+ return success, "\n".join(output_lines) if output_lines else None
+
+
+def _condense_xml(xml_file: Path) -> None:
+ try:
+ with open(xml_file, encoding="utf-8") as f:
+ dom = defusedxml.minidom.parse(f)
+
+ for element in dom.getElementsByTagName("*"):
+ if element.tagName.endswith(":t"):
+ continue
+
+ for child in list(element.childNodes):
+ if (
+ child.nodeType == child.TEXT_NODE
+ and child.nodeValue
+ and child.nodeValue.strip() == ""
+ ) or child.nodeType == child.COMMENT_NODE:
+ element.removeChild(child)
+
+ xml_file.write_bytes(dom.toxml(encoding="UTF-8"))
+ except Exception as e:
+ print(f"ERROR: Failed to parse {xml_file.name}: {e}", file=sys.stderr)
+ raise
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Pack a directory into a DOCX, PPTX, or XLSX file"
+ )
+ parser.add_argument("input_directory", help="Unpacked Office document directory")
+ parser.add_argument("output_file", help="Output Office file (.docx/.pptx/.xlsx)")
+ parser.add_argument(
+ "--original",
+ help="Original file for validation comparison",
+ )
+ parser.add_argument(
+ "--validate",
+ type=lambda x: x.lower() == "true",
+ default=True,
+ metavar="true|false",
+ help="Run validation with auto-repair (default: true)",
+ )
+ args = parser.parse_args()
+
+ _, message = pack(
+ args.input_directory,
+ args.output_file,
+ original_file=args.original,
+ validate=args.validate,
+ )
+ print(message)
+
+ if "Error" in message:
+ sys.exit(1)
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chart.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chart.xsd
new file mode 100644
index 0000000..6454ef9
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chart.xsd
@@ -0,0 +1,1499 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd
new file mode 100644
index 0000000..afa4f46
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd
@@ -0,0 +1,146 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd
new file mode 100644
index 0000000..64e66b8
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd
@@ -0,0 +1,1085 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd
new file mode 100644
index 0000000..687eea8
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd
@@ -0,0 +1,11 @@
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-main.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-main.xsd
new file mode 100644
index 0000000..6ac81b0
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-main.xsd
@@ -0,0 +1,3081 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-picture.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-picture.xsd
new file mode 100644
index 0000000..1dbf051
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-picture.xsd
@@ -0,0 +1,23 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd
new file mode 100644
index 0000000..f1af17d
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd
@@ -0,0 +1,185 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd
new file mode 100644
index 0000000..0a185ab
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd
@@ -0,0 +1,287 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/pml.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/pml.xsd
new file mode 100644
index 0000000..14ef488
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/pml.xsd
@@ -0,0 +1,1676 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd
new file mode 100644
index 0000000..c20f3bf
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd
@@ -0,0 +1,28 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd
new file mode 100644
index 0000000..ac60252
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd
@@ -0,0 +1,144 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd
new file mode 100644
index 0000000..424b8ba
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd
@@ -0,0 +1,174 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd
new file mode 100644
index 0000000..2bddce2
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd
@@ -0,0 +1,25 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd
new file mode 100644
index 0000000..8a8c18b
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd
@@ -0,0 +1,18 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd
new file mode 100644
index 0000000..5c42706
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd
@@ -0,0 +1,59 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd
new file mode 100644
index 0000000..853c341
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd
@@ -0,0 +1,56 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd
new file mode 100644
index 0000000..da835ee
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd
@@ -0,0 +1,195 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-math.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-math.xsd
new file mode 100644
index 0000000..87ad265
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-math.xsd
@@ -0,0 +1,582 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd
new file mode 100644
index 0000000..9e86f1b
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd
@@ -0,0 +1,25 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/sml.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/sml.xsd
new file mode 100644
index 0000000..d0be42e
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/sml.xsd
@@ -0,0 +1,4439 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-main.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-main.xsd
new file mode 100644
index 0000000..8821dd1
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-main.xsd
@@ -0,0 +1,570 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd
new file mode 100644
index 0000000..ca2575c
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd
@@ -0,0 +1,509 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd
new file mode 100644
index 0000000..dd079e6
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd
new file mode 100644
index 0000000..3dd6cf6
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd
@@ -0,0 +1,108 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd
new file mode 100644
index 0000000..f1041e3
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd
@@ -0,0 +1,96 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/wml.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/wml.xsd
new file mode 100644
index 0000000..9c5b7a6
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/wml.xsd
@@ -0,0 +1,3646 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/xml.xsd b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/xml.xsd
new file mode 100644
index 0000000..0f13678
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ISO-IEC29500-4_2016/xml.xsd
@@ -0,0 +1,116 @@
+
+
+
+
+
+ See http://www.w3.org/XML/1998/namespace.html and
+ http://www.w3.org/TR/REC-xml for information about this namespace.
+
+ This schema document describes the XML namespace, in a form
+ suitable for import by other schema documents.
+
+ Note that local names in this namespace are intended to be defined
+ only by the World Wide Web Consortium or its subgroups. The
+ following names are currently defined in this namespace and should
+ not be used with conflicting semantics by any Working Group,
+ specification, or document instance:
+
+ base (as an attribute name): denotes an attribute whose value
+ provides a URI to be used as the base for interpreting any
+ relative URIs in the scope of the element on which it
+ appears; its value is inherited. This name is reserved
+ by virtue of its definition in the XML Base specification.
+
+ lang (as an attribute name): denotes an attribute whose value
+ is a language code for the natural language of the content of
+ any element; its value is inherited. This name is reserved
+ by virtue of its definition in the XML specification.
+
+ space (as an attribute name): denotes an attribute whose
+ value is a keyword indicating what whitespace processing
+ discipline is intended for the content of the element; its
+ value is inherited. This name is reserved by virtue of its
+ definition in the XML specification.
+
+ Father (in any context at all): denotes Jon Bosak, the chair of
+ the original XML Working Group. This name is reserved by
+ the following decision of the W3C XML Plenary and
+ XML Coordination groups:
+
+ In appreciation for his vision, leadership and dedication
+ the W3C XML Plenary on this 10th day of February, 2000
+ reserves for Jon Bosak in perpetuity the XML name
+ xml:Father
+
+
+
+
+ This schema defines attributes and an attribute group
+ suitable for use by
+ schemas wishing to allow xml:base, xml:lang or xml:space attributes
+ on elements they define.
+
+ To enable this, such a schema must import this schema
+ for the XML namespace, e.g. as follows:
+ <schema . . .>
+ . . .
+ <import namespace="http://www.w3.org/XML/1998/namespace"
+ schemaLocation="http://www.w3.org/2001/03/xml.xsd"/>
+
+ Subsequently, qualified reference to any of the attributes
+ or the group defined below will have the desired effect, e.g.
+
+ <type . . .>
+ . . .
+ <attributeGroup ref="xml:specialAttrs"/>
+
+ will define a type which will schema-validate an instance
+ element with any of those attributes
+
+
+
+ In keeping with the XML Schema WG's standard versioning
+ policy, this schema document will persist at
+ http://www.w3.org/2001/03/xml.xsd.
+ At the date of issue it can also be found at
+ http://www.w3.org/2001/xml.xsd.
+ The schema document at that URI may however change in the future,
+ in order to remain compatible with the latest version of XML Schema
+ itself. In other words, if the XML Schema namespace changes, the version
+ of this document at
+ http://www.w3.org/2001/xml.xsd will change
+ accordingly; the version at
+ http://www.w3.org/2001/03/xml.xsd will not change.
+
+
+
+
+
+ In due course, we should install the relevant ISO 2- and 3-letter
+ codes as the enumerated possible values . . .
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ See http://www.w3.org/TR/xmlbase/ for
+ information about this attribute.
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ecma/fouth-edition/opc-contentTypes.xsd b/.claude/skills/pptx/scripts/office/schemas/ecma/fouth-edition/opc-contentTypes.xsd
new file mode 100644
index 0000000..a6de9d2
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ecma/fouth-edition/opc-contentTypes.xsd
@@ -0,0 +1,42 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ecma/fouth-edition/opc-coreProperties.xsd b/.claude/skills/pptx/scripts/office/schemas/ecma/fouth-edition/opc-coreProperties.xsd
new file mode 100644
index 0000000..10e978b
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ecma/fouth-edition/opc-coreProperties.xsd
@@ -0,0 +1,50 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ecma/fouth-edition/opc-digSig.xsd b/.claude/skills/pptx/scripts/office/schemas/ecma/fouth-edition/opc-digSig.xsd
new file mode 100644
index 0000000..4248bf7
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ecma/fouth-edition/opc-digSig.xsd
@@ -0,0 +1,49 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/ecma/fouth-edition/opc-relationships.xsd b/.claude/skills/pptx/scripts/office/schemas/ecma/fouth-edition/opc-relationships.xsd
new file mode 100644
index 0000000..5649746
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/ecma/fouth-edition/opc-relationships.xsd
@@ -0,0 +1,33 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/mce/mc.xsd b/.claude/skills/pptx/scripts/office/schemas/mce/mc.xsd
new file mode 100644
index 0000000..ef72545
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/mce/mc.xsd
@@ -0,0 +1,75 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/microsoft/wml-2010.xsd b/.claude/skills/pptx/scripts/office/schemas/microsoft/wml-2010.xsd
new file mode 100644
index 0000000..f65f777
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/microsoft/wml-2010.xsd
@@ -0,0 +1,560 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/microsoft/wml-2012.xsd b/.claude/skills/pptx/scripts/office/schemas/microsoft/wml-2012.xsd
new file mode 100644
index 0000000..6b00755
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/microsoft/wml-2012.xsd
@@ -0,0 +1,67 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/microsoft/wml-2018.xsd b/.claude/skills/pptx/scripts/office/schemas/microsoft/wml-2018.xsd
new file mode 100644
index 0000000..f321d33
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/microsoft/wml-2018.xsd
@@ -0,0 +1,14 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/microsoft/wml-cex-2018.xsd b/.claude/skills/pptx/scripts/office/schemas/microsoft/wml-cex-2018.xsd
new file mode 100644
index 0000000..364c6a9
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/microsoft/wml-cex-2018.xsd
@@ -0,0 +1,20 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/microsoft/wml-cid-2016.xsd b/.claude/skills/pptx/scripts/office/schemas/microsoft/wml-cid-2016.xsd
new file mode 100644
index 0000000..fed9d15
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/microsoft/wml-cid-2016.xsd
@@ -0,0 +1,13 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/microsoft/wml-sdtdatahash-2020.xsd b/.claude/skills/pptx/scripts/office/schemas/microsoft/wml-sdtdatahash-2020.xsd
new file mode 100644
index 0000000..680cf15
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/microsoft/wml-sdtdatahash-2020.xsd
@@ -0,0 +1,4 @@
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/schemas/microsoft/wml-symex-2015.xsd b/.claude/skills/pptx/scripts/office/schemas/microsoft/wml-symex-2015.xsd
new file mode 100644
index 0000000..89ada90
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/schemas/microsoft/wml-symex-2015.xsd
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/pptx/scripts/office/soffice.py b/.claude/skills/pptx/scripts/office/soffice.py
new file mode 100644
index 0000000..c7f7e32
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/soffice.py
@@ -0,0 +1,183 @@
+"""
+Helper for running LibreOffice (soffice) in environments where AF_UNIX
+sockets may be blocked (e.g., sandboxed VMs). Detects the restriction
+at runtime and applies an LD_PRELOAD shim if needed.
+
+Usage:
+ from office.soffice import run_soffice, get_soffice_env
+
+ # Option 1 – run soffice directly
+ result = run_soffice(["--headless", "--convert-to", "pdf", "input.docx"])
+
+ # Option 2 – get env dict for your own subprocess calls
+ env = get_soffice_env()
+ subprocess.run(["soffice", ...], env=env)
+"""
+
+import os
+import socket
+import subprocess
+import tempfile
+from pathlib import Path
+
+
def get_soffice_env() -> dict:
    """Build an environment dict suitable for launching LibreOffice.

    Forces the headless "svp" VCL plugin and, when AF_UNIX sockets are
    blocked in this environment, injects the LD_PRELOAD socket shim.
    """
    environment = dict(os.environ)
    environment["SAL_USE_VCLPLUGIN"] = "svp"
    if _needs_shim():
        environment["LD_PRELOAD"] = str(_ensure_shim())
    return environment
+
+
def run_soffice(args: list[str], **kwargs) -> subprocess.CompletedProcess:
    """Run ``soffice`` with *args* under the sandbox-aware environment."""
    command = ["soffice", *args]
    return subprocess.run(command, env=get_soffice_env(), **kwargs)
+
+
+
+_SHIM_SO = Path(tempfile.gettempdir()) / "lo_socket_shim.so"
+
+
+def _needs_shim() -> bool:
+ try:
+ s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ s.close()
+ return False
+ except OSError:
+ return True
+
+
def _ensure_shim() -> Path:
    """Compile (once) and return the LD_PRELOAD socket-shim shared library.

    The C source is written to a scratch file in the temp dir, compiled
    with gcc, and the resulting ``.so`` is cached at ``_SHIM_SO`` so
    subsequent calls are free.

    Returns:
        Path to the compiled shared object.

    Raises:
        subprocess.CalledProcessError: if gcc fails; compiler output is
            available on the exception's ``stdout``/``stderr`` attributes.
    """
    if _SHIM_SO.exists():
        return _SHIM_SO

    src = Path(tempfile.gettempdir()) / "lo_socket_shim.c"
    src.write_text(_SHIM_SOURCE)
    try:
        subprocess.run(
            ["gcc", "-shared", "-fPIC", "-o", str(_SHIM_SO), str(src), "-ldl"],
            check=True,
            capture_output=True,
        )
    finally:
        # Always remove the scratch .c file, even when compilation fails;
        # previously it was leaked on error because check=True raised
        # before the unlink.
        src.unlink(missing_ok=True)
    return _SHIM_SO
+
+
+
+_SHIM_SOURCE = r"""
+#define _GNU_SOURCE
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+static int (*real_socket)(int, int, int);
+static int (*real_socketpair)(int, int, int, int[2]);
+static int (*real_listen)(int, int);
+static int (*real_accept)(int, struct sockaddr *, socklen_t *);
+static int (*real_close)(int);
+static int (*real_read)(int, void *, size_t);
+
+/* Per-FD bookkeeping (FDs >= 1024 are passed through unshimmed). */
+static int is_shimmed[1024];
+static int peer_of[1024];
+static int wake_r[1024]; /* accept() blocks reading this */
+static int wake_w[1024]; /* close() writes to this */
+static int listener_fd = -1; /* FD that received listen() */
+
+__attribute__((constructor))
+static void init(void) {
+ real_socket = dlsym(RTLD_NEXT, "socket");
+ real_socketpair = dlsym(RTLD_NEXT, "socketpair");
+ real_listen = dlsym(RTLD_NEXT, "listen");
+ real_accept = dlsym(RTLD_NEXT, "accept");
+ real_close = dlsym(RTLD_NEXT, "close");
+ real_read = dlsym(RTLD_NEXT, "read");
+ for (int i = 0; i < 1024; i++) {
+ peer_of[i] = -1;
+ wake_r[i] = -1;
+ wake_w[i] = -1;
+ }
+}
+
+/* ---- socket ---------------------------------------------------------- */
+int socket(int domain, int type, int protocol) {
+ if (domain == AF_UNIX) {
+ int fd = real_socket(domain, type, protocol);
+ if (fd >= 0) return fd;
+ /* socket(AF_UNIX) blocked – fall back to socketpair(). */
+ int sv[2];
+ if (real_socketpair(domain, type, protocol, sv) == 0) {
+ if (sv[0] >= 0 && sv[0] < 1024) {
+ is_shimmed[sv[0]] = 1;
+ peer_of[sv[0]] = sv[1];
+ int wp[2];
+ if (pipe(wp) == 0) {
+ wake_r[sv[0]] = wp[0];
+ wake_w[sv[0]] = wp[1];
+ }
+ }
+ return sv[0];
+ }
+ errno = EPERM;
+ return -1;
+ }
+ return real_socket(domain, type, protocol);
+}
+
+/* ---- listen ---------------------------------------------------------- */
+int listen(int sockfd, int backlog) {
+ if (sockfd >= 0 && sockfd < 1024 && is_shimmed[sockfd]) {
+ listener_fd = sockfd;
+ return 0;
+ }
+ return real_listen(sockfd, backlog);
+}
+
+/* ---- accept ---------------------------------------------------------- */
+int accept(int sockfd, struct sockaddr *addr, socklen_t *addrlen) {
+ if (sockfd >= 0 && sockfd < 1024 && is_shimmed[sockfd]) {
+ /* Block until close() writes to the wake pipe. */
+ if (wake_r[sockfd] >= 0) {
+ char buf;
+ real_read(wake_r[sockfd], &buf, 1);
+ }
+ errno = ECONNABORTED;
+ return -1;
+ }
+ return real_accept(sockfd, addr, addrlen);
+}
+
+/* ---- close ----------------------------------------------------------- */
+int close(int fd) {
+ if (fd >= 0 && fd < 1024 && is_shimmed[fd]) {
+ int was_listener = (fd == listener_fd);
+ is_shimmed[fd] = 0;
+
+ if (wake_w[fd] >= 0) { /* unblock accept() */
+ char c = 0;
+ write(wake_w[fd], &c, 1);
+ real_close(wake_w[fd]);
+ wake_w[fd] = -1;
+ }
+ if (wake_r[fd] >= 0) { real_close(wake_r[fd]); wake_r[fd] = -1; }
+ if (peer_of[fd] >= 0) { real_close(peer_of[fd]); peer_of[fd] = -1; }
+
+ if (was_listener)
+ _exit(0); /* conversion done – exit */
+ }
+ return real_close(fd);
+}
+"""
+
+
+
if __name__ == "__main__":
    # CLI passthrough: forward all command-line arguments to soffice and
    # mirror its exit status.
    import sys
    result = run_soffice(sys.argv[1:])
    sys.exit(result.returncode)
diff --git a/.claude/skills/pptx/scripts/office/unpack.py b/.claude/skills/pptx/scripts/office/unpack.py
new file mode 100644
index 0000000..0015253
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/unpack.py
@@ -0,0 +1,132 @@
+"""Unpack Office files (DOCX, PPTX, XLSX) for editing.
+
+Extracts the ZIP archive, pretty-prints XML files, and optionally:
+- Merges adjacent runs with identical formatting (DOCX only)
+- Simplifies adjacent tracked changes from same author (DOCX only)
+
+Usage:
+ python unpack.py [options]
+
+Examples:
+ python unpack.py document.docx unpacked/
+ python unpack.py presentation.pptx unpacked/
+ python unpack.py document.docx unpacked/ --merge-runs false
+"""
+
+import argparse
+import sys
+import zipfile
+from pathlib import Path
+
+import defusedxml.minidom
+
+from helpers.merge_runs import merge_runs as do_merge_runs
+from helpers.simplify_redlines import simplify_redlines as do_simplify_redlines
+
# Map typographic ("smart") quote characters to XML numeric character
# references.  The previous table mapped each character to itself
# ("\u201c" IS the left double quote), which made _escape_smart_quotes a
# no-op; these entity references actually escape the characters.
SMART_QUOTE_REPLACEMENTS = {
    "\u201c": "&#8220;",  # left double quotation mark
    "\u201d": "&#8221;",  # right double quotation mark
    "\u2018": "&#8216;",  # left single quotation mark
    "\u2019": "&#8217;",  # right single quotation mark
}
+
+
def unpack(
    input_file: str,
    output_directory: str,
    merge_runs: bool = True,
    simplify_redlines: bool = True,
) -> tuple[None, str]:
    """Extract an Office ZIP into *output_directory* and normalize its XML.

    Pretty-prints every ``.xml``/``.rels`` part, optionally simplifies
    tracked changes and merges runs (DOCX only), then applies smart-quote
    escaping.  Returns a ``(None, message)`` pair; the message starts
    with ``"Error"`` on failure.
    """
    src = Path(input_file)
    dest = Path(output_directory)
    extension = src.suffix.lower()

    if not src.exists():
        return None, f"Error: {input_file} does not exist"

    if extension not in {".docx", ".pptx", ".xlsx"}:
        return None, f"Error: {input_file} must be a .docx, .pptx, or .xlsx file"

    try:
        dest.mkdir(parents=True, exist_ok=True)

        with zipfile.ZipFile(src, "r") as archive:
            archive.extractall(dest)

        xml_files = list(dest.rglob("*.xml")) + list(dest.rglob("*.rels"))
        for part in xml_files:
            _pretty_print_xml(part)

        message = f"Unpacked {input_file} ({len(xml_files)} XML files)"

        if extension == ".docx":
            if simplify_redlines:
                count, _ = do_simplify_redlines(str(dest))
                message += f", simplified {count} tracked changes"

            if merge_runs:
                count, _ = do_merge_runs(str(dest))
                message += f", merged {count} runs"

        for part in xml_files:
            _escape_smart_quotes(part)

        return None, message

    except zipfile.BadZipFile:
        return None, f"Error: {input_file} is not a valid Office file"
    except Exception as e:
        return None, f"Error unpacking: {e}"
+
+
def _pretty_print_xml(xml_file: Path) -> None:
    """Rewrite *xml_file* with indentation; silently skip unparsable files."""
    try:
        parsed = defusedxml.minidom.parseString(
            xml_file.read_text(encoding="utf-8")
        )
        xml_file.write_bytes(parsed.toprettyxml(indent=" ", encoding="utf-8"))
    except Exception:
        pass
+
+
def _escape_smart_quotes(xml_file: Path) -> None:
    """Apply SMART_QUOTE_REPLACEMENTS to *xml_file* in place (best-effort)."""
    try:
        text = xml_file.read_text(encoding="utf-8")
        for original, replacement in SMART_QUOTE_REPLACEMENTS.items():
            text = text.replace(original, replacement)
        xml_file.write_text(text, encoding="utf-8")
    except Exception:
        pass
+
+
if __name__ == "__main__":
    def _parse_bool(value: str) -> bool:
        # CLI booleans arrive as the strings "true"/"false".
        return value.lower() == "true"

    cli = argparse.ArgumentParser(
        description="Unpack an Office file (DOCX, PPTX, XLSX) for editing"
    )
    cli.add_argument("input_file", help="Office file to unpack")
    cli.add_argument("output_directory", help="Output directory")
    cli.add_argument(
        "--merge-runs",
        type=_parse_bool,
        default=True,
        metavar="true|false",
        help="Merge adjacent runs with identical formatting (DOCX only, default: true)",
    )
    cli.add_argument(
        "--simplify-redlines",
        type=_parse_bool,
        default=True,
        metavar="true|false",
        help="Merge adjacent tracked changes from same author (DOCX only, default: true)",
    )
    opts = cli.parse_args()

    _, message = unpack(
        opts.input_file,
        opts.output_directory,
        merge_runs=opts.merge_runs,
        simplify_redlines=opts.simplify_redlines,
    )
    print(message)

    if "Error" in message:
        sys.exit(1)
diff --git a/.claude/skills/pptx/scripts/office/validate.py b/.claude/skills/pptx/scripts/office/validate.py
new file mode 100644
index 0000000..03b01f6
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/validate.py
@@ -0,0 +1,111 @@
+"""
+Command line tool to validate Office document XML files against XSD schemas and tracked changes.
+
+Usage:
+ python validate.py [--original ] [--auto-repair] [--author NAME]
+
+The first argument can be either:
+- An unpacked directory containing the Office document XML files
+- A packed Office file (.docx/.pptx/.xlsx) which will be unpacked to a temp directory
+
+Auto-repair fixes:
+- paraId/durableId values that exceed OOXML limits
+- Missing xml:space="preserve" on w:t elements with whitespace
+"""
+
+import argparse
+import sys
+import tempfile
+import zipfile
+from pathlib import Path
+
+from validators import DOCXSchemaValidator, PPTXSchemaValidator, RedliningValidator
+
+
+def main():
+ parser = argparse.ArgumentParser(description="Validate Office document XML files")
+ parser.add_argument(
+ "path",
+ help="Path to unpacked directory or packed Office file (.docx/.pptx/.xlsx)",
+ )
+ parser.add_argument(
+ "--original",
+ required=False,
+ default=None,
+ help="Path to original file (.docx/.pptx/.xlsx). If omitted, all XSD errors are reported and redlining validation is skipped.",
+ )
+ parser.add_argument(
+ "-v",
+ "--verbose",
+ action="store_true",
+ help="Enable verbose output",
+ )
+ parser.add_argument(
+ "--auto-repair",
+ action="store_true",
+ help="Automatically repair common issues (hex IDs, whitespace preservation)",
+ )
+ parser.add_argument(
+ "--author",
+ default="Claude",
+ help="Author name for redlining validation (default: Claude)",
+ )
+ args = parser.parse_args()
+
+ path = Path(args.path)
+ assert path.exists(), f"Error: {path} does not exist"
+
+ original_file = None
+ if args.original:
+ original_file = Path(args.original)
+ assert original_file.is_file(), f"Error: {original_file} is not a file"
+ assert original_file.suffix.lower() in [".docx", ".pptx", ".xlsx"], (
+ f"Error: {original_file} must be a .docx, .pptx, or .xlsx file"
+ )
+
+ file_extension = (original_file or path).suffix.lower()
+ assert file_extension in [".docx", ".pptx", ".xlsx"], (
+ f"Error: Cannot determine file type from {path}. Use --original or provide a .docx/.pptx/.xlsx file."
+ )
+
+ if path.is_file() and path.suffix.lower() in [".docx", ".pptx", ".xlsx"]:
+ temp_dir = tempfile.mkdtemp()
+ with zipfile.ZipFile(path, "r") as zf:
+ zf.extractall(temp_dir)
+ unpacked_dir = Path(temp_dir)
+ else:
+ assert path.is_dir(), f"Error: {path} is not a directory or Office file"
+ unpacked_dir = path
+
+ match file_extension:
+ case ".docx":
+ validators = [
+ DOCXSchemaValidator(unpacked_dir, original_file, verbose=args.verbose),
+ ]
+ if original_file:
+ validators.append(
+ RedliningValidator(unpacked_dir, original_file, verbose=args.verbose, author=args.author)
+ )
+ case ".pptx":
+ validators = [
+ PPTXSchemaValidator(unpacked_dir, original_file, verbose=args.verbose),
+ ]
+ case _:
+ print(f"Error: Validation not supported for file type {file_extension}")
+ sys.exit(1)
+
+ if args.auto_repair:
+ total_repairs = sum(v.repair() for v in validators)
+ if total_repairs:
+ print(f"Auto-repaired {total_repairs} issue(s)")
+
+ success = all(v.validate() for v in validators)
+
+ if success:
+ print("All validations PASSED!")
+
+ sys.exit(0 if success else 1)
+
+
# Script entry point.
if __name__ == "__main__":
    main()
diff --git a/.claude/skills/pptx/scripts/office/validators/__init__.py b/.claude/skills/pptx/scripts/office/validators/__init__.py
new file mode 100644
index 0000000..db092ec
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/validators/__init__.py
@@ -0,0 +1,15 @@
"""
Validator subpackage for unpacked Office Open XML documents.

Exposes the generic base validator plus the DOCX, PPTX, and redlining
(tracked-changes) validators used by the CLI entry point.
"""

from .base import BaseSchemaValidator
from .docx import DOCXSchemaValidator
from .pptx import PPTXSchemaValidator
from .redlining import RedliningValidator

# Public API of this package.
__all__ = [
    "BaseSchemaValidator",
    "DOCXSchemaValidator",
    "PPTXSchemaValidator",
    "RedliningValidator",
]
diff --git a/.claude/skills/pptx/scripts/office/validators/base.py b/.claude/skills/pptx/scripts/office/validators/base.py
new file mode 100644
index 0000000..db4a06a
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/validators/base.py
@@ -0,0 +1,847 @@
+"""
+Base validator with common validation logic for document files.
+"""
+
+import re
+from pathlib import Path
+
+import defusedxml.minidom
+import lxml.etree
+
+
class BaseSchemaValidator:
    """Shared validation and repair logic for unpacked OOXML packages.

    Operates on an unpacked (unzipped) .docx/.pptx/.xlsx directory,
    optionally comparing against the original packed file so that only
    newly introduced problems are reported.  Subclasses override
    ``validate`` and may extend ``repair``.
    """

    # Substrings of XSD error messages that are always ignored
    # (known-benign deviations seen in real-world documents).
    IGNORED_VALIDATION_ERRORS = [
        "hyphenationZone",
        "purl.org/dc/terms",
    ]

    # tag name (lowercased, namespace stripped) ->
    #   (id attribute local name, scope), where scope is "file"
    #   (unique within one part) or "global" (unique across the package).
    UNIQUE_ID_REQUIREMENTS = {
        "comment": ("id", "file"),
        "commentrangestart": ("id", "file"),
        "commentrangeend": ("id", "file"),
        "bookmarkstart": ("id", "file"),
        "bookmarkend": ("id", "file"),
        "sldid": ("id", "file"),
        "sldmasterid": ("id", "global"),
        "sldlayoutid": ("id", "global"),
        "cm": ("authorid", "file"),
        "sheet": ("sheetid", "file"),
        "definedname": ("id", "file"),
        "cxnsp": ("id", "file"),
        "sp": ("id", "file"),
        "pic": ("id", "file"),
        "grpsp": ("id", "file"),
    }

    # Ancestor containers whose descendants are exempt from the
    # unique-ID checks (duplicate IDs are expected inside them).
    EXCLUDED_ID_CONTAINERS = {
        "sectionlst",
    }

    # element name -> expected relationship type for its r:id.
    # Empty here; format-specific subclasses override.
    ELEMENT_RELATIONSHIP_TYPES = {}

    # File name, extension, or top-level folder -> XSD schema path
    # (relative to the schemas directory).  NOTE: "fouth-edition" matches
    # the on-disk schema folder name (sic).
    SCHEMA_MAPPINGS = {
        "word": "ISO-IEC29500-4_2016/wml.xsd",
        "ppt": "ISO-IEC29500-4_2016/pml.xsd",
        "xl": "ISO-IEC29500-4_2016/sml.xsd",
        "[Content_Types].xml": "ecma/fouth-edition/opc-contentTypes.xsd",
        "app.xml": "ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd",
        "core.xml": "ecma/fouth-edition/opc-coreProperties.xsd",
        "custom.xml": "ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd",
        ".rels": "ecma/fouth-edition/opc-relationships.xsd",
        "people.xml": "microsoft/wml-2012.xsd",
        "commentsIds.xml": "microsoft/wml-cid-2016.xsd",
        "commentsExtensible.xml": "microsoft/wml-cex-2018.xsd",
        "commentsExtended.xml": "microsoft/wml-2012.xsd",
        "chart": "ISO-IEC29500-4_2016/dml-chart.xsd",
        "theme": "ISO-IEC29500-4_2016/dml-main.xsd",
        "drawing": "ISO-IEC29500-4_2016/dml-main.xsd",
    }

    # Markup-compatibility namespace (mc:Ignorable etc.).
    MC_NAMESPACE = "http://schemas.openxmlformats.org/markup-compatibility/2006"
    # Built-in XML namespace (xml:space etc.).
    XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace"

    # OPC package-level relationships (.rels files).
    PACKAGE_RELATIONSHIPS_NAMESPACE = (
        "http://schemas.openxmlformats.org/package/2006/relationships"
    )
    # Document-level relationship references (r:id, r:embed, ...).
    OFFICE_RELATIONSHIPS_NAMESPACE = (
        "http://schemas.openxmlformats.org/officeDocument/2006/relationships"
    )
    # [Content_Types].xml namespace.
    CONTENT_TYPES_NAMESPACE = (
        "http://schemas.openxmlformats.org/package/2006/content-types"
    )

    # Top-level folders holding a package's main content parts.
    MAIN_CONTENT_FOLDERS = {"word", "ppt", "xl"}

    # Namespaces considered "real" OOXML; anything else is treated as an
    # ignorable vendor extension when cleaning before XSD validation.
    OOXML_NAMESPACES = {
        "http://schemas.openxmlformats.org/officeDocument/2006/math",
        "http://schemas.openxmlformats.org/officeDocument/2006/relationships",
        "http://schemas.openxmlformats.org/schemaLibrary/2006/main",
        "http://schemas.openxmlformats.org/drawingml/2006/main",
        "http://schemas.openxmlformats.org/drawingml/2006/chart",
        "http://schemas.openxmlformats.org/drawingml/2006/chartDrawing",
        "http://schemas.openxmlformats.org/drawingml/2006/diagram",
        "http://schemas.openxmlformats.org/drawingml/2006/picture",
        "http://schemas.openxmlformats.org/drawingml/2006/spreadsheetDrawing",
        "http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing",
        "http://schemas.openxmlformats.org/wordprocessingml/2006/main",
        "http://schemas.openxmlformats.org/presentationml/2006/main",
        "http://schemas.openxmlformats.org/spreadsheetml/2006/main",
        "http://schemas.openxmlformats.org/officeDocument/2006/sharedTypes",
        "http://www.w3.org/XML/1998/namespace",
    }
+
+ def __init__(self, unpacked_dir, original_file=None, verbose=False):
+ self.unpacked_dir = Path(unpacked_dir).resolve()
+ self.original_file = Path(original_file) if original_file else None
+ self.verbose = verbose
+
+ self.schemas_dir = Path(__file__).parent.parent / "schemas"
+
+ patterns = ["*.xml", "*.rels"]
+ self.xml_files = [
+ f for pattern in patterns for f in self.unpacked_dir.rglob(pattern)
+ ]
+
+ if not self.xml_files:
+ print(f"Warning: No XML files found in {self.unpacked_dir}")
+
    def validate(self):
        """Run the full validation suite; must be overridden by subclasses."""
        raise NotImplementedError("Subclasses must implement the validate method")
+
    def repair(self) -> int:
        """Apply generic auto-repairs; returns the number of fixes made."""
        return self.repair_whitespace_preservation()
+
    def repair_whitespace_preservation(self) -> int:
        """Add xml:space='preserve' to text elements with edge whitespace.

        Word-family applications strip unprotected leading/trailing spaces
        and tabs on load, so any ``<*:t>`` element whose text starts or ends
        with a space/tab needs the attribute.  Returns the repair count.

        NOTE(review): only prefixed tags (e.g. ``w:t``) match the
        ``endswith(":t")`` test; an unprefixed ``<t>`` would be skipped —
        confirm all target formats use prefixed text tags.
        """
        repairs = 0

        for xml_file in self.xml_files:
            try:
                content = xml_file.read_text(encoding="utf-8")
                dom = defusedxml.minidom.parseString(content)
                modified = False

                for elem in dom.getElementsByTagName("*"):
                    if elem.tagName.endswith(":t") and elem.firstChild:
                        text = elem.firstChild.nodeValue
                        # Only space/tab edges matter here; validation applies
                        # a stricter rule that also covers newlines.
                        if text and (text.startswith((' ', '\t')) or text.endswith((' ', '\t'))):
                            if elem.getAttribute("xml:space") != "preserve":
                                elem.setAttribute("xml:space", "preserve")
                                text_preview = repr(text[:30]) + "..." if len(text) > 30 else repr(text)
                                print(f"  Repaired: {xml_file.name}: Added xml:space='preserve' to {elem.tagName}: {text_preview}")
                                repairs += 1
                                modified = True

                if modified:
                    # Serialize back as UTF-8 bytes so the XML declaration
                    # stays consistent.
                    xml_file.write_bytes(dom.toxml(encoding="UTF-8"))

            except Exception:
                # Best-effort repair: unparseable parts are left untouched;
                # they are reported separately by validate_xml.
                pass

        return repairs
+
+ def validate_xml(self):
+ errors = []
+
+ for xml_file in self.xml_files:
+ try:
+ lxml.etree.parse(str(xml_file))
+ except lxml.etree.XMLSyntaxError as e:
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: "
+ f"Line {e.lineno}: {e.msg}"
+ )
+ except Exception as e:
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: "
+ f"Unexpected error: {str(e)}"
+ )
+
+ if errors:
+ print(f"FAILED - Found {len(errors)} XML violations:")
+ for error in errors:
+ print(error)
+ return False
+ else:
+ if self.verbose:
+ print("PASSED - All XML files are well-formed")
+ return True
+
+ def validate_namespaces(self):
+ errors = []
+
+ for xml_file in self.xml_files:
+ try:
+ root = lxml.etree.parse(str(xml_file)).getroot()
+ declared = set(root.nsmap.keys()) - {None}
+
+ for attr_val in [
+ v for k, v in root.attrib.items() if k.endswith("Ignorable")
+ ]:
+ undeclared = set(attr_val.split()) - declared
+ errors.extend(
+ f" {xml_file.relative_to(self.unpacked_dir)}: "
+ f"Namespace '{ns}' in Ignorable but not declared"
+ for ns in undeclared
+ )
+ except lxml.etree.XMLSyntaxError:
+ continue
+
+ if errors:
+ print(f"FAILED - {len(errors)} namespace issues:")
+ for error in errors:
+ print(error)
+ return False
+ if self.verbose:
+ print("PASSED - All namespace prefixes properly declared")
+ return True
+
    def validate_unique_ids(self):
        """Check elements listed in UNIQUE_ID_REQUIREMENTS carry unique IDs.

        File-scoped IDs must be unique within one XML part; global-scoped
        IDs must be unique across the whole package.  mc:AlternateContent
        subtrees are removed first, since their Choice/Fallback branches
        legitimately repeat IDs.
        """
        errors = []
        global_ids = {}  # id value -> (file, line, tag) for global scope

        for xml_file in self.xml_files:
            try:
                root = lxml.etree.parse(str(xml_file)).getroot()
                file_ids = {}  # (tag, attr) -> {id value: first line seen}

                # Drop AlternateContent blocks before scanning.
                mc_elements = root.xpath(
                    ".//mc:AlternateContent", namespaces={"mc": self.MC_NAMESPACE}
                )
                for elem in mc_elements:
                    elem.getparent().remove(elem)

                for elem in root.iter():
                    # Local tag name, lowercased, namespace stripped.
                    tag = (
                        elem.tag.split("}")[-1].lower()
                        if "}" in elem.tag
                        else elem.tag.lower()
                    )

                    if tag in self.UNIQUE_ID_REQUIREMENTS:
                        # Skip elements inside containers (e.g. sectionLst)
                        # where duplicate IDs are expected.
                        in_excluded_container = any(
                            ancestor.tag.split("}")[-1].lower() in self.EXCLUDED_ID_CONTAINERS
                            for ancestor in elem.iterancestors()
                        )
                        if in_excluded_container:
                            continue

                        attr_name, scope = self.UNIQUE_ID_REQUIREMENTS[tag]

                        # Locate the ID attribute regardless of its prefix.
                        id_value = None
                        for attr, value in elem.attrib.items():
                            attr_local = (
                                attr.split("}")[-1].lower()
                                if "}" in attr
                                else attr.lower()
                            )
                            if attr_local == attr_name:
                                id_value = value
                                break

                        if id_value is not None:
                            if scope == "global":
                                if id_value in global_ids:
                                    prev_file, prev_line, prev_tag = global_ids[
                                        id_value
                                    ]
                                    errors.append(
                                        f"  {xml_file.relative_to(self.unpacked_dir)}: "
                                        f"Line {elem.sourceline}: Global ID '{id_value}' in <{tag}> "
                                        f"already used in {prev_file} at line {prev_line} in <{prev_tag}>"
                                    )
                                else:
                                    global_ids[id_value] = (
                                        xml_file.relative_to(self.unpacked_dir),
                                        elem.sourceline,
                                        tag,
                                    )
                            elif scope == "file":
                                key = (tag, attr_name)
                                if key not in file_ids:
                                    file_ids[key] = {}

                                if id_value in file_ids[key]:
                                    prev_line = file_ids[key][id_value]
                                    errors.append(
                                        f"  {xml_file.relative_to(self.unpacked_dir)}: "
                                        f"Line {elem.sourceline}: Duplicate {attr_name}='{id_value}' in <{tag}> "
                                        f"(first occurrence at line {prev_line})"
                                    )
                                else:
                                    file_ids[key][id_value] = elem.sourceline

            except (lxml.etree.XMLSyntaxError, Exception) as e:
                # NOTE(review): this tuple is equivalent to `except Exception`;
                # kept as-is to preserve the code path byte-for-byte.
                errors.append(
                    f"  {xml_file.relative_to(self.unpacked_dir)}: Error: {e}"
                )

        if errors:
            print(f"FAILED - Found {len(errors)} ID uniqueness violations:")
            for error in errors:
                print(error)
            return False
        else:
            if self.verbose:
                print("PASSED - All required IDs are unique")
            return True
+
    def validate_file_references(self):
        """Cross-check .rels targets against files on disk, both directions.

        Fails on (a) relationship Targets pointing at missing files and
        (b) package files never referenced by any .rels part.
        """
        errors = []

        rels_files = list(self.unpacked_dir.rglob("*.rels"))

        if not rels_files:
            if self.verbose:
                print("PASSED - No .rels files found")
            return True

        # Every content file; [Content_Types].xml and .rels files are
        # package wiring, not content, so they are exempt.
        all_files = []
        for file_path in self.unpacked_dir.rglob("*"):
            if (
                file_path.is_file()
                and file_path.name != "[Content_Types].xml"
                and not file_path.name.endswith(".rels")
            ):
                all_files.append(file_path.resolve())

        all_referenced_files = set()

        if self.verbose:
            print(
                f"Found {len(rels_files)} .rels files and {len(all_files)} target files"
            )

        for rels_file in rels_files:
            try:
                rels_root = lxml.etree.parse(str(rels_file)).getroot()

                rels_dir = rels_file.parent

                referenced_files = set()
                broken_refs = []

                for rel in rels_root.findall(
                    ".//ns:Relationship",
                    namespaces={"ns": self.PACKAGE_RELATIONSHIPS_NAMESPACE},
                ):
                    target = rel.get("Target")
                    # External targets (URLs, mailto:) are not files on disk.
                    if target and not target.startswith(
                        ("http", "mailto:")
                    ):
                        if target.startswith("/"):
                            # Package-absolute target path.
                            target_path = self.unpacked_dir / target.lstrip("/")
                        elif rels_file.name == ".rels":
                            # Root .rels: targets resolve from the package root.
                            target_path = self.unpacked_dir / target
                        else:
                            # Part .rels live in <part dir>/_rels; targets are
                            # relative to the part's own directory.
                            base_dir = rels_dir.parent
                            target_path = base_dir / target

                        try:
                            target_path = target_path.resolve()
                            if target_path.exists() and target_path.is_file():
                                referenced_files.add(target_path)
                                all_referenced_files.add(target_path)
                            else:
                                broken_refs.append((target, rel.sourceline))
                        except (OSError, ValueError):
                            # Unresolvable path (bad characters, loops, ...).
                            broken_refs.append((target, rel.sourceline))

                if broken_refs:
                    rel_path = rels_file.relative_to(self.unpacked_dir)
                    for broken_ref, line_num in broken_refs:
                        errors.append(
                            f"  {rel_path}: Line {line_num}: Broken reference to {broken_ref}"
                        )

            except Exception as e:
                rel_path = rels_file.relative_to(self.unpacked_dir)
                errors.append(f"  Error parsing {rel_path}: {e}")

        # Any content file no .rels ever pointed at is orphaned.
        unreferenced_files = set(all_files) - all_referenced_files

        if unreferenced_files:
            for unref_file in sorted(unreferenced_files):
                unref_rel_path = unref_file.relative_to(self.unpacked_dir)
                errors.append(f"  Unreferenced file: {unref_rel_path}")

        if errors:
            print(f"FAILED - Found {len(errors)} relationship validation errors:")
            for error in errors:
                print(error)
            print(
                "CRITICAL: These errors will cause the document to appear corrupt. "
                + "Broken references MUST be fixed, "
                + "and unreferenced files MUST be referenced or removed."
            )
            return False
        else:
            if self.verbose:
                print(
                    "PASSED - All references are valid and all files are properly referenced"
                )
            return True
+
+ def validate_all_relationship_ids(self):
+ import lxml.etree
+
+ errors = []
+
+ for xml_file in self.xml_files:
+ if xml_file.suffix == ".rels":
+ continue
+
+ rels_dir = xml_file.parent / "_rels"
+ rels_file = rels_dir / f"{xml_file.name}.rels"
+
+ if not rels_file.exists():
+ continue
+
+ try:
+ rels_root = lxml.etree.parse(str(rels_file)).getroot()
+ rid_to_type = {}
+
+ for rel in rels_root.findall(
+ f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship"
+ ):
+ rid = rel.get("Id")
+ rel_type = rel.get("Type", "")
+ if rid:
+ if rid in rid_to_type:
+ rels_rel_path = rels_file.relative_to(self.unpacked_dir)
+ errors.append(
+ f" {rels_rel_path}: Line {rel.sourceline}: "
+ f"Duplicate relationship ID '{rid}' (IDs must be unique)"
+ )
+ type_name = (
+ rel_type.split("/")[-1] if "/" in rel_type else rel_type
+ )
+ rid_to_type[rid] = type_name
+
+ xml_root = lxml.etree.parse(str(xml_file)).getroot()
+
+ r_ns = self.OFFICE_RELATIONSHIPS_NAMESPACE
+ rid_attrs_to_check = ["id", "embed", "link"]
+ for elem in xml_root.iter():
+ for attr_name in rid_attrs_to_check:
+ rid_attr = elem.get(f"{{{r_ns}}}{attr_name}")
+ if not rid_attr:
+ continue
+ xml_rel_path = xml_file.relative_to(self.unpacked_dir)
+ elem_name = (
+ elem.tag.split("}")[-1] if "}" in elem.tag else elem.tag
+ )
+
+ if rid_attr not in rid_to_type:
+ errors.append(
+ f" {xml_rel_path}: Line {elem.sourceline}: "
+ f"<{elem_name}> r:{attr_name} references non-existent relationship '{rid_attr}' "
+ f"(valid IDs: {', '.join(sorted(rid_to_type.keys())[:5])}{'...' if len(rid_to_type) > 5 else ''})"
+ )
+ elif attr_name == "id" and self.ELEMENT_RELATIONSHIP_TYPES:
+ expected_type = self._get_expected_relationship_type(
+ elem_name
+ )
+ if expected_type:
+ actual_type = rid_to_type[rid_attr]
+ if expected_type not in actual_type.lower():
+ errors.append(
+ f" {xml_rel_path}: Line {elem.sourceline}: "
+ f"<{elem_name}> references '{rid_attr}' which points to '{actual_type}' "
+ f"but should point to a '{expected_type}' relationship"
+ )
+
+ except Exception as e:
+ xml_rel_path = xml_file.relative_to(self.unpacked_dir)
+ errors.append(f" Error processing {xml_rel_path}: {e}")
+
+ if errors:
+ print(f"FAILED - Found {len(errors)} relationship ID reference errors:")
+ for error in errors:
+ print(error)
+ print("\nThese ID mismatches will cause the document to appear corrupt!")
+ return False
+ else:
+ if self.verbose:
+ print("PASSED - All relationship ID references are valid")
+ return True
+
+ def _get_expected_relationship_type(self, element_name):
+ elem_lower = element_name.lower()
+
+ if elem_lower in self.ELEMENT_RELATIONSHIP_TYPES:
+ return self.ELEMENT_RELATIONSHIP_TYPES[elem_lower]
+
+ if elem_lower.endswith("id") and len(elem_lower) > 2:
+ prefix = elem_lower[:-2]
+ if prefix.endswith("master"):
+ return prefix.lower()
+ elif prefix.endswith("layout"):
+ return prefix.lower()
+ else:
+ if prefix == "sld":
+ return "slide"
+ return prefix.lower()
+
+ if elem_lower.endswith("reference") and len(elem_lower) > 9:
+ prefix = elem_lower[:-9]
+ return prefix.lower()
+
+ return None
+
    def validate_content_types(self):
        """Check [Content_Types].xml declares all major parts and media types."""
        errors = []

        content_types_file = self.unpacked_dir / "[Content_Types].xml"
        if not content_types_file.exists():
            print("FAILED - [Content_Types].xml file not found")
            return False

        try:
            root = lxml.etree.parse(str(content_types_file)).getroot()
            declared_parts = set()       # PartName values from <Override>
            declared_extensions = set()  # Extension values from <Default>

            for override in root.findall(
                f".//{{{self.CONTENT_TYPES_NAMESPACE}}}Override"
            ):
                part_name = override.get("PartName")
                if part_name is not None:
                    declared_parts.add(part_name.lstrip("/"))

            for default in root.findall(
                f".//{{{self.CONTENT_TYPES_NAMESPACE}}}Default"
            ):
                extension = default.get("Extension")
                if extension is not None:
                    declared_extensions.add(extension.lower())

            # Root element names whose parts must have an Override entry.
            declarable_roots = {
                "sld",
                "sldLayout",
                "sldMaster",
                "presentation",
                "document",
                "workbook",
                "worksheet",
                "theme",
            }

            # Media extensions that require a <Default> declaration.
            media_extensions = {
                "png": "image/png",
                "jpg": "image/jpeg",
                "jpeg": "image/jpeg",
                "gif": "image/gif",
                "bmp": "image/bmp",
                "tiff": "image/tiff",
                "wmf": "image/x-wmf",
                "emf": "image/x-emf",
            }

            all_files = list(self.unpacked_dir.rglob("*"))
            all_files = [f for f in all_files if f.is_file()]

            # Pass 1: every declarable XML part must be listed as Override.
            for xml_file in self.xml_files:
                path_str = str(xml_file.relative_to(self.unpacked_dir)).replace(
                    "\\", "/"
                )

                if any(
                    skip in path_str
                    for skip in [".rels", "[Content_Types]", "docProps/", "_rels/"]
                ):
                    continue

                try:
                    root_tag = lxml.etree.parse(str(xml_file)).getroot().tag
                    root_name = root_tag.split("}")[-1] if "}" in root_tag else root_tag

                    if root_name in declarable_roots and path_str not in declared_parts:
                        errors.append(
                            f"  {path_str}: File with <{root_name}> root not declared in [Content_Types].xml"
                        )

                except Exception:
                    # Unparseable parts are reported by validate_xml.
                    continue

            # Pass 2: every media extension must have a Default declaration.
            for file_path in all_files:
                if file_path.suffix.lower() in {".xml", ".rels"}:
                    continue
                if file_path.name == "[Content_Types].xml":
                    continue
                if "_rels" in file_path.parts or "docProps" in file_path.parts:
                    continue

                extension = file_path.suffix.lstrip(".").lower()
                if extension and extension not in declared_extensions:
                    if extension in media_extensions:
                        relative_path = file_path.relative_to(self.unpacked_dir)
                        # NOTE(review): this message looks truncated — it ends
                        # at "should add: " without the suggested <Default>
                        # entry; confirm the intended wording.
                        errors.append(
                            f'  {relative_path}: File with extension \'{extension}\' not declared in [Content_Types].xml - should add: '
                        )

        except Exception as e:
            errors.append(f"  Error parsing [Content_Types].xml: {e}")

        if errors:
            print(f"FAILED - Found {len(errors)} content type declaration errors:")
            for error in errors:
                print(error)
            return False
        else:
            if self.verbose:
                print(
                    "PASSED - All content files are properly declared in [Content_Types].xml"
                )
            return True
+
    def validate_file_against_xsd(self, xml_file, verbose=False):
        """Validate one part against its XSD, ignoring pre-existing errors.

        Returns ``(None, set())`` when no schema applies, ``(True, set())``
        when there are no NEW errors, or ``(False, {new error messages})``.
        """
        xml_file = Path(xml_file).resolve()
        unpacked_dir = self.unpacked_dir.resolve()

        is_valid, current_errors = self._validate_single_file_xsd(
            xml_file, unpacked_dir
        )

        if is_valid is None:
            # No schema is mapped for this part.
            return None, set()
        elif is_valid:
            return True, set()

        # Errors the original (pre-edit) document already produced here.
        original_errors = self._get_original_file_errors(xml_file)

        assert current_errors is not None
        new_errors = current_errors - original_errors

        # Drop messages matching the known-benign ignore list.
        new_errors = {
            e for e in new_errors
            if not any(pattern in e for pattern in self.IGNORED_VALIDATION_ERRORS)
        }

        if new_errors:
            if verbose:
                relative_path = xml_file.relative_to(unpacked_dir)
                print(f"FAILED - {relative_path}: {len(new_errors)} new error(s)")
                for error in list(new_errors)[:3]:
                    truncated = error[:250] + "..." if len(error) > 250 else error
                    print(f"    - {truncated}")
            return False, new_errors
        else:
            if verbose:
                print(
                    f"PASSED - No new errors (original had {len(current_errors)} errors)"
                )
            return True, set()
+
+ def validate_against_xsd(self):
+ new_errors = []
+ original_error_count = 0
+ valid_count = 0
+ skipped_count = 0
+
+ for xml_file in self.xml_files:
+ relative_path = str(xml_file.relative_to(self.unpacked_dir))
+ is_valid, new_file_errors = self.validate_file_against_xsd(
+ xml_file, verbose=False
+ )
+
+ if is_valid is None:
+ skipped_count += 1
+ continue
+ elif is_valid and not new_file_errors:
+ valid_count += 1
+ continue
+ elif is_valid:
+ original_error_count += 1
+ valid_count += 1
+ continue
+
+ new_errors.append(f" {relative_path}: {len(new_file_errors)} new error(s)")
+ for error in list(new_file_errors)[:3]:
+ new_errors.append(
+ f" - {error[:250]}..." if len(error) > 250 else f" - {error}"
+ )
+
+ if self.verbose:
+ print(f"Validated {len(self.xml_files)} files:")
+ print(f" - Valid: {valid_count}")
+ print(f" - Skipped (no schema): {skipped_count}")
+ if original_error_count:
+ print(f" - With original errors (ignored): {original_error_count}")
+ print(
+ f" - With NEW errors: {len(new_errors) > 0 and len([e for e in new_errors if not e.startswith(' ')]) or 0}"
+ )
+
+ if new_errors:
+ print("\nFAILED - Found NEW validation errors:")
+ for error in new_errors:
+ print(error)
+ return False
+ else:
+ if self.verbose:
+ print("\nPASSED - No new XSD validation errors introduced")
+ return True
+
+ def _get_schema_path(self, xml_file):
+ if xml_file.name in self.SCHEMA_MAPPINGS:
+ return self.schemas_dir / self.SCHEMA_MAPPINGS[xml_file.name]
+
+ if xml_file.suffix == ".rels":
+ return self.schemas_dir / self.SCHEMA_MAPPINGS[".rels"]
+
+ if "charts/" in str(xml_file) and xml_file.name.startswith("chart"):
+ return self.schemas_dir / self.SCHEMA_MAPPINGS["chart"]
+
+ if "theme/" in str(xml_file) and xml_file.name.startswith("theme"):
+ return self.schemas_dir / self.SCHEMA_MAPPINGS["theme"]
+
+ if xml_file.parent.name in self.MAIN_CONTENT_FOLDERS:
+ return self.schemas_dir / self.SCHEMA_MAPPINGS[xml_file.parent.name]
+
+ return None
+
+ def _clean_ignorable_namespaces(self, xml_doc):
+ xml_string = lxml.etree.tostring(xml_doc, encoding="unicode")
+ xml_copy = lxml.etree.fromstring(xml_string)
+
+ for elem in xml_copy.iter():
+ attrs_to_remove = []
+
+ for attr in elem.attrib:
+ if "{" in attr:
+ ns = attr.split("}")[0][1:]
+ if ns not in self.OOXML_NAMESPACES:
+ attrs_to_remove.append(attr)
+
+ for attr in attrs_to_remove:
+ del elem.attrib[attr]
+
+ self._remove_ignorable_elements(xml_copy)
+
+ return lxml.etree.ElementTree(xml_copy)
+
+ def _remove_ignorable_elements(self, root):
+ elements_to_remove = []
+
+ for elem in list(root):
+ if not hasattr(elem, "tag") or callable(elem.tag):
+ continue
+
+ tag_str = str(elem.tag)
+ if tag_str.startswith("{"):
+ ns = tag_str.split("}")[0][1:]
+ if ns not in self.OOXML_NAMESPACES:
+ elements_to_remove.append(elem)
+ continue
+
+ self._remove_ignorable_elements(elem)
+
+ for elem in elements_to_remove:
+ root.remove(elem)
+
+ def _preprocess_for_mc_ignorable(self, xml_doc):
+ root = xml_doc.getroot()
+
+ if f"{{{self.MC_NAMESPACE}}}Ignorable" in root.attrib:
+ del root.attrib[f"{{{self.MC_NAMESPACE}}}Ignorable"]
+
+ return xml_doc
+
    def _validate_single_file_xsd(self, xml_file, base_path):
        """Validate one XML part against its mapped XSD.

        Returns ``(None, None)`` when no schema applies, ``(True, set())``
        on success, or ``(False, {messages})`` on failure; parse/IO errors
        are reported the same way as schema violations.
        """
        schema_path = self._get_schema_path(xml_file)
        if not schema_path:
            return None, None

        try:
            with open(schema_path, "rb") as xsd_file:
                parser = lxml.etree.XMLParser()
                # base_url lets the schema resolve its relative
                # xsd:import/xsd:include references.
                xsd_doc = lxml.etree.parse(
                    xsd_file, parser=parser, base_url=str(schema_path)
                )
                schema = lxml.etree.XMLSchema(xsd_doc)

            with open(xml_file, "r") as f:
                xml_doc = lxml.etree.parse(f)

            # Strip {{template}} placeholders and mc:Ignorable before validating.
            xml_doc, _ = self._remove_template_tags_from_text_nodes(xml_doc)
            xml_doc = self._preprocess_for_mc_ignorable(xml_doc)

            relative_path = xml_file.relative_to(base_path)
            if (
                relative_path.parts
                and relative_path.parts[0] in self.MAIN_CONTENT_FOLDERS
            ):
                # Main content parts may carry vendor extensions the strict
                # schema rejects; drop non-OOXML attributes/elements first.
                xml_doc = self._clean_ignorable_namespaces(xml_doc)

            if schema.validate(xml_doc):
                return True, set()
            else:
                errors = set()
                for error in schema.error_log:
                    errors.add(error.message)
                return False, errors

        except Exception as e:
            return False, {str(e)}
+
    def _get_original_file_errors(self, xml_file):
        """Return the XSD errors the ORIGINAL document already had for this part.

        Used to suppress pre-existing errors so only regressions are
        reported.  Returns an empty set when no original file was supplied
        or the part does not exist in it.
        """
        if self.original_file is None:
            return set()

        # Imported locally: only needed when an original file is compared.
        import tempfile
        import zipfile

        xml_file = Path(xml_file).resolve()
        unpacked_dir = self.unpacked_dir.resolve()
        relative_path = xml_file.relative_to(unpacked_dir)

        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)

            # Unpack the original package and validate the matching part.
            with zipfile.ZipFile(self.original_file, "r") as zip_ref:
                zip_ref.extractall(temp_path)

            original_xml_file = temp_path / relative_path

            if not original_xml_file.exists():
                # Part was newly added; it has no "original" errors.
                return set()

            is_valid, errors = self._validate_single_file_xsd(
                original_xml_file, temp_path
            )
            return errors if errors else set()
+
    def _remove_template_tags_from_text_nodes(self, xml_doc):
        """Return ``(tree copy, warnings)`` with ``{{...}}`` placeholders stripped.

        Placeholders outside of text runs would make otherwise-valid parts
        fail XSD validation, so they are removed from element text/tail on
        a serialized copy.  Elements whose tag is ``t`` (or ends in ``}t``)
        are skipped: placeholders inside real text runs are legal content.
        """
        warnings = []
        template_pattern = re.compile(r"\{\{[^}]*\}\}")

        # Deep-copy via serialize/re-parse so the caller's tree is unchanged.
        xml_string = lxml.etree.tostring(xml_doc, encoding="unicode")
        xml_copy = lxml.etree.fromstring(xml_string)

        def process_text_content(text, content_type):
            # Strip every {{...}} occurrence, recording a warning for each.
            if not text:
                return text
            matches = list(template_pattern.finditer(text))
            if matches:
                for match in matches:
                    warnings.append(
                        f"Found template tag in {content_type}: {match.group()}"
                    )
                return template_pattern.sub("", text)
            return text

        for elem in xml_copy.iter():
            # Skip comments/PIs (their .tag is callable) and tagless nodes.
            if not hasattr(elem, "tag") or callable(elem.tag):
                continue
            tag_str = str(elem.tag)
            if tag_str.endswith("}t") or tag_str == "t":
                continue

            elem.text = process_text_content(elem.text, "text content")
            elem.tail = process_text_content(elem.tail, "tail content")

        return lxml.etree.ElementTree(xml_copy), warnings
+
+
+if __name__ == "__main__":
+ raise RuntimeError("This module should not be run directly.")
diff --git a/.claude/skills/pptx/scripts/office/validators/docx.py b/.claude/skills/pptx/scripts/office/validators/docx.py
new file mode 100644
index 0000000..fec405e
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/validators/docx.py
@@ -0,0 +1,446 @@
+"""
+Validator for Word document XML files against XSD schemas.
+"""
+
+import random
+import re
+import tempfile
+import zipfile
+
+import defusedxml.minidom
+import lxml.etree
+
+from .base import BaseSchemaValidator
+
+
class DOCXSchemaValidator(BaseSchemaValidator):
    """Validator for unpacked Word (.docx) packages.

    Adds WordprocessingML-specific checks (tracked changes, comment
    markers, whitespace preservation, paraId/durableId constraints) on
    top of the generic OOXML checks in BaseSchemaValidator.
    """

    # WordprocessingML main namespace (w: prefix).
    WORD_2006_NAMESPACE = "http://schemas.openxmlformats.org/wordprocessingml/2006/main"
    # Microsoft extension namespaces: w14 (paraId) and w16cid (durableId).
    W14_NAMESPACE = "http://schemas.microsoft.com/office/word/2010/wordml"
    W16CID_NAMESPACE = "http://schemas.microsoft.com/office/word/2016/wordml/cid"

    # No element-specific relationship-type expectations for DOCX.
    ELEMENT_RELATIONSHIP_TYPES = {}
+
+ def validate(self):
+ if not self.validate_xml():
+ return False
+
+ all_valid = True
+ if not self.validate_namespaces():
+ all_valid = False
+
+ if not self.validate_unique_ids():
+ all_valid = False
+
+ if not self.validate_file_references():
+ all_valid = False
+
+ if not self.validate_content_types():
+ all_valid = False
+
+ if not self.validate_against_xsd():
+ all_valid = False
+
+ if not self.validate_whitespace_preservation():
+ all_valid = False
+
+ if not self.validate_deletions():
+ all_valid = False
+
+ if not self.validate_insertions():
+ all_valid = False
+
+ if not self.validate_all_relationship_ids():
+ all_valid = False
+
+ if not self.validate_id_constraints():
+ all_valid = False
+
+ if not self.validate_comment_markers():
+ all_valid = False
+
+ self.compare_paragraph_counts()
+
+ return all_valid
+
    def validate_whitespace_preservation(self):
        """Report <w:t> elements with edge whitespace lacking xml:space='preserve'.

        Stricter than the base-class repair: newlines and carriage returns
        also count as significant edge whitespace here.
        """
        errors = []

        for xml_file in self.xml_files:
            if xml_file.name != "document.xml":
                continue

            try:
                root = lxml.etree.parse(str(xml_file)).getroot()

                for elem in root.iter(f"{{{self.WORD_2006_NAMESPACE}}}t"):
                    if elem.text:
                        text = elem.text
                        if re.search(r"^[ \t\n\r]", text) or re.search(
                            r"[ \t\n\r]$", text
                        ):
                            xml_space_attr = f"{{{self.XML_NAMESPACE}}}space"
                            if (
                                xml_space_attr not in elem.attrib
                                or elem.attrib[xml_space_attr] != "preserve"
                            ):
                                text_preview = (
                                    repr(text)[:50] + "..."
                                    if len(repr(text)) > 50
                                    else repr(text)
                                )
                                errors.append(
                                    f"  {xml_file.relative_to(self.unpacked_dir)}: "
                                    f"Line {elem.sourceline}: w:t element with whitespace missing xml:space='preserve': {text_preview}"
                                )

            except (lxml.etree.XMLSyntaxError, Exception) as e:
                # NOTE(review): equivalent to `except Exception`; kept as-is.
                errors.append(
                    f"  {xml_file.relative_to(self.unpacked_dir)}: Error: {e}"
                )

        if errors:
            print(f"FAILED - Found {len(errors)} whitespace preservation violations:")
            for error in errors:
                print(error)
            return False
        else:
            if self.verbose:
                print("PASSED - All whitespace is properly preserved")
            return True
+
+ def validate_deletions(self):
+ errors = []
+
+ for xml_file in self.xml_files:
+ if xml_file.name != "document.xml":
+ continue
+
+ try:
+ root = lxml.etree.parse(str(xml_file)).getroot()
+ namespaces = {"w": self.WORD_2006_NAMESPACE}
+
+ for t_elem in root.xpath(".//w:del//w:t", namespaces=namespaces):
+ if t_elem.text:
+ text_preview = (
+ repr(t_elem.text)[:50] + "..."
+ if len(repr(t_elem.text)) > 50
+ else repr(t_elem.text)
+ )
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: "
+ f"Line {t_elem.sourceline}: found within : {text_preview}"
+ )
+
+ for instr_elem in root.xpath(
+ ".//w:del//w:instrText", namespaces=namespaces
+ ):
+ text_preview = (
+ repr(instr_elem.text or "")[:50] + "..."
+ if len(repr(instr_elem.text or "")) > 50
+ else repr(instr_elem.text or "")
+ )
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: "
+ f"Line {instr_elem.sourceline}: found within (use ): {text_preview}"
+ )
+
+ except (lxml.etree.XMLSyntaxError, Exception) as e:
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}"
+ )
+
+ if errors:
+ print(f"FAILED - Found {len(errors)} deletion validation violations:")
+ for error in errors:
+ print(error)
+ return False
+ else:
+ if self.verbose:
+ print("PASSED - No w:t elements found within w:del elements")
+ return True
+
+ def count_paragraphs_in_unpacked(self):
+ count = 0
+
+ for xml_file in self.xml_files:
+ if xml_file.name != "document.xml":
+ continue
+
+ try:
+ root = lxml.etree.parse(str(xml_file)).getroot()
+ paragraphs = root.findall(f".//{{{self.WORD_2006_NAMESPACE}}}p")
+ count = len(paragraphs)
+ except Exception as e:
+ print(f"Error counting paragraphs in unpacked document: {e}")
+
+ return count
+
+ def count_paragraphs_in_original(self):
+ original = self.original_file
+ if original is None:
+ return 0
+
+ count = 0
+
+ try:
+ with tempfile.TemporaryDirectory() as temp_dir:
+ with zipfile.ZipFile(original, "r") as zip_ref:
+ zip_ref.extractall(temp_dir)
+
+ doc_xml_path = temp_dir + "/word/document.xml"
+ root = lxml.etree.parse(doc_xml_path).getroot()
+
+ paragraphs = root.findall(f".//{{{self.WORD_2006_NAMESPACE}}}p")
+ count = len(paragraphs)
+
+ except Exception as e:
+ print(f"Error counting paragraphs in original document: {e}")
+
+ return count
+
+ def validate_insertions(self):
+ errors = []
+
+ for xml_file in self.xml_files:
+ if xml_file.name != "document.xml":
+ continue
+
+ try:
+ root = lxml.etree.parse(str(xml_file)).getroot()
+ namespaces = {"w": self.WORD_2006_NAMESPACE}
+
+ invalid_elements = root.xpath(
+ ".//w:ins//w:delText[not(ancestor::w:del)]", namespaces=namespaces
+ )
+
+ for elem in invalid_elements:
+ text_preview = (
+ repr(elem.text or "")[:50] + "..."
+ if len(repr(elem.text or "")) > 50
+ else repr(elem.text or "")
+ )
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: "
+ f"Line {elem.sourceline}: within : {text_preview}"
+ )
+
+ except (lxml.etree.XMLSyntaxError, Exception) as e:
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}"
+ )
+
+ if errors:
+ print(f"FAILED - Found {len(errors)} insertion validation violations:")
+ for error in errors:
+ print(error)
+ return False
+ else:
+ if self.verbose:
+ print("PASSED - No w:delText elements within w:ins elements")
+ return True
+
+ def compare_paragraph_counts(self):
+ original_count = self.count_paragraphs_in_original()
+ new_count = self.count_paragraphs_in_unpacked()
+
+ diff = new_count - original_count
+ diff_str = f"+{diff}" if diff > 0 else str(diff)
+ print(f"\nParagraphs: {original_count} → {new_count} ({diff_str})")
+
    def _parse_id_value(self, val: str, base: int = 16) -> int:
        """Parse an OOXML id attribute value; ids are hex unless stated otherwise."""
        return int(val, base)
+
    def validate_id_constraints(self):
        """Check w14:paraId and w16cid:durableId value ranges.

        paraId (hex) must be < 0x80000000.  durableId must be < 0x7FFFFFFF:
        parsed as hex everywhere except numbering.xml, where it must be
        decimal.
        """
        errors = []
        para_id_attr = f"{{{self.W14_NAMESPACE}}}paraId"
        durable_id_attr = f"{{{self.W16CID_NAMESPACE}}}durableId"

        for xml_file in self.xml_files:
            try:
                for elem in lxml.etree.parse(str(xml_file)).iter():
                    if val := elem.get(para_id_attr):
                        if self._parse_id_value(val, base=16) >= 0x80000000:
                            errors.append(
                                f"  {xml_file.name}:{elem.sourceline}: paraId={val} >= 0x80000000"
                            )

                    if val := elem.get(durable_id_attr):
                        if xml_file.name == "numbering.xml":
                            # numbering.xml stores durableId as decimal.
                            try:
                                if self._parse_id_value(val, base=10) >= 0x7FFFFFFF:
                                    errors.append(
                                        f"  {xml_file.name}:{elem.sourceline}: "
                                        f"durableId={val} >= 0x7FFFFFFF"
                                    )
                            except ValueError:
                                errors.append(
                                    f"  {xml_file.name}:{elem.sourceline}: "
                                    f"durableId={val} must be decimal in numbering.xml"
                                )
                        else:
                            if self._parse_id_value(val, base=16) >= 0x7FFFFFFF:
                                errors.append(
                                    f"  {xml_file.name}:{elem.sourceline}: "
                                    f"durableId={val} >= 0x7FFFFFFF"
                                )
            except Exception:
                # NOTE(review): also swallows ValueError from malformed
                # hex paraId/durableId values, silently skipping the rest of
                # the file — confirm this best-effort behavior is intended.
                pass

        if errors:
            print(f"FAILED - {len(errors)} ID constraint violations:")
            for e in errors:
                print(e)
        elif self.verbose:
            print("PASSED - All paraId/durableId values within constraints")
        return not errors
+
    def validate_comment_markers(self):
        """Check comment range markers pair up and reference real comments.

        Validates that every commentRangeStart has a matching
        commentRangeEnd (and vice versa) and that every marker id exists
        in comments.xml.
        """
        errors = []

        document_xml = None
        comments_xml = None
        for xml_file in self.xml_files:
            if xml_file.name == "document.xml" and "word" in str(xml_file):
                document_xml = xml_file
            elif xml_file.name == "comments.xml":
                comments_xml = xml_file

        if not document_xml:
            if self.verbose:
                print("PASSED - No document.xml found (skipping comment validation)")
            return True

        try:
            doc_root = lxml.etree.parse(str(document_xml)).getroot()
            namespaces = {"w": self.WORD_2006_NAMESPACE}

            # Collect the w:id values of each marker kind.
            range_starts = {
                elem.get(f"{{{self.WORD_2006_NAMESPACE}}}id")
                for elem in doc_root.xpath(
                    ".//w:commentRangeStart", namespaces=namespaces
                )
            }
            range_ends = {
                elem.get(f"{{{self.WORD_2006_NAMESPACE}}}id")
                for elem in doc_root.xpath(
                    ".//w:commentRangeEnd", namespaces=namespaces
                )
            }
            references = {
                elem.get(f"{{{self.WORD_2006_NAMESPACE}}}id")
                for elem in doc_root.xpath(
                    ".//w:commentReference", namespaces=namespaces
                )
            }

            # Every end needs a start...
            orphaned_ends = range_ends - range_starts
            for comment_id in sorted(
                orphaned_ends, key=lambda x: int(x) if x and x.isdigit() else 0
            ):
                errors.append(
                    f'  document.xml: commentRangeEnd id="{comment_id}" has no matching commentRangeStart'
                )

            # ...and every start needs an end.
            orphaned_starts = range_starts - range_ends
            for comment_id in sorted(
                orphaned_starts, key=lambda x: int(x) if x and x.isdigit() else 0
            ):
                errors.append(
                    f'  document.xml: commentRangeStart id="{comment_id}" has no matching commentRangeEnd'
                )

            # IDs actually defined in comments.xml (empty if absent).
            comment_ids = set()
            if comments_xml and comments_xml.exists():
                comments_root = lxml.etree.parse(str(comments_xml)).getroot()
                comment_ids = {
                    elem.get(f"{{{self.WORD_2006_NAMESPACE}}}id")
                    for elem in comments_root.xpath(
                        ".//w:comment", namespaces=namespaces
                    )
                }

            # Every marker in the document must reference a defined comment.
            marker_ids = range_starts | range_ends | references
            invalid_refs = marker_ids - comment_ids
            for comment_id in sorted(
                invalid_refs, key=lambda x: int(x) if x and x.isdigit() else 0
            ):
                if comment_id:
                    errors.append(
                        f'  document.xml: marker id="{comment_id}" references non-existent comment'
                    )

        except (lxml.etree.XMLSyntaxError, Exception) as e:
            # NOTE(review): equivalent to `except Exception`; kept as-is.
            errors.append(f"  Error parsing XML: {e}")

        if errors:
            print(f"FAILED - {len(errors)} comment marker violations:")
            for error in errors:
                print(error)
            return False
        else:
            if self.verbose:
                print("PASSED - All comment markers properly paired")
            return True
+
+ def repair(self) -> int:
+ repairs = super().repair()
+ repairs += self.repair_durableId()
+ return repairs
+
    def repair_durableId(self) -> int:
        """Replace out-of-range w16cid:durableId values with random valid ones.

        durableId must be < 0x7FFFFFFF; values are hex everywhere except
        numbering.xml, where they are decimal.  Returns the repair count.
        """
        repairs = 0

        for xml_file in self.xml_files:
            try:
                content = xml_file.read_text(encoding="utf-8")
                dom = defusedxml.minidom.parseString(content)
                modified = False

                for elem in dom.getElementsByTagName("*"):
                    if not elem.hasAttribute("w16cid:durableId"):
                        continue

                    durable_id = elem.getAttribute("w16cid:durableId")
                    needs_repair = False

                    if xml_file.name == "numbering.xml":
                        # Decimal encoding; unparseable values also get fixed.
                        try:
                            needs_repair = (
                                self._parse_id_value(durable_id, base=10) >= 0x7FFFFFFF
                            )
                        except ValueError:
                            needs_repair = True
                    else:
                        # Hex encoding elsewhere.
                        try:
                            needs_repair = (
                                self._parse_id_value(durable_id, base=16) >= 0x7FFFFFFF
                            )
                        except ValueError:
                            needs_repair = True

                    if needs_repair:
                        # New random ID in the valid range, encoded to match
                        # the file's convention (decimal vs. 8-digit hex).
                        value = random.randint(1, 0x7FFFFFFE)
                        if xml_file.name == "numbering.xml":
                            new_id = str(value)
                        else:
                            new_id = f"{value:08X}"

                        elem.setAttribute("w16cid:durableId", new_id)
                        print(
                            f"  Repaired: {xml_file.name}: durableId {durable_id} → {new_id}"
                        )
                        repairs += 1
                        modified = True

                if modified:
                    xml_file.write_bytes(dom.toxml(encoding="UTF-8"))

            except Exception:
                # Best-effort repair: unparseable parts are left untouched.
                pass

        return repairs
+
+
+if __name__ == "__main__":
+ raise RuntimeError("This module should not be run directly.")
diff --git a/.claude/skills/pptx/scripts/office/validators/pptx.py b/.claude/skills/pptx/scripts/office/validators/pptx.py
new file mode 100644
index 0000000..09842aa
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/validators/pptx.py
@@ -0,0 +1,275 @@
+"""
+Validator for PowerPoint presentation XML files against XSD schemas.
+"""
+
+import re
+
+from .base import BaseSchemaValidator
+
+
+class PPTXSchemaValidator(BaseSchemaValidator):
+
+ PRESENTATIONML_NAMESPACE = (
+ "http://schemas.openxmlformats.org/presentationml/2006/main"
+ )
+
+ ELEMENT_RELATIONSHIP_TYPES = {
+ "sldid": "slide",
+ "sldmasterid": "slidemaster",
+ "notesmasterid": "notesmaster",
+ "sldlayoutid": "slidelayout",
+ "themeid": "theme",
+ "tablestyleid": "tablestyles",
+ }
+
+ def validate(self):
+ if not self.validate_xml():
+ return False
+
+ all_valid = True
+ if not self.validate_namespaces():
+ all_valid = False
+
+ if not self.validate_unique_ids():
+ all_valid = False
+
+ if not self.validate_uuid_ids():
+ all_valid = False
+
+ if not self.validate_file_references():
+ all_valid = False
+
+ if not self.validate_slide_layout_ids():
+ all_valid = False
+
+ if not self.validate_content_types():
+ all_valid = False
+
+ if not self.validate_against_xsd():
+ all_valid = False
+
+ if not self.validate_notes_slide_references():
+ all_valid = False
+
+ if not self.validate_all_relationship_ids():
+ all_valid = False
+
+ if not self.validate_no_duplicate_slide_layouts():
+ all_valid = False
+
+ return all_valid
+
+ def validate_uuid_ids(self):
+ import lxml.etree
+
+ errors = []
+ uuid_pattern = re.compile(
+ r"^[\{\(]?[0-9A-Fa-f]{8}-?[0-9A-Fa-f]{4}-?[0-9A-Fa-f]{4}-?[0-9A-Fa-f]{4}-?[0-9A-Fa-f]{12}[\}\)]?$"
+ )
+
+ for xml_file in self.xml_files:
+ try:
+ root = lxml.etree.parse(str(xml_file)).getroot()
+
+ for elem in root.iter():
+ for attr, value in elem.attrib.items():
+ attr_name = attr.split("}")[-1].lower()
+ if attr_name == "id" or attr_name.endswith("id"):
+ if self._looks_like_uuid(value):
+ if not uuid_pattern.match(value):
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: "
+ f"Line {elem.sourceline}: ID '{value}' appears to be a UUID but contains invalid hex characters"
+ )
+
+ except (lxml.etree.XMLSyntaxError, Exception) as e:
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}"
+ )
+
+ if errors:
+ print(f"FAILED - Found {len(errors)} UUID ID validation errors:")
+ for error in errors:
+ print(error)
+ return False
+ else:
+ if self.verbose:
+ print("PASSED - All UUID-like IDs contain valid hex values")
+ return True
+
+ def _looks_like_uuid(self, value):
+ clean_value = value.strip("{}()").replace("-", "")
+ return len(clean_value) == 32 and all(c.isalnum() for c in clean_value)
+
+ def validate_slide_layout_ids(self):
+ import lxml.etree
+
+ errors = []
+
+ slide_masters = list(self.unpacked_dir.glob("ppt/slideMasters/*.xml"))
+
+ if not slide_masters:
+ if self.verbose:
+ print("PASSED - No slide masters found")
+ return True
+
+ for slide_master in slide_masters:
+ try:
+ root = lxml.etree.parse(str(slide_master)).getroot()
+
+ rels_file = slide_master.parent / "_rels" / f"{slide_master.name}.rels"
+
+ if not rels_file.exists():
+ errors.append(
+ f" {slide_master.relative_to(self.unpacked_dir)}: "
+ f"Missing relationships file: {rels_file.relative_to(self.unpacked_dir)}"
+ )
+ continue
+
+ rels_root = lxml.etree.parse(str(rels_file)).getroot()
+
+ valid_layout_rids = set()
+ for rel in rels_root.findall(
+ f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship"
+ ):
+ rel_type = rel.get("Type", "")
+ if "slideLayout" in rel_type:
+ valid_layout_rids.add(rel.get("Id"))
+
+ for sld_layout_id in root.findall(
+ f".//{{{self.PRESENTATIONML_NAMESPACE}}}sldLayoutId"
+ ):
+ r_id = sld_layout_id.get(
+ f"{{{self.OFFICE_RELATIONSHIPS_NAMESPACE}}}id"
+ )
+ layout_id = sld_layout_id.get("id")
+
+ if r_id and r_id not in valid_layout_rids:
+ errors.append(
+ f" {slide_master.relative_to(self.unpacked_dir)}: "
+ f"Line {sld_layout_id.sourceline}: sldLayoutId with id='{layout_id}' "
+ f"references r:id='{r_id}' which is not found in slide layout relationships"
+ )
+
+ except (lxml.etree.XMLSyntaxError, Exception) as e:
+ errors.append(
+ f" {slide_master.relative_to(self.unpacked_dir)}: Error: {e}"
+ )
+
+ if errors:
+ print(f"FAILED - Found {len(errors)} slide layout ID validation errors:")
+ for error in errors:
+ print(error)
+ print(
+ "Remove invalid references or add missing slide layouts to the relationships file."
+ )
+ return False
+ else:
+ if self.verbose:
+ print("PASSED - All slide layout IDs reference valid slide layouts")
+ return True
+
+ def validate_no_duplicate_slide_layouts(self):
+ import lxml.etree
+
+ errors = []
+ slide_rels_files = list(self.unpacked_dir.glob("ppt/slides/_rels/*.xml.rels"))
+
+ for rels_file in slide_rels_files:
+ try:
+ root = lxml.etree.parse(str(rels_file)).getroot()
+
+ layout_rels = [
+ rel
+ for rel in root.findall(
+ f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship"
+ )
+ if "slideLayout" in rel.get("Type", "")
+ ]
+
+ if len(layout_rels) > 1:
+ errors.append(
+ f" {rels_file.relative_to(self.unpacked_dir)}: has {len(layout_rels)} slideLayout references"
+ )
+
+ except Exception as e:
+ errors.append(
+ f" {rels_file.relative_to(self.unpacked_dir)}: Error: {e}"
+ )
+
+ if errors:
+ print("FAILED - Found slides with duplicate slideLayout references:")
+ for error in errors:
+ print(error)
+ return False
+ else:
+ if self.verbose:
+ print("PASSED - All slides have exactly one slideLayout reference")
+ return True
+
+ def validate_notes_slide_references(self):
+ import lxml.etree
+
+ errors = []
+ notes_slide_references = {}
+
+ slide_rels_files = list(self.unpacked_dir.glob("ppt/slides/_rels/*.xml.rels"))
+
+ if not slide_rels_files:
+ if self.verbose:
+ print("PASSED - No slide relationship files found")
+ return True
+
+ for rels_file in slide_rels_files:
+ try:
+ root = lxml.etree.parse(str(rels_file)).getroot()
+
+ for rel in root.findall(
+ f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship"
+ ):
+ rel_type = rel.get("Type", "")
+ if "notesSlide" in rel_type:
+ target = rel.get("Target", "")
+ if target:
+ normalized_target = target.replace("../", "")
+
+ slide_name = rels_file.stem.replace(
+ ".xml", ""
+ )
+
+ if normalized_target not in notes_slide_references:
+ notes_slide_references[normalized_target] = []
+ notes_slide_references[normalized_target].append(
+ (slide_name, rels_file)
+ )
+
+ except (lxml.etree.XMLSyntaxError, Exception) as e:
+ errors.append(
+ f" {rels_file.relative_to(self.unpacked_dir)}: Error: {e}"
+ )
+
+ for target, references in notes_slide_references.items():
+ if len(references) > 1:
+ slide_names = [ref[0] for ref in references]
+ errors.append(
+ f" Notes slide '{target}' is referenced by multiple slides: {', '.join(slide_names)}"
+ )
+ for slide_name, rels_file in references:
+ errors.append(f" - {rels_file.relative_to(self.unpacked_dir)}")
+
+ if errors:
+ print(
+ f"FAILED - Found {len([e for e in errors if not e.startswith(' ')])} notes slide reference validation errors:"
+ )
+ for error in errors:
+ print(error)
+            print("Each slide may optionally have its own notes slide file.")
+ return False
+ else:
+ if self.verbose:
+ print("PASSED - All notes slide references are unique")
+ return True
+
+
+if __name__ == "__main__":
+ raise RuntimeError("This module should not be run directly.")
diff --git a/.claude/skills/pptx/scripts/office/validators/redlining.py b/.claude/skills/pptx/scripts/office/validators/redlining.py
new file mode 100644
index 0000000..71c81b6
--- /dev/null
+++ b/.claude/skills/pptx/scripts/office/validators/redlining.py
@@ -0,0 +1,247 @@
+"""
+Validator for tracked changes in Word documents.
+"""
+
+import subprocess
+import tempfile
+import zipfile
+from pathlib import Path
+
+
+class RedliningValidator:
+
+ def __init__(self, unpacked_dir, original_docx, verbose=False, author="Claude"):
+ self.unpacked_dir = Path(unpacked_dir)
+ self.original_docx = Path(original_docx)
+ self.verbose = verbose
+ self.author = author
+ self.namespaces = {
+ "w": "http://schemas.openxmlformats.org/wordprocessingml/2006/main"
+ }
+
+ def repair(self) -> int:
+ return 0
+
+ def validate(self):
+ modified_file = self.unpacked_dir / "word" / "document.xml"
+ if not modified_file.exists():
+ print(f"FAILED - Modified document.xml not found at {modified_file}")
+ return False
+
+ try:
+ import xml.etree.ElementTree as ET
+
+ tree = ET.parse(modified_file)
+ root = tree.getroot()
+
+ del_elements = root.findall(".//w:del", self.namespaces)
+ ins_elements = root.findall(".//w:ins", self.namespaces)
+
+ author_del_elements = [
+ elem
+ for elem in del_elements
+ if elem.get(f"{{{self.namespaces['w']}}}author") == self.author
+ ]
+ author_ins_elements = [
+ elem
+ for elem in ins_elements
+ if elem.get(f"{{{self.namespaces['w']}}}author") == self.author
+ ]
+
+ if not author_del_elements and not author_ins_elements:
+ if self.verbose:
+ print(f"PASSED - No tracked changes by {self.author} found.")
+ return True
+
+ except Exception:
+ pass
+
+ with tempfile.TemporaryDirectory() as temp_dir:
+ temp_path = Path(temp_dir)
+
+ try:
+ with zipfile.ZipFile(self.original_docx, "r") as zip_ref:
+ zip_ref.extractall(temp_path)
+ except Exception as e:
+ print(f"FAILED - Error unpacking original docx: {e}")
+ return False
+
+ original_file = temp_path / "word" / "document.xml"
+ if not original_file.exists():
+ print(
+ f"FAILED - Original document.xml not found in {self.original_docx}"
+ )
+ return False
+
+ try:
+ import xml.etree.ElementTree as ET
+
+ modified_tree = ET.parse(modified_file)
+ modified_root = modified_tree.getroot()
+ original_tree = ET.parse(original_file)
+ original_root = original_tree.getroot()
+ except ET.ParseError as e:
+ print(f"FAILED - Error parsing XML files: {e}")
+ return False
+
+ self._remove_author_tracked_changes(original_root)
+ self._remove_author_tracked_changes(modified_root)
+
+ modified_text = self._extract_text_content(modified_root)
+ original_text = self._extract_text_content(original_root)
+
+ if modified_text != original_text:
+ error_message = self._generate_detailed_diff(
+ original_text, modified_text
+ )
+ print(error_message)
+ return False
+
+ if self.verbose:
+ print(f"PASSED - All changes by {self.author} are properly tracked")
+ return True
+
+ def _generate_detailed_diff(self, original_text, modified_text):
+ error_parts = [
+ f"FAILED - Document text doesn't match after removing {self.author}'s tracked changes",
+ "",
+ "Likely causes:",
+            " 1. Modified text inside another author's <w:ins> or <w:del> tags",
+            " 2. Made edits without proper tracked changes",
+            " 3. Didn't nest <w:del> inside <w:ins> when deleting another's insertion",
+            "",
+            "For pre-redlined documents, use correct patterns:",
+            " - To reject another's INSERTION: Nest <w:del> inside their <w:ins>",
+            " - To restore another's DELETION: Add new <w:ins> AFTER their <w:del>",
+ "",
+ ]
+
+ git_diff = self._get_git_word_diff(original_text, modified_text)
+ if git_diff:
+ error_parts.extend(["Differences:", "============", git_diff])
+ else:
+ error_parts.append("Unable to generate word diff (git not available)")
+
+ return "\n".join(error_parts)
+
+ def _get_git_word_diff(self, original_text, modified_text):
+ try:
+ with tempfile.TemporaryDirectory() as temp_dir:
+ temp_path = Path(temp_dir)
+
+ original_file = temp_path / "original.txt"
+ modified_file = temp_path / "modified.txt"
+
+ original_file.write_text(original_text, encoding="utf-8")
+ modified_file.write_text(modified_text, encoding="utf-8")
+
+ result = subprocess.run(
+ [
+ "git",
+ "diff",
+ "--word-diff=plain",
+ "--word-diff-regex=.",
+ "-U0",
+ "--no-index",
+ str(original_file),
+ str(modified_file),
+ ],
+ capture_output=True,
+ text=True,
+ )
+
+ if result.stdout.strip():
+ lines = result.stdout.split("\n")
+ content_lines = []
+ in_content = False
+ for line in lines:
+ if line.startswith("@@"):
+ in_content = True
+ continue
+ if in_content and line.strip():
+ content_lines.append(line)
+
+ if content_lines:
+ return "\n".join(content_lines)
+
+ result = subprocess.run(
+ [
+ "git",
+ "diff",
+ "--word-diff=plain",
+ "-U0",
+ "--no-index",
+ str(original_file),
+ str(modified_file),
+ ],
+ capture_output=True,
+ text=True,
+ )
+
+ if result.stdout.strip():
+ lines = result.stdout.split("\n")
+ content_lines = []
+ in_content = False
+ for line in lines:
+ if line.startswith("@@"):
+ in_content = True
+ continue
+ if in_content and line.strip():
+ content_lines.append(line)
+ return "\n".join(content_lines)
+
+ except (subprocess.CalledProcessError, FileNotFoundError, Exception):
+ pass
+
+ return None
+
+ def _remove_author_tracked_changes(self, root):
+ ins_tag = f"{{{self.namespaces['w']}}}ins"
+ del_tag = f"{{{self.namespaces['w']}}}del"
+ author_attr = f"{{{self.namespaces['w']}}}author"
+
+ for parent in root.iter():
+ to_remove = []
+ for child in parent:
+ if child.tag == ins_tag and child.get(author_attr) == self.author:
+ to_remove.append(child)
+ for elem in to_remove:
+ parent.remove(elem)
+
+ deltext_tag = f"{{{self.namespaces['w']}}}delText"
+ t_tag = f"{{{self.namespaces['w']}}}t"
+
+ for parent in root.iter():
+ to_process = []
+ for child in parent:
+ if child.tag == del_tag and child.get(author_attr) == self.author:
+ to_process.append((child, list(parent).index(child)))
+
+ for del_elem, del_index in reversed(to_process):
+ for elem in del_elem.iter():
+ if elem.tag == deltext_tag:
+ elem.tag = t_tag
+
+ for child in reversed(list(del_elem)):
+ parent.insert(del_index, child)
+ parent.remove(del_elem)
+
+ def _extract_text_content(self, root):
+ p_tag = f"{{{self.namespaces['w']}}}p"
+ t_tag = f"{{{self.namespaces['w']}}}t"
+
+ paragraphs = []
+ for p_elem in root.findall(f".//{p_tag}"):
+ text_parts = []
+ for t_elem in p_elem.findall(f".//{t_tag}"):
+ if t_elem.text:
+ text_parts.append(t_elem.text)
+ paragraph_text = "".join(text_parts)
+ if paragraph_text:
+ paragraphs.append(paragraph_text)
+
+ return "\n".join(paragraphs)
+
+
+if __name__ == "__main__":
+ raise RuntimeError("This module should not be run directly.")
diff --git a/.claude/skills/pptx/scripts/thumbnail.py b/.claude/skills/pptx/scripts/thumbnail.py
new file mode 100644
index 0000000..edcbdc0
--- /dev/null
+++ b/.claude/skills/pptx/scripts/thumbnail.py
@@ -0,0 +1,289 @@
+"""Create thumbnail grids from PowerPoint presentation slides.
+
+Creates a grid layout of slide thumbnails for quick visual analysis.
+Labels each thumbnail with its XML filename (e.g., slide1.xml).
+Hidden slides are shown with a placeholder pattern.
+
+Usage:
+ python thumbnail.py input.pptx [output_prefix] [--cols N]
+
+Examples:
+ python thumbnail.py presentation.pptx
+ # Creates: thumbnails.jpg
+
+ python thumbnail.py template.pptx grid --cols 4
+ # Creates: grid.jpg (or grid-1.jpg, grid-2.jpg for large decks)
+"""
+
+import argparse
+import subprocess
+import sys
+import tempfile
+import zipfile
+from pathlib import Path
+
+import defusedxml.minidom
+from office.soffice import get_soffice_env
+from PIL import Image, ImageDraw, ImageFont
+
+THUMBNAIL_WIDTH = 300
+CONVERSION_DPI = 100
+MAX_COLS = 6
+DEFAULT_COLS = 3
+JPEG_QUALITY = 95
+GRID_PADDING = 20
+BORDER_WIDTH = 2
+FONT_SIZE_RATIO = 0.10
+LABEL_PADDING_RATIO = 0.4
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Create thumbnail grids from PowerPoint slides."
+ )
+ parser.add_argument("input", help="Input PowerPoint file (.pptx)")
+ parser.add_argument(
+ "output_prefix",
+ nargs="?",
+ default="thumbnails",
+ help="Output prefix for image files (default: thumbnails)",
+ )
+ parser.add_argument(
+ "--cols",
+ type=int,
+ default=DEFAULT_COLS,
+ help=f"Number of columns (default: {DEFAULT_COLS}, max: {MAX_COLS})",
+ )
+
+ args = parser.parse_args()
+
+ cols = min(args.cols, MAX_COLS)
+ if args.cols > MAX_COLS:
+ print(f"Warning: Columns limited to {MAX_COLS}")
+
+ input_path = Path(args.input)
+ if not input_path.exists() or input_path.suffix.lower() != ".pptx":
+ print(f"Error: Invalid PowerPoint file: {args.input}", file=sys.stderr)
+ sys.exit(1)
+
+ output_path = Path(f"{args.output_prefix}.jpg")
+
+ try:
+ slide_info = get_slide_info(input_path)
+
+ with tempfile.TemporaryDirectory() as temp_dir:
+ temp_path = Path(temp_dir)
+ visible_images = convert_to_images(input_path, temp_path)
+
+ if not visible_images and not any(s["hidden"] for s in slide_info):
+ print("Error: No slides found", file=sys.stderr)
+ sys.exit(1)
+
+ slides = build_slide_list(slide_info, visible_images, temp_path)
+
+ grid_files = create_grids(slides, cols, THUMBNAIL_WIDTH, output_path)
+
+ print(f"Created {len(grid_files)} grid(s):")
+ for grid_file in grid_files:
+ print(f" {grid_file}")
+
+ except Exception as e:
+ print(f"Error: {e}", file=sys.stderr)
+ sys.exit(1)
+
+
+def get_slide_info(pptx_path: Path) -> list[dict]:
+ with zipfile.ZipFile(pptx_path, "r") as zf:
+ rels_content = zf.read("ppt/_rels/presentation.xml.rels").decode("utf-8")
+ rels_dom = defusedxml.minidom.parseString(rels_content)
+
+ rid_to_slide = {}
+ for rel in rels_dom.getElementsByTagName("Relationship"):
+ rid = rel.getAttribute("Id")
+ target = rel.getAttribute("Target")
+ rel_type = rel.getAttribute("Type")
+ if "slide" in rel_type and target.startswith("slides/"):
+ rid_to_slide[rid] = target.replace("slides/", "")
+
+ pres_content = zf.read("ppt/presentation.xml").decode("utf-8")
+ pres_dom = defusedxml.minidom.parseString(pres_content)
+
+ slides = []
+ for sld_id in pres_dom.getElementsByTagName("p:sldId"):
+ rid = sld_id.getAttribute("r:id")
+ if rid in rid_to_slide:
+ hidden = sld_id.getAttribute("show") == "0"
+ slides.append({"name": rid_to_slide[rid], "hidden": hidden})
+
+ return slides
+
+
+def build_slide_list(
+ slide_info: list[dict],
+ visible_images: list[Path],
+ temp_dir: Path,
+) -> list[tuple[Path, str]]:
+ if visible_images:
+ with Image.open(visible_images[0]) as img:
+ placeholder_size = img.size
+ else:
+ placeholder_size = (1920, 1080)
+
+ slides = []
+ visible_idx = 0
+
+ for info in slide_info:
+ if info["hidden"]:
+ placeholder_path = temp_dir / f"hidden-{info['name']}.jpg"
+ placeholder_img = create_hidden_placeholder(placeholder_size)
+ placeholder_img.save(placeholder_path, "JPEG")
+ slides.append((placeholder_path, f"{info['name']} (hidden)"))
+ else:
+ if visible_idx < len(visible_images):
+ slides.append((visible_images[visible_idx], info["name"]))
+ visible_idx += 1
+
+ return slides
+
+
+def create_hidden_placeholder(size: tuple[int, int]) -> Image.Image:
+ img = Image.new("RGB", size, color="#F0F0F0")
+ draw = ImageDraw.Draw(img)
+ line_width = max(5, min(size) // 100)
+ draw.line([(0, 0), size], fill="#CCCCCC", width=line_width)
+ draw.line([(size[0], 0), (0, size[1])], fill="#CCCCCC", width=line_width)
+ return img
+
+
+def convert_to_images(pptx_path: Path, temp_dir: Path) -> list[Path]:
+ pdf_path = temp_dir / f"{pptx_path.stem}.pdf"
+
+ result = subprocess.run(
+ [
+ "soffice",
+ "--headless",
+ "--convert-to",
+ "pdf",
+ "--outdir",
+ str(temp_dir),
+ str(pptx_path),
+ ],
+ capture_output=True,
+ text=True,
+ env=get_soffice_env(),
+ )
+ if result.returncode != 0 or not pdf_path.exists():
+ raise RuntimeError("PDF conversion failed")
+
+ result = subprocess.run(
+ [
+ "pdftoppm",
+ "-jpeg",
+ "-r",
+ str(CONVERSION_DPI),
+ str(pdf_path),
+ str(temp_dir / "slide"),
+ ],
+ capture_output=True,
+ text=True,
+ )
+ if result.returncode != 0:
+ raise RuntimeError("Image conversion failed")
+
+ return sorted(temp_dir.glob("slide-*.jpg"))
+
+
+def create_grids(
+ slides: list[tuple[Path, str]],
+ cols: int,
+ width: int,
+ output_path: Path,
+) -> list[str]:
+ max_per_grid = cols * (cols + 1)
+ grid_files = []
+
+ for chunk_idx, start_idx in enumerate(range(0, len(slides), max_per_grid)):
+ end_idx = min(start_idx + max_per_grid, len(slides))
+ chunk_slides = slides[start_idx:end_idx]
+
+ grid = create_grid(chunk_slides, cols, width)
+
+ if len(slides) <= max_per_grid:
+ grid_filename = output_path
+ else:
+ stem = output_path.stem
+ suffix = output_path.suffix
+ grid_filename = output_path.parent / f"{stem}-{chunk_idx + 1}{suffix}"
+
+ grid_filename.parent.mkdir(parents=True, exist_ok=True)
+ grid.save(str(grid_filename), quality=JPEG_QUALITY)
+ grid_files.append(str(grid_filename))
+
+ return grid_files
+
+
+def create_grid(
+ slides: list[tuple[Path, str]],
+ cols: int,
+ width: int,
+) -> Image.Image:
+ font_size = int(width * FONT_SIZE_RATIO)
+ label_padding = int(font_size * LABEL_PADDING_RATIO)
+
+ with Image.open(slides[0][0]) as img:
+ aspect = img.height / img.width
+ height = int(width * aspect)
+
+ rows = (len(slides) + cols - 1) // cols
+ grid_w = cols * width + (cols + 1) * GRID_PADDING
+ grid_h = rows * (height + font_size + label_padding * 2) + (rows + 1) * GRID_PADDING
+
+ grid = Image.new("RGB", (grid_w, grid_h), "white")
+ draw = ImageDraw.Draw(grid)
+
+ try:
+ font = ImageFont.load_default(size=font_size)
+ except Exception:
+ font = ImageFont.load_default()
+
+ for i, (img_path, slide_name) in enumerate(slides):
+ row, col = i // cols, i % cols
+ x = col * width + (col + 1) * GRID_PADDING
+ y_base = (
+ row * (height + font_size + label_padding * 2) + (row + 1) * GRID_PADDING
+ )
+
+ label = slide_name
+ bbox = draw.textbbox((0, 0), label, font=font)
+ text_w = bbox[2] - bbox[0]
+ draw.text(
+ (x + (width - text_w) // 2, y_base + label_padding),
+ label,
+ fill="black",
+ font=font,
+ )
+
+ y_thumbnail = y_base + label_padding + font_size + label_padding
+
+ with Image.open(img_path) as img:
+ img.thumbnail((width, height), Image.Resampling.LANCZOS)
+ w, h = img.size
+ tx = x + (width - w) // 2
+ ty = y_thumbnail + (height - h) // 2
+ grid.paste(img, (tx, ty))
+
+ if BORDER_WIDTH > 0:
+ draw.rectangle(
+ [
+ (tx - BORDER_WIDTH, ty - BORDER_WIDTH),
+ (tx + w + BORDER_WIDTH - 1, ty + h + BORDER_WIDTH - 1),
+ ],
+ outline="gray",
+ width=BORDER_WIDTH,
+ )
+
+ return grid
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.claude/skills/skill-creator/.openskills.json b/.claude/skills/skill-creator/.openskills.json
new file mode 100644
index 0000000..f180be0
--- /dev/null
+++ b/.claude/skills/skill-creator/.openskills.json
@@ -0,0 +1,7 @@
+{
+ "source": "anthropics/skills",
+ "sourceType": "git",
+ "repoUrl": "https://github.com/anthropics/skills",
+ "subpath": "skills\\skill-creator",
+ "installedAt": "2026-03-02T09:19:50.154Z"
+}
\ No newline at end of file
diff --git a/.claude/skills/skill-creator/LICENSE.txt b/.claude/skills/skill-creator/LICENSE.txt
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/.claude/skills/skill-creator/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/.claude/skills/skill-creator/SKILL.md b/.claude/skills/skill-creator/SKILL.md
new file mode 100644
index 0000000..942bfe8
--- /dev/null
+++ b/.claude/skills/skill-creator/SKILL.md
@@ -0,0 +1,479 @@
+---
+name: skill-creator
+description: Create new skills, modify and improve existing skills, and measure skill performance. Use when users want to create a skill from scratch, update or optimize an existing skill, run evals to test a skill, benchmark skill performance with variance analysis, or optimize a skill's description for better triggering accuracy.
+---
+
+# Skill Creator
+
+A skill for creating new skills and iteratively improving them.
+
+At a high level, the process of creating a skill goes like this:
+
+- Decide what you want the skill to do and roughly how it should do it
+- Write a draft of the skill
+- Create a few test prompts and run claude-with-access-to-the-skill on them
+- Help the user evaluate the results both qualitatively and quantitatively
+ - While the runs happen in the background, draft some quantitative evals if there aren't any (if there are some, you can either use as is or modify if you feel something needs to change about them). Then explain them to the user (or if they already existed, explain the ones that already exist)
+ - Use the `eval-viewer/generate_review.py` script to show the user the results for them to look at, and also let them look at the quantitative metrics
+- Rewrite the skill based on feedback from the user's evaluation of the results (and also if there are any glaring flaws that become apparent from the quantitative benchmarks)
+- Repeat until you're satisfied
+- Expand the test set and try again at larger scale
+
+Your job when using this skill is to figure out where the user is in this process and then jump in and help them progress through these stages. So for instance, maybe they're like "I want to make a skill for X". You can help narrow down what they mean, write a draft, write the test cases, figure out how they want to evaluate, run all the prompts, and repeat.
+
+On the other hand, maybe they already have a draft of the skill. In this case you can go straight to the eval/iterate part of the loop.
+
+Of course, you should always be flexible and if the user is like "I don't need to run a bunch of evaluations, just vibe with me", you can do that instead.
+
+Then after the skill is done (but again, the order is flexible), you can also run the skill description improver, which we have a whole separate script for, to optimize the triggering of the skill.
+
+Cool? Cool.
+
+## Communicating with the user
+
+The skill creator is liable to be used by people across a wide range of familiarity with coding jargon. If you haven't heard (and how could you, it's only very recently that it started), there's a trend now where the power of Claude is inspiring plumbers to open up their terminals, parents and grandparents to google "how to install npm". On the other hand, the bulk of users are probably fairly computer-literate.
+
+So please pay attention to context cues to understand how to phrase your communication! In the default case, just to give you some idea:
+
+- "evaluation" and "benchmark" are borderline, but OK
+- for "JSON" and "assertion" you want to see serious cues from the user that they know what those things are before using them without explaining them
+
+It's OK to briefly explain terms if you're in doubt, and feel free to clarify terms with a short definition if you're unsure if the user will get it.
+
+---
+
+## Creating a skill
+
+### Capture Intent
+
+Start by understanding the user's intent. The current conversation might already contain a workflow the user wants to capture (e.g., they say "turn this into a skill"). If so, extract answers from the conversation history first — the tools used, the sequence of steps, corrections the user made, input/output formats observed. The user may need to fill the gaps, and should confirm before proceeding to the next step.
+
+1. What should this skill enable Claude to do?
+2. When should this skill trigger? (what user phrases/contexts)
+3. What's the expected output format?
+4. Should we set up test cases to verify the skill works? Skills with objectively verifiable outputs (file transforms, data extraction, code generation, fixed workflow steps) benefit from test cases. Skills with subjective outputs (writing style, art) often don't need them. Suggest the appropriate default based on the skill type, but let the user decide.
+
+### Interview and Research
+
+Proactively ask questions about edge cases, input/output formats, example files, success criteria, and dependencies. Wait to write test prompts until you've got this part ironed out.
+
+Check available MCPs - if useful for research (searching docs, finding similar skills, looking up best practices), research in parallel via subagents if available, otherwise inline. Come prepared with context to reduce burden on the user.
+
+### Write the SKILL.md
+
+Based on the user interview, fill in these components:
+
+- **name**: Skill identifier
+- **description**: When to trigger, what it does. This is the primary triggering mechanism - include both what the skill does AND specific contexts for when to use it. All "when to use" info goes here, not in the body. Note: currently Claude has a tendency to "undertrigger" skills -- to not use them when they'd be useful. To combat this, please make the skill descriptions a little bit "pushy". So for instance, instead of "How to build a simple fast dashboard to display internal Anthropic data.", you might write "How to build a simple fast dashboard to display internal Anthropic data. Make sure to use this skill whenever the user mentions dashboards, data visualization, internal metrics, or wants to display any kind of company data, even if they don't explicitly ask for a 'dashboard.'"
+- **compatibility**: Required tools, dependencies (optional, rarely needed)
+- **the rest of the skill :)**
+
+### Skill Writing Guide
+
+#### Anatomy of a Skill
+
+```
+skill-name/
+├── SKILL.md (required)
+│ ├── YAML frontmatter (name, description required)
+│ └── Markdown instructions
+└── Bundled Resources (optional)
+ ├── scripts/ - Executable code for deterministic/repetitive tasks
+ ├── references/ - Docs loaded into context as needed
+ └── assets/ - Files used in output (templates, icons, fonts)
+```
+
+#### Progressive Disclosure
+
+Skills use a three-level loading system:
+1. **Metadata** (name + description) - Always in context (~100 words)
+2. **SKILL.md body** - In context whenever skill triggers (<500 lines ideal)
+3. **Bundled resources** - As needed (unlimited, scripts can execute without loading)
+
+These word counts are approximate and you can feel free to go longer if needed.
+
+**Key patterns:**
+- Keep SKILL.md under 500 lines; if you're approaching this limit, add an additional layer of hierarchy along with clear pointers about where the model using the skill should go next to follow up.
+- Reference files clearly from SKILL.md with guidance on when to read them
+- For large reference files (>300 lines), include a table of contents
+
+**Domain organization**: When a skill supports multiple domains/frameworks, organize by variant:
+```
+cloud-deploy/
+├── SKILL.md (workflow + selection)
+└── references/
+ ├── aws.md
+ ├── gcp.md
+ └── azure.md
+```
+Claude reads only the relevant reference file.
+
+#### Principle of Lack of Surprise
+
+This goes without saying, but skills must not contain malware, exploit code, or any content that could compromise system security. A skill's contents should not surprise the user in their intent if described. Don't go along with requests to create misleading skills or skills designed to facilitate unauthorized access, data exfiltration, or other malicious activities. Things like a "roleplay as an XYZ" are OK though.
+
+#### Writing Patterns
+
+Prefer using the imperative form in instructions.
+
+**Defining output formats** - You can do it like this:
+```markdown
+## Report structure
+ALWAYS use this exact template:
+# [Title]
+## Executive summary
+## Key findings
+## Recommendations
+```
+
+**Examples pattern** - It's useful to include examples. You can format them like this (but if "Input" and "Output" are in the examples you might want to deviate a little):
+```markdown
+## Commit message format
+**Example 1:**
+Input: Added user authentication with JWT tokens
+Output: feat(auth): implement JWT-based authentication
+```
+
+### Writing Style
+
+Try to explain to the model why things are important in lieu of heavy-handed musty MUSTs. Use theory of mind and try to make the skill general and not super-narrow to specific examples. Start by writing a draft and then look at it with fresh eyes and improve it.
+
+### Test Cases
+
+After writing the skill draft, come up with 2-3 realistic test prompts — the kind of thing a real user would actually say. Share them with the user: [you don't have to use this exact language] "Here are a few test cases I'd like to try. Do these look right, or do you want to add more?" Then run them.
+
+Save test cases to `evals/evals.json`. Don't write assertions yet — just the prompts. You'll draft assertions in the next step while the runs are in progress.
+
+```json
+{
+ "skill_name": "example-skill",
+ "evals": [
+ {
+ "id": 1,
+ "prompt": "User's task prompt",
+ "expected_output": "Description of expected result",
+ "files": []
+ }
+ ]
+}
+```
+
+See `references/schemas.md` for the full schema (including the `assertions` field, which you'll add later).
+
+## Running and evaluating test cases
+
+This section is one continuous sequence — don't stop partway through. Do NOT use `/skill-test` or any other testing skill.
+
+Put results in `<skill-name>-workspace/` as a sibling to the skill directory. Within the workspace, organize results by iteration (`iteration-1/`, `iteration-2/`, etc.) and within that, each test case gets a directory (`eval-0/`, `eval-1/`, etc.). Don't create all of this upfront — just create directories as you go.
+
+### Step 1: Spawn all runs (with-skill AND baseline) in the same turn
+
+For each test case, spawn two subagents in the same turn — one with the skill, one without. This is important: don't spawn the with-skill runs first and then come back for baselines later. Launch everything at once so it all finishes around the same time.
+
+**With-skill run:**
+
+```
+Execute this task:
+- Skill path: <path to the skill directory>
+- Task: <the test case prompt>
+- Input files: <paths, if any>
+- Save outputs to: <workspace>/iteration-<N>/eval-<id>/with_skill/outputs/
+- Outputs to save: <expected output files>
+```
+
+**Baseline run** (same prompt, but the baseline depends on context):
+- **Creating a new skill**: no skill at all. Same prompt, no skill path, save to `without_skill/outputs/`.
+- **Improving an existing skill**: the old version. Before editing, snapshot the skill (`cp -r <skill-dir> <workspace>/skill-snapshot/`), then point the baseline subagent at the snapshot. Save to `old_skill/outputs/`.
+
+Write an `eval_metadata.json` for each test case (assertions can be empty for now). Give each eval a descriptive name based on what it's testing — not just "eval-0". Use this name for the directory too. If this iteration uses new or modified eval prompts, create these files for each new eval directory — don't assume they carry over from previous iterations.
+
+```json
+{
+ "eval_id": 0,
+ "eval_name": "descriptive-name-here",
+ "prompt": "The user's task prompt",
+ "assertions": []
+}
+```
+
+### Step 2: While runs are in progress, draft assertions
+
+Don't just wait for the runs to finish — you can use this time productively. Draft quantitative assertions for each test case and explain them to the user. If assertions already exist in `evals/evals.json`, review them and explain what they check.
+
+Good assertions are objectively verifiable and have descriptive names — they should read clearly in the benchmark viewer so someone glancing at the results immediately understands what each one checks. Subjective skills (writing style, design quality) are better evaluated qualitatively — don't force assertions onto things that need human judgment.
+
+Update the `eval_metadata.json` files and `evals/evals.json` with the assertions once drafted. Also explain to the user what they'll see in the viewer — both the qualitative outputs and the quantitative benchmark.
+
+### Step 3: As runs complete, capture timing data
+
+When each subagent task completes, you receive a notification containing `total_tokens` and `duration_ms`. Save this data immediately to `timing.json` in the run directory:
+
+```json
+{
+ "total_tokens": 84852,
+ "duration_ms": 23332,
+ "total_duration_seconds": 23.3
+}
+```
+
+This is the only opportunity to capture this data — it comes through the task notification and isn't persisted elsewhere. Process each notification as it arrives rather than trying to batch them.
+
+### Step 4: Grade, aggregate, and launch the viewer
+
+Once all runs are done:
+
+1. **Grade each run** — spawn a grader subagent (or grade inline) that reads `agents/grader.md` and evaluates each assertion against the outputs. Save results to `grading.json` in each run directory. The grading.json expectations array must use the fields `text`, `passed`, and `evidence` (not `name`/`met`/`details` or other variants) — the viewer depends on these exact field names. For assertions that can be checked programmatically, write and run a script rather than eyeballing it — scripts are faster, more reliable, and can be reused across iterations.
+
+2. **Aggregate into benchmark** — run the aggregation script from the skill-creator directory:
+ ```bash
+ python -m scripts.aggregate_benchmark <workspace>/iteration-N --skill-name <skill-name>
+ ```
+ This produces `benchmark.json` and `benchmark.md` with pass_rate, time, and tokens for each configuration, with mean ± stddev and the delta. If generating benchmark.json manually, see `references/schemas.md` for the exact schema the viewer expects.
+Put each with_skill version before its baseline counterpart.
+
+3. **Do an analyst pass** — read the benchmark data and surface patterns the aggregate stats might hide. See `agents/analyzer.md` (the "Analyzing Benchmark Results" section) for what to look for — things like assertions that always pass regardless of skill (non-discriminating), high-variance evals (possibly flaky), and time/token tradeoffs.
+
+4. **Launch the viewer** with both qualitative outputs and quantitative data:
+ ```bash
+ nohup python <skill-creator-dir>/eval-viewer/generate_review.py \
+ <workspace>/iteration-N \
+ --skill-name "my-skill" \
+ --benchmark <workspace>/iteration-N/benchmark.json \
+ > /dev/null 2>&1 &
+ VIEWER_PID=$!
+ ```
+ For iteration 2+, also pass `--previous-workspace <workspace>/iteration-<N-1>`.
+
+ **Cowork / headless environments:** If `webbrowser.open()` is not available or the environment has no display, use `--static <output.html>` to write a standalone HTML file instead of starting a server. Feedback will be downloaded as a `feedback.json` file when the user clicks "Submit All Reviews". After download, copy `feedback.json` into the workspace directory for the next iteration to pick up.
+
+Note: please use generate_review.py to create the viewer; there's no need to write custom HTML.
+
+5. **Tell the user** something like: "I've opened the results in your browser. There are two tabs — 'Outputs' lets you click through each test case and leave feedback, 'Benchmark' shows the quantitative comparison. When you're done, come back here and let me know."
+
+### What the user sees in the viewer
+
+The "Outputs" tab shows one test case at a time:
+- **Prompt**: the task that was given
+- **Output**: the files the skill produced, rendered inline where possible
+- **Previous Output** (iteration 2+): collapsed section showing last iteration's output
+- **Formal Grades** (if grading was run): collapsed section showing assertion pass/fail
+- **Feedback**: a textbox that auto-saves as they type
+- **Previous Feedback** (iteration 2+): their comments from last time, shown below the textbox
+
+The "Benchmark" tab shows the stats summary: pass rates, timing, and token usage for each configuration, with per-eval breakdowns and analyst observations.
+
+Navigation is via prev/next buttons or arrow keys. When done, they click "Submit All Reviews" which saves all feedback to `feedback.json`.
+
+### Step 5: Read the feedback
+
+When the user tells you they're done, read `feedback.json`:
+
+```json
+{
+ "reviews": [
+ {"run_id": "eval-0-with_skill", "feedback": "the chart is missing axis labels", "timestamp": "..."},
+ {"run_id": "eval-1-with_skill", "feedback": "", "timestamp": "..."},
+ {"run_id": "eval-2-with_skill", "feedback": "perfect, love this", "timestamp": "..."}
+ ],
+ "status": "complete"
+}
+```
+
+Empty feedback means the user thought it was fine. Focus your improvements on the test cases where the user had specific complaints.
+
+Kill the viewer server when you're done with it:
+
+```bash
+kill $VIEWER_PID 2>/dev/null
+```
+
+---
+
+## Improving the skill
+
+This is the heart of the loop. You've run the test cases, the user has reviewed the results, and now you need to make the skill better based on their feedback.
+
+### How to think about improvements
+
+1. **Generalize from the feedback.** The big picture thing that's happening here is that we're trying to create skills that can be used a million times (maybe literally, maybe even more who knows) across many different prompts. Here you and the user are iterating on only a few examples over and over again because it helps move faster. The user knows these examples in and out and it's quick for them to assess new outputs. But if the skill you and the user are codeveloping works only for those examples, it's useless. Rather than put in fiddly overfitty changes, or oppressively constrictive MUSTs, if there's some stubborn issue, you might try branching out and using different metaphors, or recommending different patterns of working. It's relatively cheap to try and maybe you'll land on something great.
+
+2. **Keep the prompt lean.** Remove things that aren't pulling their weight. Make sure to read the transcripts, not just the final outputs — if it looks like the skill is making the model waste a bunch of time doing things that are unproductive, you can try getting rid of the parts of the skill that are making it do that and seeing what happens.
+
+3. **Explain the why.** Try hard to explain the **why** behind everything you're asking the model to do. Today's LLMs are *smart*. They have good theory of mind and when given a good harness can go beyond rote instructions and really make things happen. Even if the feedback from the user is terse or frustrated, try to actually understand the task and why the user is writing what they wrote, and what they actually wrote, and then transmit this understanding into the instructions. If you find yourself writing ALWAYS or NEVER in all caps, or using super rigid structures, that's a yellow flag — if possible, reframe and explain the reasoning so that the model understands why the thing you're asking for is important. That's a more humane, powerful, and effective approach.
+
+4. **Look for repeated work across test cases.** Read the transcripts from the test runs and notice if the subagents all independently wrote similar helper scripts or took the same multi-step approach to something. If all 3 test cases resulted in the subagent writing a `create_docx.py` or a `build_chart.py`, that's a strong signal the skill should bundle that script. Write it once, put it in `scripts/`, and tell the skill to use it. This saves every future invocation from reinventing the wheel.
+
+This task is pretty important (we are trying to create billions a year in economic value here!) and your thinking time is not the blocker; take your time and really mull things over. I'd suggest writing a draft revision and then looking at it anew and making improvements. Really do your best to get into the head of the user and understand what they want and need.
+
+### The iteration loop
+
+After improving the skill:
+
+1. Apply your improvements to the skill
+2. Rerun all test cases into a new `iteration-<N>/` directory, including baseline runs. If you're creating a new skill, the baseline is always `without_skill` (no skill) — that stays the same across iterations. If you're improving an existing skill, use your judgment on what makes sense as the baseline: the original version the user came in with, or the previous iteration.
+3. Launch the reviewer with `--previous-workspace` pointing at the previous iteration
+4. Wait for the user to review and tell you they're done
+5. Read the new feedback, improve again, repeat
+
+Keep going until:
+- The user says they're happy
+- The feedback is all empty (everything looks good)
+- You're not making meaningful progress
+
+---
+
+## Advanced: Blind comparison
+
+For situations where you want a more rigorous comparison between two versions of a skill (e.g., the user asks "is the new version actually better?"), there's a blind comparison system. Read `agents/comparator.md` and `agents/analyzer.md` for the details. The basic idea is: give two outputs to an independent agent without telling it which is which, and let it judge quality. Then analyze why the winner won.
+
+This is optional, requires subagents, and most users won't need it. The human review loop is usually sufficient.
+
+---
+
+## Description Optimization
+
+The description field in SKILL.md frontmatter is the primary mechanism that determines whether Claude invokes a skill. After creating or improving a skill, offer to optimize the description for better triggering accuracy.
+
+### Step 1: Generate trigger eval queries
+
+Create 20 eval queries — a mix of should-trigger and should-not-trigger. Save as JSON:
+
+```json
+[
+ {"query": "the user prompt", "should_trigger": true},
+ {"query": "another prompt", "should_trigger": false}
+]
+```
+
+The queries must be realistic and something a Claude Code or Claude.ai user would actually type. Not abstract requests, but requests that are concrete and specific and have a good amount of detail. For instance, file paths, personal context about the user's job or situation, column names and values, company names, URLs. A little bit of backstory. Some might be in lowercase or contain abbreviations or typos or casual speech. Use a mix of different lengths, and focus on edge cases rather than making them clear-cut (the user will get a chance to sign off on them).
+
+Bad: `"Format this data"`, `"Extract text from PDF"`, `"Create a chart"`
+
+Good: `"ok so my boss just sent me this xlsx file (its in my downloads, called something like 'Q4 sales final FINAL v2.xlsx') and she wants me to add a column that shows the profit margin as a percentage. The revenue is in column C and costs are in column D i think"`
+
+For the **should-trigger** queries (8-10), think about coverage. You want different phrasings of the same intent — some formal, some casual. Include cases where the user doesn't explicitly name the skill or file type but clearly needs it. Throw in some uncommon use cases and cases where this skill competes with another but should win.
+
+For the **should-not-trigger** queries (8-10), the most valuable ones are the near-misses — queries that share keywords or concepts with the skill but actually need something different. Think adjacent domains, ambiguous phrasing where a naive keyword match would trigger but shouldn't, and cases where the query touches on something the skill does but in a context where another tool is more appropriate.
+
+The key thing to avoid: don't make should-not-trigger queries obviously irrelevant. "Write a fibonacci function" as a negative test for a PDF skill is too easy — it doesn't test anything. The negative cases should be genuinely tricky.
+
+### Step 2: Review with user
+
+Present the eval set to the user for review using the HTML template:
+
+1. Read the template from `assets/eval_review.html`
+2. Replace the placeholders:
+ - `__EVAL_DATA_PLACEHOLDER__` → the JSON array of eval items (no quotes around it — it's a JS variable assignment)
+ - `__SKILL_NAME_PLACEHOLDER__` → the skill's name
+ - `__SKILL_DESCRIPTION_PLACEHOLDER__` → the skill's current description
+3. Write to a temp file (e.g., `/tmp/eval_review_<skill-name>.html`) and open it: `open /tmp/eval_review_<skill-name>.html`
+4. The user can edit queries, toggle should-trigger, add/remove entries, then click "Export Eval Set"
+5. The file downloads to `~/Downloads/eval_set.json` — check the Downloads folder for the most recent version in case there are multiple (e.g., `eval_set (1).json`)
+
+This step matters — bad eval queries lead to bad descriptions.
+
+### Step 3: Run the optimization loop
+
+Tell the user: "This will take some time — I'll run the optimization loop in the background and check on it periodically."
+
+Save the eval set to the workspace, then run in the background:
+
+```bash
+python -m scripts.run_loop \
+ --eval-set <path to eval_set.json> \
+ --skill-path <path to the skill directory> \
+ --model <model-id> \
+ --max-iterations 5 \
+ --verbose
+```
+
+Use the model ID from your system prompt (the one powering the current session) so the triggering test matches what the user actually experiences.
+
+While it runs, periodically tail the output to give the user updates on which iteration it's on and what the scores look like.
+
+This handles the full optimization loop automatically. It splits the eval set into 60% train and 40% held-out test, evaluates the current description (running each query 3 times to get a reliable trigger rate), then calls Claude with extended thinking to propose improvements based on what failed. It re-evaluates each new description on both train and test, iterating up to 5 times. When it's done, it opens an HTML report in the browser showing the results per iteration and returns JSON with `best_description` — selected by test score rather than train score to avoid overfitting.
+
+### How skill triggering works
+
+Understanding the triggering mechanism helps design better eval queries. Skills appear in Claude's `available_skills` list with their name + description, and Claude decides whether to consult a skill based on that description. The important thing to know is that Claude only consults skills for tasks it can't easily handle on its own — simple, one-step queries like "read this PDF" may not trigger a skill even if the description matches perfectly, because Claude can handle them directly with basic tools. Complex, multi-step, or specialized queries reliably trigger skills when the description matches.
+
+This means your eval queries should be substantive enough that Claude would actually benefit from consulting a skill. Simple queries like "read file X" are poor test cases — they won't trigger skills regardless of description quality.
+
+### Step 4: Apply the result
+
+Take `best_description` from the JSON output and update the skill's SKILL.md frontmatter. Show the user before/after and report the scores.
+
+---
+
+### Package and Present (only if `present_files` tool is available)
+
+Check whether you have access to the `present_files` tool. If you don't, skip this step. If you do, package the skill and present the .skill file to the user:
+
+```bash
+python -m scripts.package_skill
+```
+
+After packaging, direct the user to the resulting `.skill` file path so they can install it.
+
+---
+
+## Claude.ai-specific instructions
+
+In Claude.ai, the core workflow is the same (draft → test → review → improve → repeat), but because Claude.ai doesn't have subagents, some mechanics change. Here's what to adapt:
+
+**Running test cases**: No subagents means no parallel execution. For each test case, read the skill's SKILL.md, then follow its instructions to accomplish the test prompt yourself. Do them one at a time. This is less rigorous than independent subagents (you wrote the skill and you're also running it, so you have full context), but it's a useful sanity check — and the human review step compensates. Skip the baseline runs — just use the skill to complete the task as requested.
+
+**Reviewing results**: If you can't open a browser (e.g., Claude.ai's VM has no display, or you're on a remote server), skip the browser reviewer entirely. Instead, present results directly in the conversation. For each test case, show the prompt and the output. If the output is a file the user needs to see (like a .docx or .xlsx), save it to the filesystem and tell them where it is so they can download and inspect it. Ask for feedback inline: "How does this look? Anything you'd change?"
+
+**Benchmarking**: Skip the quantitative benchmarking — it relies on baseline comparisons which aren't meaningful without subagents. Focus on qualitative feedback from the user.
+
+**The iteration loop**: Same as before — improve the skill, rerun the test cases, ask for feedback — just without the browser reviewer in the middle. You can still organize results into iteration directories on the filesystem if you have one.
+
+**Description optimization**: This section requires the `claude` CLI tool (specifically `claude -p`) which is only available in Claude Code. Skip it if you're on Claude.ai.
+
+**Blind comparison**: Requires subagents. Skip it.
+
+**Packaging**: The `package_skill.py` script works anywhere with Python and a filesystem. On Claude.ai, you can run it and the user can download the resulting `.skill` file.
+
+---
+
+## Cowork-Specific Instructions
+
+If you're in Cowork, the main things to know are:
+
+- You have subagents, so the main workflow (spawn test cases in parallel, run baselines, grade, etc.) all works. (However, if you run into severe problems with timeouts, it's OK to run the test prompts in series rather than parallel.)
+- You don't have a browser or display, so when generating the eval viewer, use `--static <output.html>` to write a standalone HTML file instead of starting a server. Then proffer a link that the user can click to open the HTML in their browser.
+- For whatever reason, the Cowork setup seems to disincline Claude from generating the eval viewer after running the tests, so just to reiterate: whether you're in Cowork or in Claude Code, after running tests, you should always generate the eval viewer for the human to look at examples before revising the skill yourself and trying to make corrections, using `generate_review.py` (not writing your own boutique html code). Sorry in advance but I'm gonna go all caps here: GENERATE THE EVAL VIEWER *BEFORE* evaluating inputs yourself. You want to get them in front of the human ASAP!
+- Feedback works differently: since there's no running server, the viewer's "Submit All Reviews" button will download `feedback.json` as a file. You can then read it from there (you may have to request access first).
+- Packaging works — `package_skill.py` just needs Python and a filesystem.
+- Description optimization (`run_loop.py` / `run_eval.py`) should work in Cowork just fine since it uses `claude -p` via subprocess, not a browser, but please save it until you've fully finished making the skill and the user agrees it's in good shape.
+
+---
+
+## Reference files
+
+The agents/ directory contains instructions for specialized subagents. Read them when you need to spawn the relevant subagent.
+
+- `agents/grader.md` — How to evaluate assertions against outputs
+- `agents/comparator.md` — How to do blind A/B comparison between two outputs
+- `agents/analyzer.md` — How to analyze why one version beat another
+
+The references/ directory has additional documentation:
+- `references/schemas.md` — JSON structures for evals.json, grading.json, etc.
+
+---
+
+Repeating one more time the core loop here for emphasis:
+
+- Figure out what the skill is about
+- Draft or edit the skill
+- Run claude-with-access-to-the-skill on test prompts
+- With the user, evaluate the outputs:
+ - Create benchmark.json and run `eval-viewer/generate_review.py` to help the user review them
+ - Run quantitative evals
+- Repeat until you and the user are satisfied
+- Package the final skill and return it to the user.
+
+Please add steps to your TodoList, if you have such a thing, to make sure you don't forget. If you're in Cowork, please specifically put "Create evals JSON and run `eval-viewer/generate_review.py` so human can review test cases" in your TodoList to make sure it happens.
+
+Good luck!
diff --git a/.claude/skills/skill-creator/agents/analyzer.md b/.claude/skills/skill-creator/agents/analyzer.md
new file mode 100644
index 0000000..14e41d6
--- /dev/null
+++ b/.claude/skills/skill-creator/agents/analyzer.md
@@ -0,0 +1,274 @@
+# Post-hoc Analyzer Agent
+
+Analyze blind comparison results to understand WHY the winner won and generate improvement suggestions.
+
+## Role
+
+After the blind comparator determines a winner, the Post-hoc Analyzer "unblinds" the results by examining the skills and transcripts. The goal is to extract actionable insights: what made the winner better, and how can the loser be improved?
+
+## Inputs
+
+You receive these parameters in your prompt:
+
+- **winner**: "A" or "B" (from blind comparison)
+- **winner_skill_path**: Path to the skill that produced the winning output
+- **winner_transcript_path**: Path to the execution transcript for the winner
+- **loser_skill_path**: Path to the skill that produced the losing output
+- **loser_transcript_path**: Path to the execution transcript for the loser
+- **comparison_result_path**: Path to the blind comparator's output JSON
+- **output_path**: Where to save the analysis results
+
+## Process
+
+### Step 1: Read Comparison Result
+
+1. Read the blind comparator's output at comparison_result_path
+2. Note the winning side (A or B), the reasoning, and any scores
+3. Understand what the comparator valued in the winning output
+
+### Step 2: Read Both Skills
+
+1. Read the winner skill's SKILL.md and key referenced files
+2. Read the loser skill's SKILL.md and key referenced files
+3. Identify structural differences:
+ - Instructions clarity and specificity
+ - Script/tool usage patterns
+ - Example coverage
+ - Edge case handling
+
+### Step 3: Read Both Transcripts
+
+1. Read the winner's transcript
+2. Read the loser's transcript
+3. Compare execution patterns:
+ - How closely did each follow their skill's instructions?
+ - What tools were used differently?
+ - Where did the loser diverge from optimal behavior?
+ - Did either encounter errors or make recovery attempts?
+
+### Step 4: Analyze Instruction Following
+
+For each transcript, evaluate:
+- Did the agent follow the skill's explicit instructions?
+- Did the agent use the skill's provided tools/scripts?
+- Were there missed opportunities to leverage skill content?
+- Did the agent add unnecessary steps not in the skill?
+
+Score instruction following 1-10 and note specific issues.
+
+### Step 5: Identify Winner Strengths
+
+Determine what made the winner better:
+- Clearer instructions that led to better behavior?
+- Better scripts/tools that produced better output?
+- More comprehensive examples that guided edge cases?
+- Better error handling guidance?
+
+Be specific. Quote from skills/transcripts where relevant.
+
+### Step 6: Identify Loser Weaknesses
+
+Determine what held the loser back:
+- Ambiguous instructions that led to suboptimal choices?
+- Missing tools/scripts that forced workarounds?
+- Gaps in edge case coverage?
+- Poor error handling that caused failures?
+
+### Step 7: Generate Improvement Suggestions
+
+Based on the analysis, produce actionable suggestions for improving the loser skill:
+- Specific instruction changes to make
+- Tools/scripts to add or modify
+- Examples to include
+- Edge cases to address
+
+Prioritize by impact. Focus on changes that would have changed the outcome.
+
+### Step 8: Write Analysis Results
+
+Save structured analysis to `{output_path}`.
+
+## Output Format
+
+Write a JSON file with this structure:
+
+```json
+{
+ "comparison_summary": {
+ "winner": "A",
+ "winner_skill": "path/to/winner/skill",
+ "loser_skill": "path/to/loser/skill",
+ "comparator_reasoning": "Brief summary of why comparator chose winner"
+ },
+ "winner_strengths": [
+ "Clear step-by-step instructions for handling multi-page documents",
+ "Included validation script that caught formatting errors",
+ "Explicit guidance on fallback behavior when OCR fails"
+ ],
+ "loser_weaknesses": [
+ "Vague instruction 'process the document appropriately' led to inconsistent behavior",
+ "No script for validation, agent had to improvise and made errors",
+ "No guidance on OCR failure, agent gave up instead of trying alternatives"
+ ],
+ "instruction_following": {
+ "winner": {
+ "score": 9,
+ "issues": [
+ "Minor: skipped optional logging step"
+ ]
+ },
+ "loser": {
+ "score": 6,
+ "issues": [
+ "Did not use the skill's formatting template",
+ "Invented own approach instead of following step 3",
+ "Missed the 'always validate output' instruction"
+ ]
+ }
+ },
+ "improvement_suggestions": [
+ {
+ "priority": "high",
+ "category": "instructions",
+ "suggestion": "Replace 'process the document appropriately' with explicit steps: 1) Extract text, 2) Identify sections, 3) Format per template",
+ "expected_impact": "Would eliminate ambiguity that caused inconsistent behavior"
+ },
+ {
+ "priority": "high",
+ "category": "tools",
+ "suggestion": "Add validate_output.py script similar to winner skill's validation approach",
+ "expected_impact": "Would catch formatting errors before final output"
+ },
+ {
+ "priority": "medium",
+ "category": "error_handling",
+ "suggestion": "Add fallback instructions: 'If OCR fails, try: 1) different resolution, 2) image preprocessing, 3) manual extraction'",
+ "expected_impact": "Would prevent early failure on difficult documents"
+ }
+ ],
+ "transcript_insights": {
+ "winner_execution_pattern": "Read skill -> Followed 5-step process -> Used validation script -> Fixed 2 issues -> Produced output",
+ "loser_execution_pattern": "Read skill -> Unclear on approach -> Tried 3 different methods -> No validation -> Output had errors"
+ }
+}
+```
+
+## Guidelines
+
+- **Be specific**: Quote from skills and transcripts, don't just say "instructions were unclear"
+- **Be actionable**: Suggestions should be concrete changes, not vague advice
+- **Focus on skill improvements**: The goal is to improve the losing skill, not critique the agent
+- **Prioritize by impact**: Which changes would most likely have changed the outcome?
+- **Consider causation**: Did the skill weakness actually cause the worse output, or is it incidental?
+- **Stay objective**: Analyze what happened, don't editorialize
+- **Think about generalization**: Would this improvement help on other evals too?
+
+## Categories for Suggestions
+
+Use these categories to organize improvement suggestions:
+
+| Category | Description |
+|----------|-------------|
+| `instructions` | Changes to the skill's prose instructions |
+| `tools` | Scripts, templates, or utilities to add/modify |
+| `examples` | Example inputs/outputs to include |
+| `error_handling` | Guidance for handling failures |
+| `structure` | Reorganization of skill content |
+| `references` | External docs or resources to add |
+
+## Priority Levels
+
+- **high**: Would likely change the outcome of this comparison
+- **medium**: Would improve quality but may not change win/loss
+- **low**: Nice to have, marginal improvement
+
+---
+
+# Analyzing Benchmark Results
+
+When analyzing benchmark results, the analyzer's purpose is to **surface patterns and anomalies** across multiple runs, not suggest skill improvements.
+
+## Role
+
+Review all benchmark run results and generate freeform notes that help the user understand skill performance. Focus on patterns that wouldn't be visible from aggregate metrics alone.
+
+## Inputs
+
+You receive these parameters in your prompt:
+
+- **benchmark_data_path**: Path to the in-progress benchmark.json with all run results
+- **skill_path**: Path to the skill being benchmarked
+- **output_path**: Where to save the notes (as JSON array of strings)
+
+## Process
+
+### Step 1: Read Benchmark Data
+
+1. Read the benchmark.json containing all run results
+2. Note the configurations tested (with_skill, without_skill)
+3. Understand the run_summary aggregates already calculated
+
+### Step 2: Analyze Per-Assertion Patterns
+
+For each expectation across all runs:
+- Does it **always pass** in both configurations? (may not differentiate skill value)
+- Does it **always fail** in both configurations? (may be broken or beyond capability)
+- Does it **always pass with skill but fail without**? (skill clearly adds value here)
+- Does it **always fail with skill but pass without**? (skill may be hurting)
+- Is it **highly variable**? (flaky expectation or non-deterministic behavior)
+
+### Step 3: Analyze Cross-Eval Patterns
+
+Look for patterns across evals:
+- Are certain eval types consistently harder/easier?
+- Do some evals show high variance while others are stable?
+- Are there surprising results that contradict expectations?
+
+### Step 4: Analyze Metrics Patterns
+
+Look at time_seconds, tokens, tool_calls:
+- Does the skill significantly increase execution time?
+- Is there high variance in resource usage?
+- Are there outlier runs that skew the aggregates?
+
+### Step 5: Generate Notes
+
+Write freeform observations as a list of strings. Each note should:
+- State a specific observation
+- Be grounded in the data (not speculation)
+- Help the user understand something the aggregate metrics don't show
+
+Examples:
+- "Assertion 'Output is a PDF file' passes 100% in both configurations - may not differentiate skill value"
+- "Eval 3 shows high variance (50% ± 40%) - run 2 had an unusual failure that may be flaky"
+- "Without-skill runs consistently fail on table extraction expectations (0% pass rate)"
+- "Skill adds 13s average execution time but improves pass rate by 50%"
+- "Token usage is 80% higher with skill, primarily due to script output parsing"
+- "All 3 without-skill runs for eval 1 produced empty output"
+
+### Step 6: Write Notes
+
+Save notes to `{output_path}` as a JSON array of strings:
+
+```json
+[
+ "Assertion 'Output is a PDF file' passes 100% in both configurations - may not differentiate skill value",
+ "Eval 3 shows high variance (50% ± 40%) - run 2 had an unusual failure",
+ "Without-skill runs consistently fail on table extraction expectations",
+ "Skill adds 13s average execution time but improves pass rate by 50%"
+]
+```
+
+## Guidelines
+
+**DO:**
+- Report what you observe in the data
+- Be specific about which evals, expectations, or runs you're referring to
+- Note patterns that aggregate metrics would hide
+- Provide context that helps interpret the numbers
+
+**DO NOT:**
+- Suggest improvements to the skill (that's for the improvement step, not benchmarking)
+- Make subjective quality judgments ("the output was good/bad")
+- Speculate about causes without evidence
+- Repeat information already in the run_summary aggregates
diff --git a/.claude/skills/skill-creator/agents/comparator.md b/.claude/skills/skill-creator/agents/comparator.md
new file mode 100644
index 0000000..80e00eb
--- /dev/null
+++ b/.claude/skills/skill-creator/agents/comparator.md
@@ -0,0 +1,202 @@
+# Blind Comparator Agent
+
+Compare two outputs WITHOUT knowing which skill produced them.
+
+## Role
+
+The Blind Comparator judges which output better accomplishes the eval task. You receive two outputs labeled A and B, but you do NOT know which skill produced which. This prevents bias toward a particular skill or approach.
+
+Your judgment is based purely on output quality and task completion.
+
+## Inputs
+
+You receive these parameters in your prompt:
+
+- **output_a_path**: Path to the first output file or directory
+- **output_b_path**: Path to the second output file or directory
+- **eval_prompt**: The original task/prompt that was executed
+- **expectations**: List of expectations to check (optional - may be empty)
+
+## Process
+
+### Step 1: Read Both Outputs
+
+1. Examine output A (file or directory)
+2. Examine output B (file or directory)
+3. Note the type, structure, and content of each
+4. If outputs are directories, examine all relevant files inside
+
+### Step 2: Understand the Task
+
+1. Read the eval_prompt carefully
+2. Identify what the task requires:
+ - What should be produced?
+ - What qualities matter (accuracy, completeness, format)?
+ - What would distinguish a good output from a poor one?
+
+### Step 3: Generate Evaluation Rubric
+
+Based on the task, generate a rubric with two dimensions:
+
+**Content Rubric** (what the output contains):
+| Criterion | 1 (Poor) | 3 (Acceptable) | 5 (Excellent) |
+|-----------|----------|----------------|---------------|
+| Correctness | Major errors | Minor errors | Fully correct |
+| Completeness | Missing key elements | Mostly complete | All elements present |
+| Accuracy | Significant inaccuracies | Minor inaccuracies | Accurate throughout |
+
+**Structure Rubric** (how the output is organized):
+| Criterion | 1 (Poor) | 3 (Acceptable) | 5 (Excellent) |
+|-----------|----------|----------------|---------------|
+| Organization | Disorganized | Reasonably organized | Clear, logical structure |
+| Formatting | Inconsistent/broken | Mostly consistent | Professional, polished |
+| Usability | Difficult to use | Usable with effort | Easy to use |
+
+Adapt criteria to the specific task. For example:
+- PDF form → "Field alignment", "Text readability", "Data placement"
+- Document → "Section structure", "Heading hierarchy", "Paragraph flow"
+- Data output → "Schema correctness", "Data types", "Completeness"
+
+### Step 4: Evaluate Each Output Against the Rubric
+
+For each output (A and B):
+
+1. **Score each criterion** on the rubric (1-5 scale)
+2. **Calculate dimension totals**: Content score, Structure score
+3. **Calculate overall score**: Average of dimension scores, scaled to 1-10
+
+### Step 5: Check Assertions (if provided)
+
+If expectations are provided:
+
+1. Check each expectation against output A
+2. Check each expectation against output B
+3. Count pass rates for each output
+4. Use expectation scores as secondary evidence (not the primary decision factor)
+
+### Step 6: Determine the Winner
+
+Compare A and B based on (in priority order):
+
+1. **Primary**: Overall rubric score (content + structure)
+2. **Secondary**: Assertion pass rates (if applicable)
+3. **Tiebreaker**: If truly equal, declare a TIE
+
+Be decisive - ties should be rare. One output is usually better, even if marginally.
+
+### Step 7: Write Comparison Results
+
+Save results to a JSON file at the path specified (or `comparison.json` if not specified).
+
+## Output Format
+
+Write a JSON file with this structure:
+
+```json
+{
+ "winner": "A",
+ "reasoning": "Output A provides a complete solution with proper formatting and all required fields. Output B is missing the date field and has formatting inconsistencies.",
+ "rubric": {
+ "A": {
+ "content": {
+ "correctness": 5,
+ "completeness": 5,
+ "accuracy": 4
+ },
+ "structure": {
+ "organization": 4,
+ "formatting": 5,
+ "usability": 4
+ },
+ "content_score": 4.7,
+ "structure_score": 4.3,
+ "overall_score": 9.0
+ },
+ "B": {
+ "content": {
+ "correctness": 3,
+ "completeness": 2,
+ "accuracy": 3
+ },
+ "structure": {
+ "organization": 3,
+ "formatting": 2,
+ "usability": 3
+ },
+ "content_score": 2.7,
+ "structure_score": 2.7,
+ "overall_score": 5.4
+ }
+ },
+ "output_quality": {
+ "A": {
+ "score": 9,
+ "strengths": ["Complete solution", "Well-formatted", "All fields present"],
+ "weaknesses": ["Minor style inconsistency in header"]
+ },
+ "B": {
+ "score": 5,
+ "strengths": ["Readable output", "Correct basic structure"],
+ "weaknesses": ["Missing date field", "Formatting inconsistencies", "Partial data extraction"]
+ }
+ },
+ "expectation_results": {
+ "A": {
+ "passed": 4,
+ "total": 5,
+ "pass_rate": 0.80,
+ "details": [
+ {"text": "Output includes name", "passed": true},
+ {"text": "Output includes date", "passed": true},
+ {"text": "Format is PDF", "passed": true},
+ {"text": "Contains signature", "passed": false},
+ {"text": "Readable text", "passed": true}
+ ]
+ },
+ "B": {
+ "passed": 3,
+ "total": 5,
+ "pass_rate": 0.60,
+ "details": [
+ {"text": "Output includes name", "passed": true},
+ {"text": "Output includes date", "passed": false},
+ {"text": "Format is PDF", "passed": true},
+ {"text": "Contains signature", "passed": false},
+ {"text": "Readable text", "passed": true}
+ ]
+ }
+ }
+}
+```
+
+If no expectations were provided, omit the `expectation_results` field entirely.
+
+## Field Descriptions
+
+- **winner**: "A", "B", or "TIE"
+- **reasoning**: Clear explanation of why the winner was chosen (or why it's a tie)
+- **rubric**: Structured rubric evaluation for each output
+ - **content**: Scores for content criteria (correctness, completeness, accuracy)
+ - **structure**: Scores for structure criteria (organization, formatting, usability)
+ - **content_score**: Average of content criteria (1-5)
+ - **structure_score**: Average of structure criteria (1-5)
+ - **overall_score**: Combined score scaled to 1-10
+- **output_quality**: Summary quality assessment
+ - **score**: 1-10 rating (should match rubric overall_score)
+ - **strengths**: List of positive aspects
+ - **weaknesses**: List of issues or shortcomings
+- **expectation_results**: (Only if expectations provided)
+ - **passed**: Number of expectations that passed
+ - **total**: Total number of expectations
+ - **pass_rate**: Fraction passed (0.0 to 1.0)
+ - **details**: Individual expectation results
+
+## Guidelines
+
+- **Stay blind**: DO NOT try to infer which skill produced which output. Judge purely on output quality.
+- **Be specific**: Cite specific examples when explaining strengths and weaknesses.
+- **Be decisive**: Choose a winner unless outputs are genuinely equivalent.
+- **Output quality first**: Assertion scores are secondary to overall task completion.
+- **Be objective**: Don't favor outputs based on style preferences; focus on correctness and completeness.
+- **Explain your reasoning**: The reasoning field should make it clear why you chose the winner.
+- **Handle edge cases**: If both outputs fail, pick the one that fails less badly. If both are excellent, pick the one that's marginally better.
diff --git a/.claude/skills/skill-creator/agents/grader.md b/.claude/skills/skill-creator/agents/grader.md
new file mode 100644
index 0000000..558ab05
--- /dev/null
+++ b/.claude/skills/skill-creator/agents/grader.md
@@ -0,0 +1,223 @@
+# Grader Agent
+
+Evaluate expectations against an execution transcript and outputs.
+
+## Role
+
+The Grader reviews a transcript and output files, then determines whether each expectation passes or fails. Provide clear evidence for each judgment.
+
+You have two jobs: grade the outputs, and critique the evals themselves. A passing grade on a weak assertion is worse than useless — it creates false confidence. When you notice an assertion that's trivially satisfied, or an important outcome that no assertion checks, say so.
+
+## Inputs
+
+You receive these parameters in your prompt:
+
+- **expectations**: List of expectations to evaluate (strings)
+- **transcript_path**: Path to the execution transcript (markdown file)
+- **outputs_dir**: Directory containing output files from execution
+
+## Process
+
+### Step 1: Read the Transcript
+
+1. Read the transcript file completely
+2. Note the eval prompt, execution steps, and final result
+3. Identify any issues or errors documented
+
+### Step 2: Examine Output Files
+
+1. List files in outputs_dir
+2. Read/examine each file relevant to the expectations. If outputs aren't plain text, use the inspection tools provided in your prompt — don't rely solely on what the transcript says the executor produced.
+3. Note contents, structure, and quality
+
+### Step 3: Evaluate Each Assertion
+
+For each expectation:
+
+1. **Search for evidence** in the transcript and outputs
+2. **Determine verdict**:
+ - **PASS**: Clear evidence the expectation is true AND the evidence reflects genuine task completion, not just surface-level compliance
+ - **FAIL**: No evidence, or evidence contradicts the expectation, or the evidence is superficial (e.g., correct filename but empty/wrong content)
+3. **Cite the evidence**: Quote the specific text or describe what you found
+
+### Step 4: Extract and Verify Claims
+
+Beyond the predefined expectations, extract implicit claims from the outputs and verify them:
+
+1. **Extract claims** from the transcript and outputs:
+ - Factual statements ("The form has 12 fields")
+ - Process claims ("Used pypdf to fill the form")
+ - Quality claims ("All fields were filled correctly")
+
+2. **Verify each claim**:
+ - **Factual claims**: Can be checked against the outputs or external sources
+ - **Process claims**: Can be verified from the transcript
+ - **Quality claims**: Evaluate whether the claim is justified
+
+3. **Flag unverifiable claims**: Note claims that cannot be verified with available information
+
+This catches issues that predefined expectations might miss.
+
+### Step 5: Read User Notes
+
+If `{outputs_dir}/user_notes.md` exists:
+1. Read it and note any uncertainties or issues flagged by the executor
+2. Include relevant concerns in the grading output
+3. These may reveal problems even when expectations pass
+
+### Step 6: Critique the Evals
+
+After grading, consider whether the evals themselves could be improved. Only surface suggestions when there's a clear gap.
+
+Good suggestions test meaningful outcomes — assertions that are hard to satisfy without actually doing the work correctly. Think about what makes an assertion *discriminating*: it passes when the skill genuinely succeeds and fails when it doesn't.
+
+Suggestions worth raising:
+- An assertion that passed but would also pass for a clearly wrong output (e.g., checking filename existence but not file content)
+- An important outcome you observed — good or bad — that no assertion covers at all
+- An assertion that can't actually be verified from the available outputs
+
+Keep the bar high. The goal is to flag things the eval author would say "good catch" about, not to nitpick every assertion.
+
+### Step 7: Write Grading Results
+
+Save results to `{outputs_dir}/../grading.json` (sibling to outputs_dir).
+
+## Grading Criteria
+
+**PASS when**:
+- The transcript or outputs clearly demonstrate the expectation is true
+- Specific evidence can be cited
+- The evidence reflects genuine substance, not just surface compliance (e.g., a file exists AND contains correct content, not just the right filename)
+
+**FAIL when**:
+- No evidence found for the expectation
+- Evidence contradicts the expectation
+- The expectation cannot be verified from available information
+- The evidence is superficial — the assertion is technically satisfied but the underlying task outcome is wrong or incomplete
+- The output appears to meet the assertion by coincidence rather than by actually doing the work
+
+**When uncertain**: Default to FAIL. The burden of proof is on the expectation — pass only when the evidence clearly demonstrates it.
+
+### Step 8: Read Executor Metrics and Timing
+
+1. If `{outputs_dir}/metrics.json` exists, read it and include in grading output
+2. If `{outputs_dir}/../timing.json` exists, read it and include timing data
+
+## Output Format
+
+Write a JSON file with this structure:
+
+```json
+{
+ "expectations": [
+ {
+ "text": "The output includes the name 'John Smith'",
+ "passed": true,
+ "evidence": "Found in transcript Step 3: 'Extracted names: John Smith, Sarah Johnson'"
+ },
+ {
+ "text": "The spreadsheet has a SUM formula in cell B10",
+ "passed": false,
+ "evidence": "No spreadsheet was created. The output was a text file."
+ },
+ {
+ "text": "The assistant used the skill's OCR script",
+ "passed": true,
+ "evidence": "Transcript Step 2 shows: 'Tool: Bash - python ocr_script.py image.png'"
+ }
+ ],
+ "summary": {
+ "passed": 2,
+ "failed": 1,
+ "total": 3,
+ "pass_rate": 0.67
+ },
+ "execution_metrics": {
+ "tool_calls": {
+ "Read": 5,
+ "Write": 2,
+ "Bash": 8
+ },
+ "total_tool_calls": 15,
+ "total_steps": 6,
+ "errors_encountered": 0,
+ "output_chars": 12450,
+ "transcript_chars": 3200
+ },
+ "timing": {
+ "executor_duration_seconds": 165.0,
+ "grader_duration_seconds": 26.0,
+ "total_duration_seconds": 191.0
+ },
+ "claims": [
+ {
+ "claim": "The form has 12 fillable fields",
+ "type": "factual",
+ "verified": true,
+ "evidence": "Counted 12 fields in field_info.json"
+ },
+ {
+ "claim": "All required fields were populated",
+ "type": "quality",
+ "verified": false,
+ "evidence": "Reference section was left blank despite data being available"
+ }
+ ],
+ "user_notes_summary": {
+ "uncertainties": ["Used 2023 data, may be stale"],
+ "needs_review": [],
+ "workarounds": ["Fell back to text overlay for non-fillable fields"]
+ },
+ "eval_feedback": {
+ "suggestions": [
+ {
+ "assertion": "The output includes the name 'John Smith'",
+ "reason": "A hallucinated document that mentions the name would also pass — consider checking it appears as the primary contact with matching phone and email from the input"
+ },
+ {
+ "reason": "No assertion checks whether the extracted phone numbers match the input — I observed incorrect numbers in the output that went uncaught"
+ }
+ ],
+ "overall": "Assertions check presence but not correctness. Consider adding content verification."
+ }
+}
+```
+
+## Field Descriptions
+
+- **expectations**: Array of graded expectations
+ - **text**: The original expectation text
+ - **passed**: Boolean - true if expectation passes
+ - **evidence**: Specific quote or description supporting the verdict
+- **summary**: Aggregate statistics
+ - **passed**: Count of passed expectations
+ - **failed**: Count of failed expectations
+ - **total**: Total expectations evaluated
+ - **pass_rate**: Fraction passed (0.0 to 1.0)
+- **execution_metrics**: Copied from executor's metrics.json (if available)
+ - **output_chars**: Total character count of output files (proxy for tokens)
+ - **transcript_chars**: Character count of transcript
+- **timing**: Wall clock timing from timing.json (if available)
+ - **executor_duration_seconds**: Time spent in executor subagent
+ - **total_duration_seconds**: Total elapsed time for the run
+- **claims**: Extracted and verified claims from the output
+ - **claim**: The statement being verified
+ - **type**: "factual", "process", or "quality"
+ - **verified**: Boolean - whether the claim holds
+ - **evidence**: Supporting or contradicting evidence
+- **user_notes_summary**: Issues flagged by the executor
+ - **uncertainties**: Things the executor wasn't sure about
+ - **needs_review**: Items requiring human attention
+ - **workarounds**: Places where the skill didn't work as expected
+- **eval_feedback**: Improvement suggestions for the evals (only when warranted)
+ - **suggestions**: List of concrete suggestions, each with a `reason` and optionally an `assertion` it relates to
+ - **overall**: Brief assessment — can be "No suggestions, evals look solid" if nothing to flag
+
+## Guidelines
+
+- **Be objective**: Base verdicts on evidence, not assumptions
+- **Be specific**: Quote the exact text that supports your verdict
+- **Be thorough**: Check both transcript and output files
+- **Be consistent**: Apply the same standard to each expectation
+- **Explain failures**: Make it clear why evidence was insufficient
+- **No partial credit**: Each expectation is pass or fail, not partial
diff --git a/.claude/skills/skill-creator/assets/eval_review.html b/.claude/skills/skill-creator/assets/eval_review.html
new file mode 100644
index 0000000..938ff32
--- /dev/null
+++ b/.claude/skills/skill-creator/assets/eval_review.html
@@ -0,0 +1,146 @@
+
+
+
+
+
+ Eval Set Review - __SKILL_NAME_PLACEHOLDER__
+
+
+
+
+
+
+ Eval Set Review: __SKILL_NAME_PLACEHOLDER__
+ Current description: __SKILL_DESCRIPTION_PLACEHOLDER__
+
+
+ + Add Query
+ Export Eval Set
+
+
+
+
+
+ Query
+ Should Trigger
+ Actions
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/skill-creator/eval-viewer/generate_review.py b/.claude/skills/skill-creator/eval-viewer/generate_review.py
new file mode 100644
index 0000000..7fa5978
--- /dev/null
+++ b/.claude/skills/skill-creator/eval-viewer/generate_review.py
@@ -0,0 +1,471 @@
+#!/usr/bin/env python3
+"""Generate and serve a review page for eval results.
+
+Reads the workspace directory, discovers runs (directories with outputs/),
+embeds all output data into a self-contained HTML page, and serves it via
+a tiny HTTP server. Feedback auto-saves to feedback.json in the workspace.
+
Usage:
    python generate_review.py WORKSPACE [--port PORT] [--skill-name NAME]
    python generate_review.py WORKSPACE --previous-workspace /path/to/old/workspace
+
+No dependencies beyond the Python stdlib are required.
+"""
+
+import argparse
+import base64
+import json
+import mimetypes
+import os
+import re
+import signal
+import subprocess
+import sys
+import time
+import webbrowser
+from functools import partial
+from http.server import HTTPServer, BaseHTTPRequestHandler
+from pathlib import Path
+
# Executor bookkeeping files to exclude from output listings (they are shown
# elsewhere in the viewer, not as run artifacts)
METADATA_FILES: set[str] = {"transcript.md", "user_notes.md", "metrics.json"}

# Extensions we render as inline text
TEXT_EXTENSIONS: set[str] = {
    ".txt", ".md", ".json", ".csv", ".py", ".js", ".ts", ".tsx", ".jsx",
    ".yaml", ".yml", ".xml", ".html", ".css", ".sh", ".rb", ".go", ".rs",
    ".java", ".c", ".cpp", ".h", ".hpp", ".sql", ".r", ".toml",
}

# Extensions we render as inline images
IMAGE_EXTENSIONS: set[str] = {".png", ".jpg", ".jpeg", ".gif", ".svg", ".webp"}
+
# Explicit MIME types for extensions mimetypes may guess poorly (or not at all)
MIME_OVERRIDES = {
    ".svg": "image/svg+xml",
    ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
    ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
    ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
}


def get_mime_type(path: Path) -> str:
    """Return the MIME type for *path*, preferring explicit overrides.

    Falls back to mimetypes.guess_type(), and finally to the generic
    "application/octet-stream" when the extension is unknown.
    """
    override = MIME_OVERRIDES.get(path.suffix.lower())
    if override is not None:
        return override
    guessed, _encoding = mimetypes.guess_type(str(path))
    return guessed if guessed else "application/octet-stream"
+
+
def find_runs(workspace: Path) -> list[dict]:
    """Recursively find directories that contain an outputs/ subdirectory.

    Returns run dicts sorted by eval_id (runs without one sort last),
    then by run id for a stable order.
    """
    runs: list[dict] = []
    _find_runs_recursive(workspace, workspace, runs)

    # build_run() always stores an "eval_id" key, possibly None, so
    # r.get("eval_id", float("inf")) would return None (the default only
    # applies to *missing* keys) and comparing None with an int raises
    # TypeError. Map None to +inf explicitly instead.
    def _sort_key(r: dict) -> tuple:
        eval_id = r.get("eval_id")
        return (float("inf") if eval_id is None else eval_id, r["id"])

    runs.sort(key=_sort_key)
    return runs
+
+
def _find_runs_recursive(root: Path, current: Path, runs: list[dict]) -> None:
    """Depth-first scan for run directories, appending results to *runs*.

    A directory containing an outputs/ subdirectory is treated as a run
    and is not descended into further.
    """
    if not current.is_dir():
        return

    if (current / "outputs").is_dir():
        run = build_run(root, current)
        if run:
            runs.append(run)
        return

    # Prune bookkeeping and input directories that can never contain runs.
    ignored = {"node_modules", ".git", "__pycache__", "skill", "inputs"}
    for entry in sorted(current.iterdir()):
        if entry.is_dir() and entry.name not in ignored:
            _find_runs_recursive(root, entry, runs)
+
+
def build_run(root: Path, run_dir: Path) -> dict | None:
    """Build a run dict with prompt, outputs, and grading data.

    The prompt is resolved in priority order:
      1. eval_metadata.json in the run dir, then in its parent
      2. the "## Eval Prompt" section of transcript.md (run dir, then outputs/)
      3. the literal placeholder "(No prompt found)"

    Returns a dict with keys: id (workspace-relative path with separators
    replaced by "-"), prompt, eval_id (may be None), outputs (embedded
    files from outputs/, excluding METADATA_FILES), and grading (parsed
    grading.json or None). Note: every code path currently returns a dict;
    the Optional annotation accommodates callers that truthiness-check.
    """
    prompt = ""
    eval_id = None

    # Try eval_metadata.json
    # The loop only breaks once a prompt is found, so a parent-level file can
    # overwrite eval_id read from a run-level file that lacked a prompt.
    for candidate in [run_dir / "eval_metadata.json", run_dir.parent / "eval_metadata.json"]:
        if candidate.exists():
            try:
                metadata = json.loads(candidate.read_text())
                prompt = metadata.get("prompt", "")
                eval_id = metadata.get("eval_id")
            except (json.JSONDecodeError, OSError):
                pass
        if prompt:
            break

    # Fall back to transcript.md
    if not prompt:
        for candidate in [run_dir / "transcript.md", run_dir / "outputs" / "transcript.md"]:
            if candidate.exists():
                try:
                    text = candidate.read_text()
                    # Capture everything between "## Eval Prompt" and the next
                    # "##" heading (or end of file).
                    match = re.search(r"## Eval Prompt\n\n([\s\S]*?)(?=\n##|$)", text)
                    if match:
                        prompt = match.group(1).strip()
                except OSError:
                    pass
            if prompt:
                break

    if not prompt:
        prompt = "(No prompt found)"

    # Normalize both POSIX and Windows separators into a flat, URL-safe id.
    run_id = str(run_dir.relative_to(root)).replace("/", "-").replace("\\", "-")

    # Collect output files
    outputs_dir = run_dir / "outputs"
    output_files: list[dict] = []
    if outputs_dir.is_dir():
        for f in sorted(outputs_dir.iterdir()):
            if f.is_file() and f.name not in METADATA_FILES:
                output_files.append(embed_file(f))

    # Load grading if present (run dir first, then parent; first parseable,
    # non-empty result wins)
    grading = None
    for candidate in [run_dir / "grading.json", run_dir.parent / "grading.json"]:
        if candidate.exists():
            try:
                grading = json.loads(candidate.read_text())
            except (json.JSONDecodeError, OSError):
                pass
        if grading:
            break

    return {
        "id": run_id,
        "prompt": prompt,
        "eval_id": eval_id,
        "outputs": output_files,
        "grading": grading,
    }
+
+
def embed_file(path: Path) -> dict:
    """Read a file and return an embedded representation.

    Dispatch is by extension:
      - TEXT_EXTENSIONS  -> inlined text content
      - IMAGE_EXTENSIONS -> data-URI image
      - .pdf             -> data-URI PDF
      - .xlsx            -> raw base64 payload (decoded client-side)
      - anything else    -> generic base64 download link

    Read failures yield type "text" with a placeholder for text files and
    type "error" for all binary variants (matching the original contract).
    """
    ext = path.suffix.lower()
    mime = get_mime_type(path)

    if ext in TEXT_EXTENSIONS:
        try:
            content = path.read_text(errors="replace")
        except OSError:
            content = "(Error reading file)"
        return {
            "name": path.name,
            "type": "text",
            "content": content,
        }

    # Every remaining variant embeds the raw bytes as base64; read once
    # instead of repeating the read/encode/error dance per branch.
    try:
        b64 = base64.b64encode(path.read_bytes()).decode("ascii")
    except OSError:
        return {"name": path.name, "type": "error", "content": "(Error reading file)"}

    if ext in IMAGE_EXTENSIONS:
        return {
            "name": path.name,
            "type": "image",
            "mime": mime,
            "data_uri": f"data:{mime};base64,{b64}",
        }
    if ext == ".pdf":
        return {
            "name": path.name,
            "type": "pdf",
            "data_uri": f"data:{mime};base64,{b64}",
        }
    if ext == ".xlsx":
        return {
            "name": path.name,
            "type": "xlsx",
            "data_b64": b64,
        }
    # Binary / unknown — base64 download link
    return {
        "name": path.name,
        "type": "binary",
        "mime": mime,
        "data_uri": f"data:{mime};base64,{b64}",
    }
+
+
def load_previous_iteration(workspace: Path) -> dict[str, dict]:
    """Load previous iteration's feedback and outputs.

    Returns a map of run_id -> {"feedback": str, "outputs": list[dict]}.
    Feedback entries with no matching run directory are kept with empty
    outputs so the reviewer's notes are never silently dropped.
    """
    # Read saved reviewer feedback, keyed by run id (non-empty entries only).
    reviews_by_id: dict[str, str] = {}
    feedback_file = workspace / "feedback.json"
    if feedback_file.exists():
        try:
            payload = json.loads(feedback_file.read_text())
            collected: dict[str, str] = {}
            for review in payload.get("reviews", []):
                if review.get("feedback", "").strip():
                    collected[review["run_id"]] = review["feedback"]
            reviews_by_id = collected
        except (json.JSONDecodeError, OSError, KeyError):
            pass

    # Pair each discovered run's outputs with its feedback (if any).
    combined: dict[str, dict] = {}
    for run in find_runs(workspace):
        combined[run["id"]] = {
            "feedback": reviews_by_id.get(run["id"], ""),
            "outputs": run.get("outputs", []),
        }

    # Preserve feedback whose run directory no longer exists.
    for run_id, text in reviews_by_id.items():
        combined.setdefault(run_id, {"feedback": text, "outputs": []})

    return combined
+
+
def generate_html(
    runs: list[dict],
    skill_name: str,
    previous: dict[str, dict] | None = None,
    benchmark: dict | None = None,
) -> str:
    """Generate the complete standalone HTML page with embedded data.

    Reads viewer.html (located next to this script) and injects a single
    EMBEDDED_DATA constant in place of the /*__EMBEDDED_DATA__*/ marker.
    """
    template = (Path(__file__).parent / "viewer.html").read_text()

    # Split the prior-iteration context into the two maps the template
    # expects, skipping empty entries.
    prev_feedback: dict[str, str] = {}
    prev_outputs: dict[str, list[dict]] = {}
    for run_id, entry in (previous or {}).items():
        if entry.get("feedback"):
            prev_feedback[run_id] = entry["feedback"]
        if entry.get("outputs"):
            prev_outputs[run_id] = entry["outputs"]

    payload: dict = {
        "skill_name": skill_name,
        "runs": runs,
        "previous_feedback": prev_feedback,
        "previous_outputs": prev_outputs,
    }
    if benchmark:
        payload["benchmark"] = benchmark

    injected = f"const EMBEDDED_DATA = {json.dumps(payload)};"
    return template.replace("/*__EMBEDDED_DATA__*/", injected)
+
+
+# ---------------------------------------------------------------------------
+# HTTP server (stdlib only, zero dependencies)
+# ---------------------------------------------------------------------------
+
def _kill_port(port: int) -> None:
    """Kill any process listening on the given port.

    Best-effort: relies on lsof being available; silently does nothing if
    the lookup times out, and warns if lsof is missing entirely.
    """
    try:
        lookup = subprocess.run(
            ["lsof", "-ti", f":{port}"],
            capture_output=True, text=True, timeout=5,
        )
    except subprocess.TimeoutExpired:
        return
    except FileNotFoundError:
        print("Note: lsof not found, cannot check if port is in use", file=sys.stderr)
        return

    listing = lookup.stdout.strip()
    for token in listing.split("\n"):
        token = token.strip()
        if not token:
            continue
        try:
            os.kill(int(token), signal.SIGTERM)
        except (ProcessLookupError, ValueError):
            pass
    if listing:
        # Give the old process a moment to release the socket.
        time.sleep(0.5)
+
class ReviewHandler(BaseHTTPRequestHandler):
    """Serves the review HTML and handles feedback saves.

    Regenerates the HTML on each page load so that refreshing the browser
    picks up new eval outputs without restarting the server.

    Routes:
      GET  /, /index.html -> freshly generated review page
      GET  /api/feedback  -> current feedback.json contents (or "{}")
      POST /api/feedback  -> overwrite feedback.json with the request body
    """

    def __init__(
        self,
        workspace: Path,
        skill_name: str,
        feedback_path: Path,
        previous: dict[str, dict],
        benchmark_path: Path | None,
        *args,
        **kwargs,
    ):
        # Attributes are assigned before super().__init__() because the
        # BaseHTTPRequestHandler constructor handles the request (invoking
        # do_GET/do_POST) during construction.
        self.workspace = workspace
        self.skill_name = skill_name
        self.feedback_path = feedback_path
        self.previous = previous
        self.benchmark_path = benchmark_path
        super().__init__(*args, **kwargs)

    def do_GET(self) -> None:
        """Serve the review page or the saved feedback JSON."""
        if self.path == "/" or self.path == "/index.html":
            # Regenerate HTML on each request (re-scans workspace for new outputs)
            runs = find_runs(self.workspace)
            benchmark = None
            if self.benchmark_path and self.benchmark_path.exists():
                try:
                    benchmark = json.loads(self.benchmark_path.read_text())
                except (json.JSONDecodeError, OSError):
                    # Unparseable benchmark data: render the page without it.
                    pass
            html = generate_html(runs, self.skill_name, self.previous, benchmark)
            content = html.encode("utf-8")
            self.send_response(200)
            self.send_header("Content-Type", "text/html; charset=utf-8")
            self.send_header("Content-Length", str(len(content)))
            self.end_headers()
            self.wfile.write(content)
        elif self.path == "/api/feedback":
            # Serve the raw feedback file; "{}" when nothing has been saved yet.
            data = b"{}"
            if self.feedback_path.exists():
                data = self.feedback_path.read_bytes()
            self.send_response(200)
            self.send_header("Content-Type", "application/json")
            self.send_header("Content-Length", str(len(data)))
            self.end_headers()
            self.wfile.write(data)
        else:
            self.send_error(404)

    def do_POST(self) -> None:
        """Validate and persist posted feedback JSON to feedback_path."""
        if self.path == "/api/feedback":
            length = int(self.headers.get("Content-Length", 0))
            body = self.rfile.read(length)
            try:
                data = json.loads(body)
                if not isinstance(data, dict) or "reviews" not in data:
                    raise ValueError("Expected JSON object with 'reviews' key")
                self.feedback_path.write_text(json.dumps(data, indent=2) + "\n")
                resp = b'{"ok":true}'
                self.send_response(200)
            except (json.JSONDecodeError, OSError, ValueError) as e:
                resp = json.dumps({"error": str(e)}).encode()
                self.send_response(500)
            self.send_header("Content-Type", "application/json")
            self.send_header("Content-Length", str(len(resp)))
            self.end_headers()
            self.wfile.write(resp)
        else:
            self.send_error(404)

    def log_message(self, format: str, *args: object) -> None:
        # Suppress request logging to keep terminal clean
        pass
+
+
def main() -> None:
    """CLI entry point: scan the workspace, then serve or write the review page."""
    parser = argparse.ArgumentParser(description="Generate and serve eval review")
    parser.add_argument("workspace", type=Path, help="Path to workspace directory")
    parser.add_argument("--port", "-p", type=int, default=3117, help="Server port (default: 3117)")
    parser.add_argument("--skill-name", "-n", type=str, default=None, help="Skill name for header")
    parser.add_argument(
        "--previous-workspace", type=Path, default=None,
        help="Path to previous iteration's workspace (shows old outputs and feedback as context)",
    )
    parser.add_argument(
        "--benchmark", type=Path, default=None,
        help="Path to benchmark.json to show in the Benchmark tab",
    )
    parser.add_argument(
        "--static", "-s", type=Path, default=None,
        help="Write standalone HTML to this path instead of starting a server",
    )
    args = parser.parse_args()

    workspace = args.workspace.resolve()
    if not workspace.is_dir():
        print(f"Error: {workspace} is not a directory", file=sys.stderr)
        sys.exit(1)

    runs = find_runs(workspace)
    if not runs:
        print(f"No runs found in {workspace}", file=sys.stderr)
        sys.exit(1)

    skill_name = args.skill_name if args.skill_name else workspace.name.replace("-workspace", "")
    feedback_path = workspace / "feedback.json"

    previous: dict[str, dict] = {}
    if args.previous_workspace is not None:
        previous = load_previous_iteration(args.previous_workspace.resolve())

    benchmark_path = args.benchmark.resolve() if args.benchmark is not None else None
    benchmark = None
    if benchmark_path is not None and benchmark_path.exists():
        try:
            benchmark = json.loads(benchmark_path.read_text())
        except (json.JSONDecodeError, OSError):
            pass

    # Static mode: write the self-contained page and exit without serving.
    if args.static:
        page = generate_html(runs, skill_name, previous, benchmark)
        args.static.parent.mkdir(parents=True, exist_ok=True)
        args.static.write_text(page)
        print(f"\n Static viewer written to: {args.static}\n")
        sys.exit(0)

    # Free the preferred port if a stale viewer is still bound to it.
    port = args.port
    _kill_port(port)
    factory = partial(ReviewHandler, workspace, skill_name, feedback_path, previous, benchmark_path)
    try:
        server = HTTPServer(("127.0.0.1", port), factory)
    except OSError:
        # Port still in use after kill attempt — let the OS pick a free one.
        server = HTTPServer(("127.0.0.1", 0), factory)
        port = server.server_address[1]

    url = f"http://localhost:{port}"
    print("\n Eval Viewer")
    print(" ─────────────────────────────────")
    print(f" URL: {url}")
    print(f" Workspace: {workspace}")
    print(f" Feedback: {feedback_path}")
    if previous:
        print(f" Previous: {args.previous_workspace} ({len(previous)} runs)")
    if benchmark_path:
        print(f" Benchmark: {benchmark_path}")
    print("\n Press Ctrl+C to stop.\n")

    webbrowser.open(url)

    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print("\nStopped.")
        server.server_close()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.claude/skills/skill-creator/eval-viewer/viewer.html b/.claude/skills/skill-creator/eval-viewer/viewer.html
new file mode 100644
index 0000000..6d8e963
--- /dev/null
+++ b/.claude/skills/skill-creator/eval-viewer/viewer.html
@@ -0,0 +1,1325 @@
+
+
+
+
+
+ Eval Review
+
+
+
+
+
+
+
+
+
+
+
+
+ Outputs
+ Benchmark
+
+
+
+
+
+
+
+
+
+
+
+
+
No output files found
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ← Previous
+ Submit All Reviews
+ Next →
+
+
+
+
+
+
+
No benchmark data available. Run a benchmark to see quantitative results here.
+
+
+
+
+
+
+
+
Review Complete
+
Your feedback has been saved. Go back to your Claude Code session and tell Claude you're done reviewing.
+
+ OK
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/skill-creator/references/schemas.md b/.claude/skills/skill-creator/references/schemas.md
new file mode 100644
index 0000000..b6eeaa2
--- /dev/null
+++ b/.claude/skills/skill-creator/references/schemas.md
@@ -0,0 +1,430 @@
+# JSON Schemas
+
+This document defines the JSON schemas used by skill-creator.
+
+---
+
+## evals.json
+
+Defines the evals for a skill. Located at `evals/evals.json` within the skill directory.
+
+```json
+{
+ "skill_name": "example-skill",
+ "evals": [
+ {
+ "id": 1,
+ "prompt": "User's example prompt",
+ "expected_output": "Description of expected result",
+ "files": ["evals/files/sample1.pdf"],
+ "expectations": [
+ "The output includes X",
+ "The skill used script Y"
+ ]
+ }
+ ]
+}
+```
+
+**Fields:**
+- `skill_name`: Name matching the skill's frontmatter
+- `evals[].id`: Unique integer identifier
+- `evals[].prompt`: The task to execute
+- `evals[].expected_output`: Human-readable description of success
+- `evals[].files`: Optional list of input file paths (relative to skill root)
+- `evals[].expectations`: List of verifiable statements
+
+---
+
+## history.json
+
+Tracks version progression in Improve mode. Located at workspace root.
+
+```json
+{
+ "started_at": "2026-01-15T10:30:00Z",
+ "skill_name": "pdf",
+ "current_best": "v2",
+ "iterations": [
+ {
+ "version": "v0",
+ "parent": null,
+ "expectation_pass_rate": 0.65,
+ "grading_result": "baseline",
+ "is_current_best": false
+ },
+ {
+ "version": "v1",
+ "parent": "v0",
+ "expectation_pass_rate": 0.75,
+ "grading_result": "won",
+ "is_current_best": false
+ },
+ {
+ "version": "v2",
+ "parent": "v1",
+ "expectation_pass_rate": 0.85,
+ "grading_result": "won",
+ "is_current_best": true
+ }
+ ]
+}
+```
+
+**Fields:**
+- `started_at`: ISO timestamp of when improvement started
+- `skill_name`: Name of the skill being improved
+- `current_best`: Version identifier of the best performer
+- `iterations[].version`: Version identifier (v0, v1, ...)
+- `iterations[].parent`: Parent version this was derived from
+- `iterations[].expectation_pass_rate`: Pass rate from grading
+- `iterations[].grading_result`: "baseline", "won", "lost", or "tie"
+- `iterations[].is_current_best`: Whether this is the current best version
+
+---
+
+## grading.json
+
Output from the grader agent. Located at `<run-dir>/grading.json`.
+
+```json
+{
+ "expectations": [
+ {
+ "text": "The output includes the name 'John Smith'",
+ "passed": true,
+ "evidence": "Found in transcript Step 3: 'Extracted names: John Smith, Sarah Johnson'"
+ },
+ {
+ "text": "The spreadsheet has a SUM formula in cell B10",
+ "passed": false,
+ "evidence": "No spreadsheet was created. The output was a text file."
+ }
+ ],
+ "summary": {
+ "passed": 2,
+ "failed": 1,
+ "total": 3,
+ "pass_rate": 0.67
+ },
+ "execution_metrics": {
+ "tool_calls": {
+ "Read": 5,
+ "Write": 2,
+ "Bash": 8
+ },
+ "total_tool_calls": 15,
+ "total_steps": 6,
+ "errors_encountered": 0,
+ "output_chars": 12450,
+ "transcript_chars": 3200
+ },
+ "timing": {
+ "executor_duration_seconds": 165.0,
+ "grader_duration_seconds": 26.0,
+ "total_duration_seconds": 191.0
+ },
+ "claims": [
+ {
+ "claim": "The form has 12 fillable fields",
+ "type": "factual",
+ "verified": true,
+ "evidence": "Counted 12 fields in field_info.json"
+ }
+ ],
+ "user_notes_summary": {
+ "uncertainties": ["Used 2023 data, may be stale"],
+ "needs_review": [],
+ "workarounds": ["Fell back to text overlay for non-fillable fields"]
+ },
+ "eval_feedback": {
+ "suggestions": [
+ {
+ "assertion": "The output includes the name 'John Smith'",
+ "reason": "A hallucinated document that mentions the name would also pass"
+ }
+ ],
+ "overall": "Assertions check presence but not correctness."
+ }
+}
+```
+
+**Fields:**
+- `expectations[]`: Graded expectations with evidence
+- `summary`: Aggregate pass/fail counts
+- `execution_metrics`: Tool usage and output size (from executor's metrics.json)
+- `timing`: Wall clock timing (from timing.json)
+- `claims`: Extracted and verified claims from the output
+- `user_notes_summary`: Issues flagged by the executor
+- `eval_feedback`: (optional) Improvement suggestions for the evals, only present when the grader identifies issues worth raising
+
+---
+
+## metrics.json
+
Output from the executor agent. Located at `<run-dir>/outputs/metrics.json`.
+
+```json
+{
+ "tool_calls": {
+ "Read": 5,
+ "Write": 2,
+ "Bash": 8,
+ "Edit": 1,
+ "Glob": 2,
+ "Grep": 0
+ },
+ "total_tool_calls": 18,
+ "total_steps": 6,
+ "files_created": ["filled_form.pdf", "field_values.json"],
+ "errors_encountered": 0,
+ "output_chars": 12450,
+ "transcript_chars": 3200
+}
+```
+
+**Fields:**
+- `tool_calls`: Count per tool type
+- `total_tool_calls`: Sum of all tool calls
+- `total_steps`: Number of major execution steps
+- `files_created`: List of output files created
+- `errors_encountered`: Number of errors during execution
+- `output_chars`: Total character count of output files
+- `transcript_chars`: Character count of transcript
+
+---
+
+## timing.json
+
Wall clock timing for a run. Located at `<run-dir>/timing.json`.
+
+**How to capture:** When a subagent task completes, the task notification includes `total_tokens` and `duration_ms`. Save these immediately — they are not persisted anywhere else and cannot be recovered after the fact.
+
+```json
+{
+ "total_tokens": 84852,
+ "duration_ms": 23332,
+ "total_duration_seconds": 23.3,
+ "executor_start": "2026-01-15T10:30:00Z",
+ "executor_end": "2026-01-15T10:32:45Z",
+ "executor_duration_seconds": 165.0,
+ "grader_start": "2026-01-15T10:32:46Z",
+ "grader_end": "2026-01-15T10:33:12Z",
+ "grader_duration_seconds": 26.0
+}
+```
+
+---
+
+## benchmark.json
+
Output from Benchmark mode. Located at `benchmarks/<timestamp>/benchmark.json`.
+
+```json
+{
+ "metadata": {
+ "skill_name": "pdf",
+ "skill_path": "/path/to/pdf",
+ "executor_model": "claude-sonnet-4-20250514",
+ "analyzer_model": "most-capable-model",
+ "timestamp": "2026-01-15T10:30:00Z",
+ "evals_run": [1, 2, 3],
+ "runs_per_configuration": 3
+ },
+
+ "runs": [
+ {
+ "eval_id": 1,
+ "eval_name": "Ocean",
+ "configuration": "with_skill",
+ "run_number": 1,
+ "result": {
+ "pass_rate": 0.85,
+ "passed": 6,
+ "failed": 1,
+ "total": 7,
+ "time_seconds": 42.5,
+ "tokens": 3800,
+ "tool_calls": 18,
+ "errors": 0
+ },
+ "expectations": [
+ {"text": "...", "passed": true, "evidence": "..."}
+ ],
+ "notes": [
+ "Used 2023 data, may be stale",
+ "Fell back to text overlay for non-fillable fields"
+ ]
+ }
+ ],
+
+ "run_summary": {
+ "with_skill": {
+ "pass_rate": {"mean": 0.85, "stddev": 0.05, "min": 0.80, "max": 0.90},
+ "time_seconds": {"mean": 45.0, "stddev": 12.0, "min": 32.0, "max": 58.0},
+ "tokens": {"mean": 3800, "stddev": 400, "min": 3200, "max": 4100}
+ },
+ "without_skill": {
+ "pass_rate": {"mean": 0.35, "stddev": 0.08, "min": 0.28, "max": 0.45},
+ "time_seconds": {"mean": 32.0, "stddev": 8.0, "min": 24.0, "max": 42.0},
+ "tokens": {"mean": 2100, "stddev": 300, "min": 1800, "max": 2500}
+ },
+ "delta": {
+ "pass_rate": "+0.50",
+ "time_seconds": "+13.0",
+ "tokens": "+1700"
+ }
+ },
+
+ "notes": [
+ "Assertion 'Output is a PDF file' passes 100% in both configurations - may not differentiate skill value",
+ "Eval 3 shows high variance (50% ± 40%) - may be flaky or model-dependent",
+ "Without-skill runs consistently fail on table extraction expectations",
+ "Skill adds 13s average execution time but improves pass rate by 50%"
+ ]
+}
+```
+
+**Fields:**
+- `metadata`: Information about the benchmark run
+ - `skill_name`: Name of the skill
+ - `timestamp`: When the benchmark was run
+ - `evals_run`: List of eval names or IDs
+ - `runs_per_configuration`: Number of runs per config (e.g. 3)
+- `runs[]`: Individual run results
+ - `eval_id`: Numeric eval identifier
+ - `eval_name`: Human-readable eval name (used as section header in the viewer)
+ - `configuration`: Must be `"with_skill"` or `"without_skill"` (the viewer uses this exact string for grouping and color coding)
+ - `run_number`: Integer run number (1, 2, 3...)
+ - `result`: Nested object with `pass_rate`, `passed`, `total`, `time_seconds`, `tokens`, `errors`
+- `run_summary`: Statistical aggregates per configuration
+ - `with_skill` / `without_skill`: Each contains `pass_rate`, `time_seconds`, `tokens` objects with `mean` and `stddev` fields
+ - `delta`: Difference strings like `"+0.50"`, `"+13.0"`, `"+1700"`
+- `notes`: Freeform observations from the analyzer
+
+**Important:** The viewer reads these field names exactly. Using `config` instead of `configuration`, or putting `pass_rate` at the top level of a run instead of nested under `result`, will cause the viewer to show empty/zero values. Always reference this schema when generating benchmark.json manually.
+
+---
+
+## comparison.json
+
+Output from blind comparator. Located at `/comparison-N.json`.
+
+```json
+{
+ "winner": "A",
+ "reasoning": "Output A provides a complete solution with proper formatting and all required fields. Output B is missing the date field and has formatting inconsistencies.",
+ "rubric": {
+ "A": {
+ "content": {
+ "correctness": 5,
+ "completeness": 5,
+ "accuracy": 4
+ },
+ "structure": {
+ "organization": 4,
+ "formatting": 5,
+ "usability": 4
+ },
+ "content_score": 4.7,
+ "structure_score": 4.3,
+ "overall_score": 9.0
+ },
+ "B": {
+ "content": {
+ "correctness": 3,
+ "completeness": 2,
+ "accuracy": 3
+ },
+ "structure": {
+ "organization": 3,
+ "formatting": 2,
+ "usability": 3
+ },
+ "content_score": 2.7,
+ "structure_score": 2.7,
+ "overall_score": 5.4
+ }
+ },
+ "output_quality": {
+ "A": {
+ "score": 9,
+ "strengths": ["Complete solution", "Well-formatted", "All fields present"],
+ "weaknesses": ["Minor style inconsistency in header"]
+ },
+ "B": {
+ "score": 5,
+ "strengths": ["Readable output", "Correct basic structure"],
+ "weaknesses": ["Missing date field", "Formatting inconsistencies", "Partial data extraction"]
+ }
+ },
+ "expectation_results": {
+ "A": {
+ "passed": 4,
+ "total": 5,
+ "pass_rate": 0.80,
+ "details": [
+ {"text": "Output includes name", "passed": true}
+ ]
+ },
+ "B": {
+ "passed": 3,
+ "total": 5,
+ "pass_rate": 0.60,
+ "details": [
+ {"text": "Output includes name", "passed": true}
+ ]
+ }
+ }
+}
+```
+
+---
+
+## analysis.json
+
+Output from post-hoc analyzer. Located at `/analysis.json`.
+
+```json
+{
+ "comparison_summary": {
+ "winner": "A",
+ "winner_skill": "path/to/winner/skill",
+ "loser_skill": "path/to/loser/skill",
+ "comparator_reasoning": "Brief summary of why comparator chose winner"
+ },
+ "winner_strengths": [
+ "Clear step-by-step instructions for handling multi-page documents",
+ "Included validation script that caught formatting errors"
+ ],
+ "loser_weaknesses": [
+ "Vague instruction 'process the document appropriately' led to inconsistent behavior",
+ "No script for validation, agent had to improvise"
+ ],
+ "instruction_following": {
+ "winner": {
+ "score": 9,
+ "issues": ["Minor: skipped optional logging step"]
+ },
+ "loser": {
+ "score": 6,
+ "issues": [
+ "Did not use the skill's formatting template",
+ "Invented own approach instead of following step 3"
+ ]
+ }
+ },
+ "improvement_suggestions": [
+ {
+ "priority": "high",
+ "category": "instructions",
+ "suggestion": "Replace 'process the document appropriately' with explicit steps",
+ "expected_impact": "Would eliminate ambiguity that caused inconsistent behavior"
+ }
+ ],
+ "transcript_insights": {
+ "winner_execution_pattern": "Read skill -> Followed 5-step process -> Used validation script",
+ "loser_execution_pattern": "Read skill -> Unclear on approach -> Tried 3 different methods"
+ }
+}
+```
diff --git a/.claude/skills/skill-creator/scripts/__init__.py b/.claude/skills/skill-creator/scripts/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/.claude/skills/skill-creator/scripts/aggregate_benchmark.py b/.claude/skills/skill-creator/scripts/aggregate_benchmark.py
new file mode 100644
index 0000000..3e66e8c
--- /dev/null
+++ b/.claude/skills/skill-creator/scripts/aggregate_benchmark.py
@@ -0,0 +1,401 @@
+#!/usr/bin/env python3
+"""
+Aggregate individual run results into benchmark summary statistics.
+
+Reads grading.json files from run directories and produces:
+- run_summary with mean, stddev, min, max for each metric
+- delta between with_skill and without_skill configurations
+
+Usage:
+ python aggregate_benchmark.py <benchmark-dir> [--skill-name NAME] [--skill-path PATH] [--output FILE]
+
+Example:
+ python aggregate_benchmark.py benchmarks/2026-01-15T10-30-00/
+
+The script supports two directory layouts:
+
+ Workspace layout (from skill-creator iterations):
+ /
+ └── eval-N/
+ ├── with_skill/
+ │ ├── run-1/grading.json
+ │ └── run-2/grading.json
+ └── without_skill/
+ ├── run-1/grading.json
+ └── run-2/grading.json
+
+ Legacy layout (with runs/ subdirectory):
+ /
+ └── runs/
+ └── eval-N/
+ ├── with_skill/
+ │ └── run-1/grading.json
+ └── without_skill/
+ └── run-1/grading.json
+"""
+
+import argparse
+import json
+import math
+import sys
+from datetime import datetime, timezone
+from pathlib import Path
+
+
def calculate_stats(values: list[float]) -> dict:
    """Summarize *values* as mean, sample stddev, min, and max.

    All figures are rounded to 4 decimal places. An empty input yields
    all-zero statistics; a single value yields a stddev of 0.0.
    """
    if not values:
        return {"mean": 0.0, "stddev": 0.0, "min": 0.0, "max": 0.0}

    count = len(values)
    avg = sum(values) / count

    # Sample (Bessel-corrected) standard deviation; undefined for n == 1.
    spread = 0.0
    if count > 1:
        spread = math.sqrt(sum((v - avg) ** 2 for v in values) / (count - 1))

    return {
        "mean": round(avg, 4),
        "stddev": round(spread, 4),
        "min": round(min(values), 4),
        "max": round(max(values), 4)
    }
+
+
def load_run_results(benchmark_dir: Path) -> dict:
    """
    Load all run results from a benchmark directory.

    Returns dict keyed by config name (e.g. "with_skill"/"without_skill",
    or "new_skill"/"old_skill"), each containing a list of run results.

    Each run result is a flat dict with scoring fields (pass_rate, passed,
    failed, total), timing, execution metrics, expectations, and free-form
    notes. Runs whose grading.json is missing or unparseable are skipped
    with a printed warning rather than aborting the whole aggregation.
    """
    # Support both layouts: eval dirs directly under benchmark_dir, or under runs/
    runs_dir = benchmark_dir / "runs"
    if runs_dir.exists():
        search_dir = runs_dir
    elif list(benchmark_dir.glob("eval-*")):
        search_dir = benchmark_dir
    else:
        print(f"No eval directories found in {benchmark_dir} or {benchmark_dir / 'runs'}")
        return {}

    results: dict[str, list] = {}

    for eval_idx, eval_dir in enumerate(sorted(search_dir.glob("eval-*"))):
        # Resolve eval_id with decreasing trust: eval_metadata.json, then the
        # numeric suffix of the directory name, then the enumeration index.
        metadata_path = eval_dir / "eval_metadata.json"
        if metadata_path.exists():
            try:
                with open(metadata_path) as mf:
                    eval_id = json.load(mf).get("eval_id", eval_idx)
            except (json.JSONDecodeError, OSError):
                eval_id = eval_idx
        else:
            try:
                eval_id = int(eval_dir.name.split("-")[1])
            except ValueError:
                eval_id = eval_idx

        # Discover config directories dynamically rather than hardcoding names
        for config_dir in sorted(eval_dir.iterdir()):
            if not config_dir.is_dir():
                continue
            # Skip non-config directories (inputs, outputs, etc.)
            if not list(config_dir.glob("run-*")):
                continue
            config = config_dir.name
            if config not in results:
                results[config] = []

            for run_dir in sorted(config_dir.glob("run-*")):
                run_number = int(run_dir.name.split("-")[1])
                grading_file = run_dir / "grading.json"

                if not grading_file.exists():
                    print(f"Warning: grading.json not found in {run_dir}")
                    continue

                try:
                    with open(grading_file) as f:
                        grading = json.load(f)
                except json.JSONDecodeError as e:
                    print(f"Warning: Invalid JSON in {grading_file}: {e}")
                    continue

                # Extract metrics
                result = {
                    "eval_id": eval_id,
                    "run_number": run_number,
                    "pass_rate": grading.get("summary", {}).get("pass_rate", 0.0),
                    "passed": grading.get("summary", {}).get("passed", 0),
                    "failed": grading.get("summary", {}).get("failed", 0),
                    "total": grading.get("summary", {}).get("total", 0),
                }

                # Extract timing — check grading.json first, then sibling timing.json
                timing = grading.get("timing", {})
                result["time_seconds"] = timing.get("total_duration_seconds", 0.0)
                timing_file = run_dir / "timing.json"
                if result["time_seconds"] == 0.0 and timing_file.exists():
                    try:
                        with open(timing_file) as tf:
                            timing_data = json.load(tf)
                        result["time_seconds"] = timing_data.get("total_duration_seconds", 0.0)
                        result["tokens"] = timing_data.get("total_tokens", 0)
                    except json.JSONDecodeError:
                        # Best-effort: a bad timing file just leaves the
                        # zero/absent defaults in place.
                        pass

                # Extract metrics if available
                metrics = grading.get("execution_metrics", {})
                result["tool_calls"] = metrics.get("total_tool_calls", 0)
                # Fall back to output size as a rough token proxy when no
                # real token count was supplied by timing.json.
                if not result.get("tokens"):
                    result["tokens"] = metrics.get("output_chars", 0)
                result["errors"] = metrics.get("errors_encountered", 0)

                # Extract expectations — viewer requires fields: text, passed, evidence
                raw_expectations = grading.get("expectations", [])
                for exp in raw_expectations:
                    if "text" not in exp or "passed" not in exp:
                        print(f"Warning: expectation in {grading_file} missing required fields (text, passed, evidence): {exp}")
                result["expectations"] = raw_expectations

                # Extract notes from user_notes_summary
                notes_summary = grading.get("user_notes_summary", {})
                notes = []
                notes.extend(notes_summary.get("uncertainties", []))
                notes.extend(notes_summary.get("needs_review", []))
                notes.extend(notes_summary.get("workarounds", []))
                result["notes"] = notes

                results[config].append(result)

    return results
+
+
def aggregate_results(results: dict) -> dict:
    """
    Aggregate run results into summary statistics.

    Returns run_summary with stats for each configuration and a "delta"
    entry comparing the first configuration against the second (mean minus
    mean, pre-formatted as signed strings).
    """
    run_summary = {}
    config_names = list(results.keys())

    for name in config_names:
        entries = results.get(name, [])
        if entries:
            run_summary[name] = {
                "pass_rate": calculate_stats([e["pass_rate"] for e in entries]),
                "time_seconds": calculate_stats([e["time_seconds"] for e in entries]),
                "tokens": calculate_stats([e.get("tokens", 0) for e in entries]),
            }
        else:
            # No runs for this config: emit explicit all-zero stats.
            run_summary[name] = {
                "pass_rate": {"mean": 0.0, "stddev": 0.0, "min": 0.0, "max": 0.0},
                "time_seconds": {"mean": 0.0, "stddev": 0.0, "min": 0.0, "max": 0.0},
                "tokens": {"mean": 0, "stddev": 0, "min": 0, "max": 0}
            }

    # Delta compares the first config (primary) against the second (baseline);
    # with fewer than two configs the baseline contributes zeros.
    if len(config_names) >= 2:
        primary = run_summary.get(config_names[0], {})
        baseline = run_summary.get(config_names[1], {})
    else:
        primary = run_summary.get(config_names[0], {}) if config_names else {}
        baseline = {}

    def _mean(summary: dict, metric: str):
        return summary.get(metric, {}).get("mean", 0)

    run_summary["delta"] = {
        "pass_rate": f"{_mean(primary, 'pass_rate') - _mean(baseline, 'pass_rate'):+.2f}",
        "time_seconds": f"{_mean(primary, 'time_seconds') - _mean(baseline, 'time_seconds'):+.1f}",
        "tokens": f"{_mean(primary, 'tokens') - _mean(baseline, 'tokens'):+.0f}",
    }

    return run_summary
+
+
def generate_benchmark(benchmark_dir: Path, skill_name: str = "", skill_path: str = "") -> dict:
    """
    Generate complete benchmark.json from run results.

    Loads every run under *benchmark_dir*, aggregates per-configuration
    statistics, and returns the full benchmark document: metadata, a flat
    runs array, run_summary, and an empty notes list for the analyzer.
    """
    results = load_run_results(benchmark_dir)
    run_summary = aggregate_results(results)

    # Build runs array for benchmark.json — one flat record per run.
    runs = []
    for config in results:
        for result in results[config]:
            runs.append({
                "eval_id": result["eval_id"],
                "configuration": config,
                "run_number": result["run_number"],
                "result": {
                    "pass_rate": result["pass_rate"],
                    "passed": result["passed"],
                    "failed": result["failed"],
                    "total": result["total"],
                    "time_seconds": result["time_seconds"],
                    "tokens": result.get("tokens", 0),
                    "tool_calls": result.get("tool_calls", 0),
                    "errors": result.get("errors", 0)
                },
                "expectations": result["expectations"],
                "notes": result["notes"]
            })

    # Determine eval IDs from results
    eval_ids = sorted(set(
        r["eval_id"]
        for config in results.values()
        for r in config
    ))

    benchmark = {
        "metadata": {
            "skill_name": skill_name or "",
            "skill_path": skill_path or "",
            # Model fields are intentionally blank here — presumably filled
            # in later by the calling pipeline; TODO confirm.
            "executor_model": "",
            "analyzer_model": "",
            "timestamp": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
            "evals_run": eval_ids,
            # NOTE(review): hardcoded to 3 — may disagree with the actual
            # number of run-* directories found on disk; verify with caller.
            "runs_per_configuration": 3
        },
        "runs": runs,
        "run_summary": run_summary,
        "notes": []  # To be filled by analyzer
    }

    return benchmark
+
+
def generate_markdown(benchmark: dict) -> str:
    """Render benchmark data as a human-readable Markdown summary.

    Produces a header, a three-column metrics table (first config, second
    config, delta), and an optional Notes section.
    """
    meta = benchmark["metadata"]
    summary = benchmark["run_summary"]

    # Configuration columns are every summary key except the computed delta.
    config_names = [name for name in summary if name != "delta"]
    first = config_names[0] if config_names else "config_a"
    second = config_names[1] if len(config_names) > 1 else "config_b"

    stats_a = summary.get(first, {})
    stats_b = summary.get(second, {})
    delta = summary.get("delta", {})

    def metric(stats: dict, key: str, field: str):
        # Missing metrics degrade to 0 so formatting never raises.
        return stats.get(key, {}).get(field, 0)

    out = [
        f"# Skill Benchmark: {meta['skill_name']}",
        "",
        f"**Model**: {meta['executor_model']}",
        f"**Date**: {meta['timestamp']}",
        f"**Evals**: {', '.join(map(str, meta['evals_run']))} ({meta['runs_per_configuration']} runs each per configuration)",
        "",
        "## Summary",
        "",
        f"| Metric | {first.replace('_', ' ').title()} | {second.replace('_', ' ').title()} | Delta |",
        "|--------|------------|---------------|-------|",
        f"| Pass Rate | {metric(stats_a, 'pass_rate', 'mean')*100:.0f}% ± {metric(stats_a, 'pass_rate', 'stddev')*100:.0f}% | {metric(stats_b, 'pass_rate', 'mean')*100:.0f}% ± {metric(stats_b, 'pass_rate', 'stddev')*100:.0f}% | {delta.get('pass_rate', '—')} |",
        f"| Time | {metric(stats_a, 'time_seconds', 'mean'):.1f}s ± {metric(stats_a, 'time_seconds', 'stddev'):.1f}s | {metric(stats_b, 'time_seconds', 'mean'):.1f}s ± {metric(stats_b, 'time_seconds', 'stddev'):.1f}s | {delta.get('time_seconds', '—')}s |",
        f"| Tokens | {metric(stats_a, 'tokens', 'mean'):.0f} ± {metric(stats_a, 'tokens', 'stddev'):.0f} | {metric(stats_b, 'tokens', 'mean'):.0f} ± {metric(stats_b, 'tokens', 'stddev'):.0f} | {delta.get('tokens', '—')} |",
    ]

    if benchmark.get("notes"):
        out += ["", "## Notes", ""]
        out += [f"- {note}" for note in benchmark["notes"]]

    return "\n".join(out)
+
+
def main():
    """CLI entry point: aggregate run results, write benchmark.json and
    benchmark.md, then print a short per-config pass-rate summary."""
    parser = argparse.ArgumentParser(
        description="Aggregate benchmark run results into summary statistics"
    )
    parser.add_argument(
        "benchmark_dir",
        type=Path,
        help="Path to the benchmark directory"
    )
    parser.add_argument(
        "--skill-name",
        default="",
        help="Name of the skill being benchmarked"
    )
    parser.add_argument(
        "--skill-path",
        default="",
        help="Path to the skill being benchmarked"
    )
    parser.add_argument(
        "--output", "-o",
        type=Path,
        # NOTE(review): this help text looks truncated — the actual default
        # used below is benchmark_dir/benchmark.json.
        help="Output path for benchmark.json (default: /benchmark.json)"
    )

    args = parser.parse_args()

    if not args.benchmark_dir.exists():
        print(f"Directory not found: {args.benchmark_dir}")
        sys.exit(1)

    # Generate benchmark
    benchmark = generate_benchmark(args.benchmark_dir, args.skill_name, args.skill_path)

    # Determine output paths — the .md report is written next to the .json.
    output_json = args.output or (args.benchmark_dir / "benchmark.json")
    output_md = output_json.with_suffix(".md")

    # Write benchmark.json
    with open(output_json, "w") as f:
        json.dump(benchmark, f, indent=2)
    print(f"Generated: {output_json}")

    # Write benchmark.md
    markdown = generate_markdown(benchmark)
    with open(output_md, "w") as f:
        f.write(markdown)
    print(f"Generated: {output_md}")

    # Print summary
    run_summary = benchmark["run_summary"]
    configs = [k for k in run_summary if k != "delta"]
    delta = run_summary.get("delta", {})

    print(f"\nSummary:")
    for config in configs:
        pr = run_summary[config]["pass_rate"]["mean"]
        label = config.replace("_", " ").title()
        print(f" {label}: {pr*100:.1f}% pass rate")
    print(f" Delta: {delta.get('pass_rate', '—')}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.claude/skills/skill-creator/scripts/generate_report.py b/.claude/skills/skill-creator/scripts/generate_report.py
new file mode 100644
index 0000000..959e30a
--- /dev/null
+++ b/.claude/skills/skill-creator/scripts/generate_report.py
@@ -0,0 +1,326 @@
+#!/usr/bin/env python3
+"""Generate an HTML report from run_loop.py output.
+
+Takes the JSON output from run_loop.py and generates a visual HTML report
+showing each description attempt with check/x for each test case.
+Distinguishes between train and test queries.
+"""
+
+import argparse
+import html
+import json
+import sys
+from pathlib import Path
+
+
def generate_html(data: dict, auto_refresh: bool = False, skill_name: str = "") -> str:
    """Generate HTML report from loop output data. If auto_refresh is True, adds a meta refresh tag.

    NOTE(review): the embedded template literals below appear to have lost
    their HTML/CSS markup in transit (no angle-bracket tags survive) —
    restore the original template from version control before relying on
    this function's output.
    """
    history = data.get("history", [])
    # NOTE(review): `holdout` and the per-iteration train/test totals read
    # below are currently unused — presumably referenced by the original,
    # un-garbled template; verify.
    holdout = data.get("holdout", 0)
    title_prefix = html.escape(skill_name + " \u2014 ") if skill_name else ""

    # Get all unique queries from train and test sets, with should_trigger info.
    # Column order comes from the first iteration; legacy payloads use
    # "results" in place of "train_results".
    train_queries: list[dict] = []
    test_queries: list[dict] = []
    if history:
        for r in history[0].get("train_results", history[0].get("results", [])):
            train_queries.append({"query": r["query"], "should_trigger": r.get("should_trigger", True)})
        if history[0].get("test_results"):
            for r in history[0].get("test_results", []):
                test_queries.append({"query": r["query"], "should_trigger": r.get("should_trigger", True)})

    # NOTE(review): presumably a meta-refresh tag originally — markup stripped.
    refresh_tag = ' \n' if auto_refresh else ""

    # Document head, title, and intro copy.
    html_parts = ["""



""" + refresh_tag + """ """ + title_prefix + """Skill Description Optimization






 """ + title_prefix + """Skill Description Optimization

 Optimizing your skill's description. This page updates automatically as Claude tests different versions of your skill's description. Each row is an iteration — a new description attempt. The columns show test queries: green checkmarks mean the skill triggered correctly (or correctly didn't trigger), red crosses mean it got it wrong. The "Train" score shows performance on queries used to improve the description; the "Test" score shows performance on held-out queries the optimizer hasn't seen. When it's done, Claude will apply the best-performing description to your skill.

"""]

    # Summary section
    best_test_score = data.get('best_test_score')
    best_train_score = data.get('best_train_score')
    html_parts.append(f"""

Original: {html.escape(data.get('original_description', 'N/A'))}

Best: {html.escape(data.get('best_description', 'N/A'))}

Best Score: {data.get('best_score', 'N/A')} {'(test)' if best_test_score else '(train)'}

Iterations: {data.get('iterations_run', 0)} | Train: {data.get('train_size', '?')} | Test: {data.get('test_size', '?')}

""")

    # Legend
    html_parts.append("""

 Query columns:
 Should trigger
 Should NOT trigger
 Train
 Test

""")

    # Table header
    html_parts.append("""




 Iter
 Train
 Test
 Description
""")

    # Add column headers for train queries
    for qinfo in train_queries:
        # NOTE(review): `polarity` is unused in the garbled template — it
        # presumably fed a CSS class attribute originally.
        polarity = "positive-col" if qinfo["should_trigger"] else "negative-col"
        html_parts.append(f' {html.escape(qinfo["query"])} \n')

    # Add column headers for test queries (different color)
    for qinfo in test_queries:
        polarity = "positive-col" if qinfo["should_trigger"] else "negative-col"
        html_parts.append(f' {html.escape(qinfo["query"])} \n')

    html_parts.append("""


""")

    # Find best iteration for highlighting
    # NOTE(review): max() raises ValueError when history is empty — guard
    # upstream (or here) before calling with an empty run.
    if test_queries:
        best_iter = max(history, key=lambda h: h.get("test_passed") or 0).get("iteration")
    else:
        best_iter = max(history, key=lambda h: h.get("train_passed", h.get("passed", 0))).get("iteration")

    # Add rows for each iteration
    for h in history:
        iteration = h.get("iteration", "?")
        train_passed = h.get("train_passed", h.get("passed", 0))
        train_total = h.get("train_total", h.get("total", 0))
        test_passed = h.get("test_passed")
        test_total = h.get("test_total")
        description = h.get("description", "")
        train_results = h.get("train_results", h.get("results", []))
        test_results = h.get("test_results", [])

        # Create lookups for results by query
        train_by_query = {r["query"]: r for r in train_results}
        test_by_query = {r["query"]: r for r in test_results} if test_results else {}

        # Compute aggregate correct/total runs across all retries
        def aggregate_runs(results: list[dict]) -> tuple[int, int]:
            # A run counts as correct when the trigger outcome matches the
            # expectation: triggers for should-trigger queries, non-triggers
            # (runs - triggers) otherwise.
            correct = 0
            total = 0
            for r in results:
                runs = r.get("runs", 0)
                triggers = r.get("triggers", 0)
                total += runs
                if r.get("should_trigger", True):
                    correct += triggers
                else:
                    correct += runs - triggers
            return correct, total

        train_correct, train_runs = aggregate_runs(train_results)
        test_correct, test_runs = aggregate_runs(test_results)

        # Determine score classes
        def score_class(correct: int, total: int) -> str:
            # >= 80% good, >= 50% ok, otherwise bad (including 0-run case).
            if total > 0:
                ratio = correct / total
                if ratio >= 0.8:
                    return "score-good"
                elif ratio >= 0.5:
                    return "score-ok"
            return "score-bad"

        train_class = score_class(train_correct, train_runs)
        test_class = score_class(test_correct, test_runs)

        row_class = "best-row" if iteration == best_iter else ""

        html_parts.append(f"""
 {iteration}
 {train_correct}/{train_runs}
 {test_correct}/{test_runs}
 {html.escape(description)}
""")

        # Add result for each train query
        for qinfo in train_queries:
            r = train_by_query.get(qinfo["query"], {})
            did_pass = r.get("pass", False)
            triggers = r.get("triggers", 0)
            runs = r.get("runs", 0)

            icon = "✓" if did_pass else "✗"
            css_class = "pass" if did_pass else "fail"

            html_parts.append(f' {icon}{triggers}/{runs} \n')

        # Add result for each test query (with different background)
        for qinfo in test_queries:
            r = test_by_query.get(qinfo["query"], {})
            did_pass = r.get("pass", False)
            triggers = r.get("triggers", 0)
            runs = r.get("runs", 0)

            icon = "✓" if did_pass else "✗"
            css_class = "pass" if did_pass else "fail"

            html_parts.append(f' {icon}{triggers}/{runs} \n')

        html_parts.append(" \n")

    html_parts.append("""


""")

    html_parts.append("""


""")

    return "".join(html_parts)
+
+
def main():
    """CLI entry point: read run_loop JSON and emit the HTML report."""
    arg_parser = argparse.ArgumentParser(description="Generate HTML report from run_loop output")
    arg_parser.add_argument("input", help="Path to JSON output from run_loop.py (or - for stdin)")
    arg_parser.add_argument("-o", "--output", default=None, help="Output HTML file (default: stdout)")
    arg_parser.add_argument("--skill-name", default="", help="Skill name to include in the report title")
    opts = arg_parser.parse_args()

    # "-" means read the report data from stdin rather than a file.
    if opts.input == "-":
        loop_data = json.load(sys.stdin)
    else:
        loop_data = json.loads(Path(opts.input).read_text())

    report = generate_html(loop_data, skill_name=opts.skill_name)

    if opts.output:
        # Write the report file; keep stdout clean by logging to stderr.
        Path(opts.output).write_text(report)
        print(f"Report written to {opts.output}", file=sys.stderr)
    else:
        print(report)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.claude/skills/skill-creator/scripts/improve_description.py b/.claude/skills/skill-creator/scripts/improve_description.py
new file mode 100644
index 0000000..a270777
--- /dev/null
+++ b/.claude/skills/skill-creator/scripts/improve_description.py
@@ -0,0 +1,248 @@
+#!/usr/bin/env python3
+"""Improve a skill description based on eval results.
+
+Takes eval results (from run_eval.py) and generates an improved description
+using Claude with extended thinking.
+"""
+
+import argparse
+import json
+import re
+import sys
+from pathlib import Path
+
+import anthropic
+
+from scripts.utils import parse_skill_md
+
+
def improve_description(
    client: anthropic.Anthropic,
    skill_name: str,
    skill_content: str,
    current_description: str,
    eval_results: dict,
    history: list[dict],
    model: str,
    test_results: dict | None = None,
    log_dir: Path | None = None,
    iteration: int | None = None,
) -> str:
    """Call Claude to improve the description based on eval results.

    Builds a critique prompt from the failing eval queries and previous
    attempts, requests a rewrite with extended thinking enabled, retries once
    when the reply exceeds the 1024-character description limit, and
    optionally writes a JSON transcript of the exchange into *log_dir*.

    NOTE(review): several literals below (the response-parsing regexes and
    the "in tags" phrasing in the prompt) appear to have lost XML-style tag
    delimiters in transit — confirm against the original source.
    """
    # Partition eval failures: misses (should have triggered but didn't) vs
    # false positives (triggered when they shouldn't have).
    failed_triggers = [
        r for r in eval_results["results"]
        if r["should_trigger"] and not r["pass"]
    ]
    false_triggers = [
        r for r in eval_results["results"]
        if not r["should_trigger"] and not r["pass"]
    ]

    # Build scores summary
    train_score = f"{eval_results['summary']['passed']}/{eval_results['summary']['total']}"
    if test_results:
        test_score = f"{test_results['summary']['passed']}/{test_results['summary']['total']}"
        scores_summary = f"Train: {train_score}, Test: {test_score}"
    else:
        scores_summary = f"Train: {train_score}"

    prompt = f"""You are optimizing a skill description for a Claude Code skill called "{skill_name}". A "skill" is sort of like a prompt, but with progressive disclosure -- there's a title and description that Claude sees when deciding whether to use the skill, and then if it does use the skill, it reads the .md file which has lots more details and potentially links to other resources in the skill folder like helper files and scripts and additional documentation or examples.

The description appears in Claude's "available_skills" list. When a user sends a query, Claude decides whether to invoke the skill based solely on the title and on this description. Your goal is to write a description that triggers for relevant queries, and doesn't trigger for irrelevant ones.

Here's the current description:

"{current_description}"


Current scores ({scores_summary}):

"""
    if failed_triggers:
        prompt += "FAILED TO TRIGGER (should have triggered but didn't):\n"
        for r in failed_triggers:
            prompt += f' - "{r["query"]}" (triggered {r["triggers"]}/{r["runs"]} times)\n'
        prompt += "\n"

    if false_triggers:
        prompt += "FALSE TRIGGERS (triggered but shouldn't have):\n"
        for r in false_triggers:
            prompt += f' - "{r["query"]}" (triggered {r["triggers"]}/{r["runs"]} times)\n'
        prompt += "\n"

    if history:
        prompt += "PREVIOUS ATTEMPTS (do NOT repeat these — try something structurally different):\n\n"
        for h in history:
            train_s = f"{h.get('train_passed', h.get('passed', 0))}/{h.get('train_total', h.get('total', 0))}"
            test_s = f"{h.get('test_passed', '?')}/{h.get('test_total', '?')}" if h.get('test_passed') is not None else None
            # NOTE(review): score_str is built but never interpolated below —
            # the next line presumably embedded it inside a tag that was
            # stripped; verify.
            score_str = f"train={train_s}" + (f", test={test_s}" if test_s else "")
            prompt += f'\n'
            prompt += f'Description: "{h["description"]}"\n'
            if "results" in h:
                prompt += "Train results:\n"
                for r in h["results"]:
                    status = "PASS" if r["pass"] else "FAIL"
                    prompt += f' [{status}] "{r["query"][:80]}" (triggered {r["triggers"]}/{r["runs"]})\n'
            if h.get("note"):
                prompt += f'Note: {h["note"]}\n'
            prompt += " \n\n"

    prompt += f"""

Skill content (for context on what the skill does):

{skill_content}


Based on the failures, write a new and improved description that is more likely to trigger correctly. When I say "based on the failures", it's a bit of a tricky line to walk because we don't want to overfit to the specific cases you're seeing. So what I DON'T want you to do is produce an ever-expanding list of specific queries that this skill should or shouldn't trigger for. Instead, try to generalize from the failures to broader categories of user intent and situations where this skill would be useful or not useful. The reason for this is twofold:

1. Avoid overfitting
2. The list might get loooong and it's injected into ALL queries and there might be a lot of skills, so we don't want to blow too much space on any given description.

Concretely, your description should not be more than about 100-200 words, even if that comes at the cost of accuracy.

Here are some tips that we've found to work well in writing these descriptions:
- The skill should be phrased in the imperative -- "Use this skill for" rather than "this skill does"
- The skill description should focus on the user's intent, what they are trying to achieve, vs. the implementation details of how the skill works.
- The description competes with other skills for Claude's attention — make it distinctive and immediately recognizable.
- If you're getting lots of failures after repeated attempts, change things up. Try different sentence structures or wordings.

I'd encourage you to be creative and mix up the style in different iterations since you'll have multiple opportunities to try different approaches and we'll just grab the highest-scoring one at the end.

Please respond with only the new description text in tags, nothing else."""

    # Request the rewrite with extended thinking enabled.
    response = client.messages.create(
        model=model,
        max_tokens=16000,
        thinking={
            "type": "enabled",
            "budget_tokens": 10000,
        },
        messages=[{"role": "user", "content": prompt}],
    )

    # Extract thinking and text from response
    thinking_text = ""
    text = ""
    for block in response.content:
        if block.type == "thinking":
            thinking_text = block.thinking
        elif block.type == "text":
            text = block.text

    # Parse out the tags
    # NOTE(review): "(.*?) " stops at the first space, keeping only the first
    # word — the pattern presumably matched a tag pair before the markup was
    # stripped; confirm against source history.
    match = re.search(r"(.*?) ", text, re.DOTALL)
    description = match.group(1).strip().strip('"') if match else text.strip().strip('"')

    # Log the transcript
    transcript: dict = {
        "iteration": iteration,
        "prompt": prompt,
        "thinking": thinking_text,
        "response": text,
        "parsed_description": description,
        "char_count": len(description),
        "over_limit": len(description) > 1024,
    }

    # If over 1024 chars, ask the model to shorten it
    if len(description) > 1024:
        shorten_prompt = f"Your description is {len(description)} characters, which exceeds the hard 1024 character limit. Please rewrite it to be under 1024 characters while preserving the most important trigger words and intent coverage. Respond with only the new description in tags."
        # Continue the same conversation so the model sees its prior answer.
        shorten_response = client.messages.create(
            model=model,
            max_tokens=16000,
            thinking={
                "type": "enabled",
                "budget_tokens": 10000,
            },
            messages=[
                {"role": "user", "content": prompt},
                {"role": "assistant", "content": text},
                {"role": "user", "content": shorten_prompt},
            ],
        )

        shorten_thinking = ""
        shorten_text = ""
        for block in shorten_response.content:
            if block.type == "thinking":
                shorten_thinking = block.thinking
            elif block.type == "text":
                shorten_text = block.text

        # Same (apparently garbled) tag-extraction pattern as above.
        match = re.search(r"(.*?) ", shorten_text, re.DOTALL)
        shortened = match.group(1).strip().strip('"') if match else shorten_text.strip().strip('"')

        transcript["rewrite_prompt"] = shorten_prompt
        transcript["rewrite_thinking"] = shorten_thinking
        transcript["rewrite_response"] = shorten_text
        transcript["rewrite_description"] = shortened
        transcript["rewrite_char_count"] = len(shortened)
        description = shortened

    transcript["final_description"] = description

    if log_dir:
        log_dir.mkdir(parents=True, exist_ok=True)
        log_file = log_dir / f"improve_iter_{iteration or 'unknown'}.json"
        log_file.write_text(json.dumps(transcript, indent=2))

    return description
+
+
def main():
    """CLI wrapper: load eval results, request an improved description, and
    print JSON containing the new description plus the extended history."""
    cli = argparse.ArgumentParser(description="Improve a skill description based on eval results")
    cli.add_argument("--eval-results", required=True, help="Path to eval results JSON (from run_eval.py)")
    cli.add_argument("--skill-path", required=True, help="Path to skill directory")
    cli.add_argument("--history", default=None, help="Path to history JSON (previous attempts)")
    cli.add_argument("--model", required=True, help="Model for improvement")
    cli.add_argument("--verbose", action="store_true", help="Print thinking to stderr")
    opts = cli.parse_args()

    skill_dir = Path(opts.skill_path)
    if not (skill_dir / "SKILL.md").exists():
        print(f"Error: No SKILL.md found at {skill_dir}", file=sys.stderr)
        sys.exit(1)

    evaluation = json.loads(Path(opts.eval_results).read_text())
    attempts = json.loads(Path(opts.history).read_text()) if opts.history else []

    skill_name, _, skill_body = parse_skill_md(skill_dir)
    old_description = evaluation["description"]

    if opts.verbose:
        print(f"Current: {old_description}", file=sys.stderr)
        print(f"Score: {evaluation['summary']['passed']}/{evaluation['summary']['total']}", file=sys.stderr)

    improved = improve_description(
        client=anthropic.Anthropic(),
        skill_name=skill_name,
        skill_content=skill_body,
        current_description=old_description,
        eval_results=evaluation,
        history=attempts,
        model=opts.model,
    )

    if opts.verbose:
        print(f"Improved: {improved}", file=sys.stderr)

    # Output as JSON with both the new description and updated history
    # (history gains one entry describing the attempt just evaluated).
    print(json.dumps(
        {
            "description": improved,
            "history": attempts + [{
                "description": old_description,
                "passed": evaluation["summary"]["passed"],
                "failed": evaluation["summary"]["failed"],
                "total": evaluation["summary"]["total"],
                "results": evaluation["results"],
            }],
        },
        indent=2,
    ))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.claude/skills/skill-creator/scripts/package_skill.py b/.claude/skills/skill-creator/scripts/package_skill.py
new file mode 100644
index 0000000..f48eac4
--- /dev/null
+++ b/.claude/skills/skill-creator/scripts/package_skill.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python3
+"""
+Skill Packager - Creates a distributable .skill file of a skill folder
+
+Usage:
+ python scripts/package_skill.py <skill-path> [output-directory]
+
+Example:
+ python scripts/package_skill.py skills/public/my-skill
+ python scripts/package_skill.py skills/public/my-skill ./dist
+"""
+
+import fnmatch
+import sys
+import zipfile
+from pathlib import Path
+from scripts.quick_validate import validate_skill
+
# Patterns to exclude when packaging skills.
EXCLUDE_DIRS = {"__pycache__", "node_modules"}
EXCLUDE_GLOBS = {"*.pyc"}
EXCLUDE_FILES = {".DS_Store"}
# Directories excluded only at the skill root (not when nested deeper).
ROOT_EXCLUDE_DIRS = {"evals"}


def should_exclude(rel_path: Path) -> bool:
    """Return True when *rel_path* must be left out of the package.

    *rel_path* is relative to the skill folder's parent, so parts[0] is the
    skill folder itself and parts[1] (when present) is its first-level entry.
    """
    segments = rel_path.parts

    # Build-artifact directories are excluded wherever they appear.
    if any(segment in EXCLUDE_DIRS for segment in segments):
        return True

    # Certain directories (e.g. evals) are excluded only at the skill root.
    if len(segments) > 1 and segments[1] in ROOT_EXCLUDE_DIRS:
        return True

    filename = rel_path.name
    return filename in EXCLUDE_FILES or any(
        fnmatch.fnmatch(filename, pattern) for pattern in EXCLUDE_GLOBS
    )
+
+
def package_skill(skill_path, output_dir=None):
    """
    Package a skill folder into a distributable .skill archive (zip format).

    The skill is validated first; archive entries are prefixed with the
    skill folder name, and build artifacts (see should_exclude) are omitted.

    Args:
        skill_path: Path to the skill folder
        output_dir: Optional output directory for the .skill file (defaults to current directory)

    Returns:
        Path to the created .skill file, or None if error
    """
    folder = Path(skill_path).resolve()

    # Guard clauses: the target must be an existing directory with a SKILL.md.
    if not folder.exists():
        print(f"❌ Error: Skill folder not found: {folder}")
        return None
    if not folder.is_dir():
        print(f"❌ Error: Path is not a directory: {folder}")
        return None
    if not (folder / "SKILL.md").exists():
        print(f"❌ Error: SKILL.md not found in {folder}")
        return None

    # Never package a skill that fails validation.
    print("🔍 Validating skill...")
    valid, message = validate_skill(folder)
    if not valid:
        print(f"❌ Validation failed: {message}")
        print(" Please fix the validation errors before packaging.")
        return None
    print(f"✅ {message}\n")

    # Resolve where the archive should be written (cwd by default).
    if output_dir:
        destination = Path(output_dir).resolve()
        destination.mkdir(parents=True, exist_ok=True)
    else:
        destination = Path.cwd()
    archive_path = destination / f"{folder.name}.skill"

    # Zip everything under the skill folder, filtering out excluded paths.
    try:
        with zipfile.ZipFile(archive_path, 'w', zipfile.ZIP_DEFLATED) as archive:
            for entry in folder.rglob('*'):
                if not entry.is_file():
                    continue
                # Arcnames are relative to the parent so the archive keeps
                # the skill folder itself as its top-level directory.
                arcname = entry.relative_to(folder.parent)
                if should_exclude(arcname):
                    print(f" Skipped: {arcname}")
                else:
                    archive.write(entry, arcname)
                    print(f" Added: {arcname}")

        print(f"\n✅ Successfully packaged skill to: {archive_path}")
        return archive_path

    except Exception as e:
        print(f"❌ Error creating .skill file: {e}")
        return None
+
+
def main():
    """CLI entry point: package the skill folder named on the command line.

    Exits 0 on success, 1 on bad arguments or packaging failure.
    """
    if len(sys.argv) < 2:
        # The skill path is mandatory (read from argv[1] below); the output
        # directory is optional. Fixed: the previous usage text omitted the
        # skill path and pointed at a nonexistent utils/ directory even
        # though this script lives under scripts/.
        print("Usage: python scripts/package_skill.py <skill-path> [output-directory]")
        print("\nExample:")
        print(" python scripts/package_skill.py skills/public/my-skill")
        print(" python scripts/package_skill.py skills/public/my-skill ./dist")
        sys.exit(1)

    skill_path = sys.argv[1]
    output_dir = sys.argv[2] if len(sys.argv) > 2 else None

    print(f"📦 Packaging skill: {skill_path}")
    if output_dir:
        print(f" Output directory: {output_dir}")
    print()

    # package_skill returns the archive path on success, None on failure.
    result = package_skill(skill_path, output_dir)
    sys.exit(0 if result else 1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.claude/skills/skill-creator/scripts/quick_validate.py b/.claude/skills/skill-creator/scripts/quick_validate.py
new file mode 100644
index 0000000..ed8e1dd
--- /dev/null
+++ b/.claude/skills/skill-creator/scripts/quick_validate.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python3
+"""
+Quick validation script for skills - minimal version
+"""
+
+import sys
+import os
+import re
+import yaml
+from pathlib import Path
+
+def validate_skill(skill_path):
+ """Basic validation of a skill"""
+ skill_path = Path(skill_path)
+
+ # Check SKILL.md exists
+ skill_md = skill_path / 'SKILL.md'
+ if not skill_md.exists():
+ return False, "SKILL.md not found"
+
+ # Read and validate frontmatter
+ content = skill_md.read_text()
+ if not content.startswith('---'):
+ return False, "No YAML frontmatter found"
+
+ # Extract frontmatter
+ match = re.match(r'^---\n(.*?)\n---', content, re.DOTALL)
+ if not match:
+ return False, "Invalid frontmatter format"
+
+ frontmatter_text = match.group(1)
+
+ # Parse YAML frontmatter
+ try:
+ frontmatter = yaml.safe_load(frontmatter_text)
+ if not isinstance(frontmatter, dict):
+ return False, "Frontmatter must be a YAML dictionary"
+ except yaml.YAMLError as e:
+ return False, f"Invalid YAML in frontmatter: {e}"
+
+ # Define allowed properties
+ ALLOWED_PROPERTIES = {'name', 'description', 'license', 'allowed-tools', 'metadata', 'compatibility'}
+
+ # Check for unexpected properties (excluding nested keys under metadata)
+ unexpected_keys = set(frontmatter.keys()) - ALLOWED_PROPERTIES
+ if unexpected_keys:
+ return False, (
+ f"Unexpected key(s) in SKILL.md frontmatter: {', '.join(sorted(unexpected_keys))}. "
+ f"Allowed properties are: {', '.join(sorted(ALLOWED_PROPERTIES))}"
+ )
+
+ # Check required fields
+ if 'name' not in frontmatter:
+ return False, "Missing 'name' in frontmatter"
+ if 'description' not in frontmatter:
+ return False, "Missing 'description' in frontmatter"
+
+ # Extract name for validation
+ name = frontmatter.get('name', '')
+ if not isinstance(name, str):
+ return False, f"Name must be a string, got {type(name).__name__}"
+ name = name.strip()
+ if name:
+ # Check naming convention (kebab-case: lowercase with hyphens)
+ if not re.match(r'^[a-z0-9-]+$', name):
+ return False, f"Name '{name}' should be kebab-case (lowercase letters, digits, and hyphens only)"
+ if name.startswith('-') or name.endswith('-') or '--' in name:
+ return False, f"Name '{name}' cannot start/end with hyphen or contain consecutive hyphens"
+ # Check name length (max 64 characters per spec)
+ if len(name) > 64:
+ return False, f"Name is too long ({len(name)} characters). Maximum is 64 characters."
+
+ # Extract and validate description
+ description = frontmatter.get('description', '')
+ if not isinstance(description, str):
+ return False, f"Description must be a string, got {type(description).__name__}"
+ description = description.strip()
+ if description:
+ # Check for angle brackets
+ if '<' in description or '>' in description:
+ return False, "Description cannot contain angle brackets (< or >)"
+ # Check description length (max 1024 characters per spec)
+ if len(description) > 1024:
+ return False, f"Description is too long ({len(description)} characters). Maximum is 1024 characters."
+
+ # Validate compatibility field if present (optional)
+ compatibility = frontmatter.get('compatibility', '')
+ if compatibility:
+ if not isinstance(compatibility, str):
+ return False, f"Compatibility must be a string, got {type(compatibility).__name__}"
+ if len(compatibility) > 500:
+ return False, f"Compatibility is too long ({len(compatibility)} characters). Maximum is 500 characters."
+
+ return True, "Skill is valid!"
+
+if __name__ == "__main__":
+ if len(sys.argv) != 2:
+        print("Usage: python quick_validate.py <skill_directory>")
+ sys.exit(1)
+
+ valid, message = validate_skill(sys.argv[1])
+ print(message)
+ sys.exit(0 if valid else 1)
\ No newline at end of file
diff --git a/.claude/skills/skill-creator/scripts/run_eval.py b/.claude/skills/skill-creator/scripts/run_eval.py
new file mode 100644
index 0000000..e58c70b
--- /dev/null
+++ b/.claude/skills/skill-creator/scripts/run_eval.py
@@ -0,0 +1,310 @@
+#!/usr/bin/env python3
+"""Run trigger evaluation for a skill description.
+
+Tests whether a skill's description causes Claude to trigger (read the skill)
+for a set of queries. Outputs results as JSON.
+"""
+
+import argparse
+import json
+import os
+import select
+import subprocess
+import sys
+import time
+import uuid
+from concurrent.futures import ProcessPoolExecutor, as_completed
+from pathlib import Path
+
+from scripts.utils import parse_skill_md
+
+
+def find_project_root() -> Path:
+ """Find the project root by walking up from cwd looking for .claude/.
+
+ Mimics how Claude Code discovers its project root, so the command file
+ we create ends up where claude -p will look for it.
+ """
+ current = Path.cwd()
+ for parent in [current, *current.parents]:
+ if (parent / ".claude").is_dir():
+ return parent
+ return current
+
+
+def run_single_query(
+ query: str,
+ skill_name: str,
+ skill_description: str,
+ timeout: int,
+ project_root: str,
+ model: str | None = None,
+) -> bool:
+ """Run a single query and return whether the skill was triggered.
+
+ Creates a command file in .claude/commands/ so it appears in Claude's
+ available_skills list, then runs `claude -p` with the raw query.
+ Uses --include-partial-messages to detect triggering early from
+ stream events (content_block_start) rather than waiting for the
+ full assistant message, which only arrives after tool execution.
+ """
+ unique_id = uuid.uuid4().hex[:8]
+ clean_name = f"{skill_name}-skill-{unique_id}"
+ project_commands_dir = Path(project_root) / ".claude" / "commands"
+ command_file = project_commands_dir / f"{clean_name}.md"
+
+ try:
+ project_commands_dir.mkdir(parents=True, exist_ok=True)
+ # Use YAML block scalar to avoid breaking on quotes in description
+ indented_desc = "\n ".join(skill_description.split("\n"))
+ command_content = (
+ f"---\n"
+ f"description: |\n"
+ f" {indented_desc}\n"
+ f"---\n\n"
+ f"# {skill_name}\n\n"
+ f"This skill handles: {skill_description}\n"
+ )
+ command_file.write_text(command_content)
+
+ cmd = [
+ "claude",
+ "-p", query,
+ "--output-format", "stream-json",
+ "--verbose",
+ "--include-partial-messages",
+ ]
+ if model:
+ cmd.extend(["--model", model])
+
+ # Remove CLAUDECODE env var to allow nesting claude -p inside a
+ # Claude Code session. The guard is for interactive terminal conflicts;
+ # programmatic subprocess usage is safe.
+ env = {k: v for k, v in os.environ.items() if k != "CLAUDECODE"}
+
+ process = subprocess.Popen(
+ cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.DEVNULL,
+ cwd=project_root,
+ env=env,
+ )
+
+ triggered = False
+ start_time = time.time()
+ buffer = ""
+ # Track state for stream event detection
+ pending_tool_name = None
+ accumulated_json = ""
+
+ try:
+ while time.time() - start_time < timeout:
+ if process.poll() is not None:
+ remaining = process.stdout.read()
+ if remaining:
+ buffer += remaining.decode("utf-8", errors="replace")
+ break
+
+ ready, _, _ = select.select([process.stdout], [], [], 1.0)
+ if not ready:
+ continue
+
+ chunk = os.read(process.stdout.fileno(), 8192)
+ if not chunk:
+ break
+ buffer += chunk.decode("utf-8", errors="replace")
+
+ while "\n" in buffer:
+ line, buffer = buffer.split("\n", 1)
+ line = line.strip()
+ if not line:
+ continue
+
+ try:
+ event = json.loads(line)
+ except json.JSONDecodeError:
+ continue
+
+ # Early detection via stream events
+ if event.get("type") == "stream_event":
+ se = event.get("event", {})
+ se_type = se.get("type", "")
+
+ if se_type == "content_block_start":
+ cb = se.get("content_block", {})
+ if cb.get("type") == "tool_use":
+ tool_name = cb.get("name", "")
+ if tool_name in ("Skill", "Read"):
+ pending_tool_name = tool_name
+ accumulated_json = ""
+ else:
+ return False
+
+ elif se_type == "content_block_delta" and pending_tool_name:
+ delta = se.get("delta", {})
+ if delta.get("type") == "input_json_delta":
+ accumulated_json += delta.get("partial_json", "")
+ if clean_name in accumulated_json:
+ return True
+
+ elif se_type in ("content_block_stop", "message_stop"):
+ if pending_tool_name:
+ return clean_name in accumulated_json
+ if se_type == "message_stop":
+ return False
+
+ # Fallback: full assistant message
+ elif event.get("type") == "assistant":
+ message = event.get("message", {})
+ for content_item in message.get("content", []):
+ if content_item.get("type") != "tool_use":
+ continue
+ tool_name = content_item.get("name", "")
+ tool_input = content_item.get("input", {})
+ if tool_name == "Skill" and clean_name in tool_input.get("skill", ""):
+ triggered = True
+ elif tool_name == "Read" and clean_name in tool_input.get("file_path", ""):
+ triggered = True
+ return triggered
+
+ elif event.get("type") == "result":
+ return triggered
+ finally:
+ # Clean up process on any exit path (return, exception, timeout)
+ if process.poll() is None:
+ process.kill()
+ process.wait()
+
+ return triggered
+ finally:
+ if command_file.exists():
+ command_file.unlink()
+
+
+def run_eval(
+ eval_set: list[dict],
+ skill_name: str,
+ description: str,
+ num_workers: int,
+ timeout: int,
+ project_root: Path,
+ runs_per_query: int = 1,
+ trigger_threshold: float = 0.5,
+ model: str | None = None,
+) -> dict:
+ """Run the full eval set and return results."""
+ results = []
+
+ with ProcessPoolExecutor(max_workers=num_workers) as executor:
+ future_to_info = {}
+ for item in eval_set:
+ for run_idx in range(runs_per_query):
+ future = executor.submit(
+ run_single_query,
+ item["query"],
+ skill_name,
+ description,
+ timeout,
+ str(project_root),
+ model,
+ )
+ future_to_info[future] = (item, run_idx)
+
+ query_triggers: dict[str, list[bool]] = {}
+ query_items: dict[str, dict] = {}
+ for future in as_completed(future_to_info):
+ item, _ = future_to_info[future]
+ query = item["query"]
+ query_items[query] = item
+ if query not in query_triggers:
+ query_triggers[query] = []
+ try:
+ query_triggers[query].append(future.result())
+ except Exception as e:
+ print(f"Warning: query failed: {e}", file=sys.stderr)
+ query_triggers[query].append(False)
+
+ for query, triggers in query_triggers.items():
+ item = query_items[query]
+ trigger_rate = sum(triggers) / len(triggers)
+ should_trigger = item["should_trigger"]
+ if should_trigger:
+ did_pass = trigger_rate >= trigger_threshold
+ else:
+ did_pass = trigger_rate < trigger_threshold
+ results.append({
+ "query": query,
+ "should_trigger": should_trigger,
+ "trigger_rate": trigger_rate,
+ "triggers": sum(triggers),
+ "runs": len(triggers),
+ "pass": did_pass,
+ })
+
+ passed = sum(1 for r in results if r["pass"])
+ total = len(results)
+
+ return {
+ "skill_name": skill_name,
+ "description": description,
+ "results": results,
+ "summary": {
+ "total": total,
+ "passed": passed,
+ "failed": total - passed,
+ },
+ }
+
+
+def main():
+ parser = argparse.ArgumentParser(description="Run trigger evaluation for a skill description")
+ parser.add_argument("--eval-set", required=True, help="Path to eval set JSON file")
+ parser.add_argument("--skill-path", required=True, help="Path to skill directory")
+ parser.add_argument("--description", default=None, help="Override description to test")
+ parser.add_argument("--num-workers", type=int, default=10, help="Number of parallel workers")
+ parser.add_argument("--timeout", type=int, default=30, help="Timeout per query in seconds")
+ parser.add_argument("--runs-per-query", type=int, default=3, help="Number of runs per query")
+ parser.add_argument("--trigger-threshold", type=float, default=0.5, help="Trigger rate threshold")
+ parser.add_argument("--model", default=None, help="Model to use for claude -p (default: user's configured model)")
+ parser.add_argument("--verbose", action="store_true", help="Print progress to stderr")
+ args = parser.parse_args()
+
+ eval_set = json.loads(Path(args.eval_set).read_text())
+ skill_path = Path(args.skill_path)
+
+ if not (skill_path / "SKILL.md").exists():
+ print(f"Error: No SKILL.md found at {skill_path}", file=sys.stderr)
+ sys.exit(1)
+
+ name, original_description, content = parse_skill_md(skill_path)
+ description = args.description or original_description
+ project_root = find_project_root()
+
+ if args.verbose:
+ print(f"Evaluating: {description}", file=sys.stderr)
+
+ output = run_eval(
+ eval_set=eval_set,
+ skill_name=name,
+ description=description,
+ num_workers=args.num_workers,
+ timeout=args.timeout,
+ project_root=project_root,
+ runs_per_query=args.runs_per_query,
+ trigger_threshold=args.trigger_threshold,
+ model=args.model,
+ )
+
+ if args.verbose:
+ summary = output["summary"]
+ print(f"Results: {summary['passed']}/{summary['total']} passed", file=sys.stderr)
+ for r in output["results"]:
+ status = "PASS" if r["pass"] else "FAIL"
+ rate_str = f"{r['triggers']}/{r['runs']}"
+ print(f" [{status}] rate={rate_str} expected={r['should_trigger']}: {r['query'][:70]}", file=sys.stderr)
+
+ print(json.dumps(output, indent=2))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.claude/skills/skill-creator/scripts/run_loop.py b/.claude/skills/skill-creator/scripts/run_loop.py
new file mode 100644
index 0000000..36f9b4e
--- /dev/null
+++ b/.claude/skills/skill-creator/scripts/run_loop.py
@@ -0,0 +1,332 @@
+#!/usr/bin/env python3
+"""Run the eval + improve loop until all pass or max iterations reached.
+
+Combines run_eval.py and improve_description.py in a loop, tracking history
+and returning the best description found. Supports train/test split to prevent
+overfitting.
+"""
+
+import argparse
+import json
+import random
+import sys
+import tempfile
+import time
+import webbrowser
+from pathlib import Path
+
+import anthropic
+
+from scripts.generate_report import generate_html
+from scripts.improve_description import improve_description
+from scripts.run_eval import find_project_root, run_eval
+from scripts.utils import parse_skill_md
+
+
+def split_eval_set(eval_set: list[dict], holdout: float, seed: int = 42) -> tuple[list[dict], list[dict]]:
+ """Split eval set into train and test sets, stratified by should_trigger."""
+ random.seed(seed)
+
+ # Separate by should_trigger
+ trigger = [e for e in eval_set if e["should_trigger"]]
+ no_trigger = [e for e in eval_set if not e["should_trigger"]]
+
+ # Shuffle each group
+ random.shuffle(trigger)
+ random.shuffle(no_trigger)
+
+ # Calculate split points
+ n_trigger_test = max(1, int(len(trigger) * holdout))
+ n_no_trigger_test = max(1, int(len(no_trigger) * holdout))
+
+ # Split
+ test_set = trigger[:n_trigger_test] + no_trigger[:n_no_trigger_test]
+ train_set = trigger[n_trigger_test:] + no_trigger[n_no_trigger_test:]
+
+ return train_set, test_set
+
+
+def run_loop(
+ eval_set: list[dict],
+ skill_path: Path,
+ description_override: str | None,
+ num_workers: int,
+ timeout: int,
+ max_iterations: int,
+ runs_per_query: int,
+ trigger_threshold: float,
+ holdout: float,
+ model: str,
+ verbose: bool,
+ live_report_path: Path | None = None,
+ log_dir: Path | None = None,
+) -> dict:
+ """Run the eval + improvement loop."""
+ project_root = find_project_root()
+ name, original_description, content = parse_skill_md(skill_path)
+ current_description = description_override or original_description
+
+ # Split into train/test if holdout > 0
+ if holdout > 0:
+ train_set, test_set = split_eval_set(eval_set, holdout)
+ if verbose:
+ print(f"Split: {len(train_set)} train, {len(test_set)} test (holdout={holdout})", file=sys.stderr)
+ else:
+ train_set = eval_set
+ test_set = []
+
+ client = anthropic.Anthropic()
+ history = []
+ exit_reason = "unknown"
+
+ for iteration in range(1, max_iterations + 1):
+ if verbose:
+ print(f"\n{'='*60}", file=sys.stderr)
+ print(f"Iteration {iteration}/{max_iterations}", file=sys.stderr)
+ print(f"Description: {current_description}", file=sys.stderr)
+ print(f"{'='*60}", file=sys.stderr)
+
+ # Evaluate train + test together in one batch for parallelism
+ all_queries = train_set + test_set
+ t0 = time.time()
+ all_results = run_eval(
+ eval_set=all_queries,
+ skill_name=name,
+ description=current_description,
+ num_workers=num_workers,
+ timeout=timeout,
+ project_root=project_root,
+ runs_per_query=runs_per_query,
+ trigger_threshold=trigger_threshold,
+ model=model,
+ )
+ eval_elapsed = time.time() - t0
+
+ # Split results back into train/test by matching queries
+ train_queries_set = {q["query"] for q in train_set}
+ train_result_list = [r for r in all_results["results"] if r["query"] in train_queries_set]
+ test_result_list = [r for r in all_results["results"] if r["query"] not in train_queries_set]
+
+ train_passed = sum(1 for r in train_result_list if r["pass"])
+ train_total = len(train_result_list)
+ train_summary = {"passed": train_passed, "failed": train_total - train_passed, "total": train_total}
+ train_results = {"results": train_result_list, "summary": train_summary}
+
+ if test_set:
+ test_passed = sum(1 for r in test_result_list if r["pass"])
+ test_total = len(test_result_list)
+ test_summary = {"passed": test_passed, "failed": test_total - test_passed, "total": test_total}
+ test_results = {"results": test_result_list, "summary": test_summary}
+ else:
+ test_results = None
+ test_summary = None
+
+ history.append({
+ "iteration": iteration,
+ "description": current_description,
+ "train_passed": train_summary["passed"],
+ "train_failed": train_summary["failed"],
+ "train_total": train_summary["total"],
+ "train_results": train_results["results"],
+ "test_passed": test_summary["passed"] if test_summary else None,
+ "test_failed": test_summary["failed"] if test_summary else None,
+ "test_total": test_summary["total"] if test_summary else None,
+ "test_results": test_results["results"] if test_results else None,
+ # For backward compat with report generator
+ "passed": train_summary["passed"],
+ "failed": train_summary["failed"],
+ "total": train_summary["total"],
+ "results": train_results["results"],
+ })
+
+ # Write live report if path provided
+ if live_report_path:
+ partial_output = {
+ "original_description": original_description,
+ "best_description": current_description,
+ "best_score": "in progress",
+ "iterations_run": len(history),
+ "holdout": holdout,
+ "train_size": len(train_set),
+ "test_size": len(test_set),
+ "history": history,
+ }
+ live_report_path.write_text(generate_html(partial_output, auto_refresh=True, skill_name=name))
+
+ if verbose:
+ def print_eval_stats(label, results, elapsed):
+ pos = [r for r in results if r["should_trigger"]]
+ neg = [r for r in results if not r["should_trigger"]]
+ tp = sum(r["triggers"] for r in pos)
+ pos_runs = sum(r["runs"] for r in pos)
+ fn = pos_runs - tp
+ fp = sum(r["triggers"] for r in neg)
+ neg_runs = sum(r["runs"] for r in neg)
+ tn = neg_runs - fp
+ total = tp + tn + fp + fn
+ precision = tp / (tp + fp) if (tp + fp) > 0 else 1.0
+ recall = tp / (tp + fn) if (tp + fn) > 0 else 1.0
+ accuracy = (tp + tn) / total if total > 0 else 0.0
+ print(f"{label}: {tp+tn}/{total} correct, precision={precision:.0%} recall={recall:.0%} accuracy={accuracy:.0%} ({elapsed:.1f}s)", file=sys.stderr)
+ for r in results:
+ status = "PASS" if r["pass"] else "FAIL"
+ rate_str = f"{r['triggers']}/{r['runs']}"
+ print(f" [{status}] rate={rate_str} expected={r['should_trigger']}: {r['query'][:60]}", file=sys.stderr)
+
+ print_eval_stats("Train", train_results["results"], eval_elapsed)
+ if test_summary:
+ print_eval_stats("Test ", test_results["results"], 0)
+
+ if train_summary["failed"] == 0:
+ exit_reason = f"all_passed (iteration {iteration})"
+ if verbose:
+ print(f"\nAll train queries passed on iteration {iteration}!", file=sys.stderr)
+ break
+
+ if iteration == max_iterations:
+ exit_reason = f"max_iterations ({max_iterations})"
+ if verbose:
+ print(f"\nMax iterations reached ({max_iterations}).", file=sys.stderr)
+ break
+
+ # Improve the description based on train results
+ if verbose:
+ print(f"\nImproving description...", file=sys.stderr)
+
+ t0 = time.time()
+ # Strip test scores from history so improvement model can't see them
+ blinded_history = [
+ {k: v for k, v in h.items() if not k.startswith("test_")}
+ for h in history
+ ]
+ new_description = improve_description(
+ client=client,
+ skill_name=name,
+ skill_content=content,
+ current_description=current_description,
+ eval_results=train_results,
+ history=blinded_history,
+ model=model,
+ log_dir=log_dir,
+ iteration=iteration,
+ )
+ improve_elapsed = time.time() - t0
+
+ if verbose:
+ print(f"Proposed ({improve_elapsed:.1f}s): {new_description}", file=sys.stderr)
+
+ current_description = new_description
+
+ # Find the best iteration by TEST score (or train if no test set)
+ if test_set:
+ best = max(history, key=lambda h: h["test_passed"] or 0)
+ best_score = f"{best['test_passed']}/{best['test_total']}"
+ else:
+ best = max(history, key=lambda h: h["train_passed"])
+ best_score = f"{best['train_passed']}/{best['train_total']}"
+
+ if verbose:
+ print(f"\nExit reason: {exit_reason}", file=sys.stderr)
+ print(f"Best score: {best_score} (iteration {best['iteration']})", file=sys.stderr)
+
+ return {
+ "exit_reason": exit_reason,
+ "original_description": original_description,
+ "best_description": best["description"],
+ "best_score": best_score,
+ "best_train_score": f"{best['train_passed']}/{best['train_total']}",
+ "best_test_score": f"{best['test_passed']}/{best['test_total']}" if test_set else None,
+ "final_description": current_description,
+ "iterations_run": len(history),
+ "holdout": holdout,
+ "train_size": len(train_set),
+ "test_size": len(test_set),
+ "history": history,
+ }
+
+
+def main():
+ parser = argparse.ArgumentParser(description="Run eval + improve loop")
+ parser.add_argument("--eval-set", required=True, help="Path to eval set JSON file")
+ parser.add_argument("--skill-path", required=True, help="Path to skill directory")
+ parser.add_argument("--description", default=None, help="Override starting description")
+ parser.add_argument("--num-workers", type=int, default=10, help="Number of parallel workers")
+ parser.add_argument("--timeout", type=int, default=30, help="Timeout per query in seconds")
+ parser.add_argument("--max-iterations", type=int, default=5, help="Max improvement iterations")
+ parser.add_argument("--runs-per-query", type=int, default=3, help="Number of runs per query")
+ parser.add_argument("--trigger-threshold", type=float, default=0.5, help="Trigger rate threshold")
+ parser.add_argument("--holdout", type=float, default=0.4, help="Fraction of eval set to hold out for testing (0 to disable)")
+ parser.add_argument("--model", required=True, help="Model for improvement")
+ parser.add_argument("--verbose", action="store_true", help="Print progress to stderr")
+ parser.add_argument("--report", default="auto", help="Generate HTML report at this path (default: 'auto' for temp file, 'none' to disable)")
+ parser.add_argument("--results-dir", default=None, help="Save all outputs (results.json, report.html, log.txt) to a timestamped subdirectory here")
+ args = parser.parse_args()
+
+ eval_set = json.loads(Path(args.eval_set).read_text())
+ skill_path = Path(args.skill_path)
+
+ if not (skill_path / "SKILL.md").exists():
+ print(f"Error: No SKILL.md found at {skill_path}", file=sys.stderr)
+ sys.exit(1)
+
+ name, _, _ = parse_skill_md(skill_path)
+
+ # Set up live report path
+ if args.report != "none":
+ if args.report == "auto":
+ timestamp = time.strftime("%Y%m%d_%H%M%S")
+ live_report_path = Path(tempfile.gettempdir()) / f"skill_description_report_{skill_path.name}_{timestamp}.html"
+ else:
+ live_report_path = Path(args.report)
+ # Open the report immediately so the user can watch
+ live_report_path.write_text("Starting optimization loop... ")
+ webbrowser.open(str(live_report_path))
+ else:
+ live_report_path = None
+
+ # Determine output directory (create before run_loop so logs can be written)
+ if args.results_dir:
+ timestamp = time.strftime("%Y-%m-%d_%H%M%S")
+ results_dir = Path(args.results_dir) / timestamp
+ results_dir.mkdir(parents=True, exist_ok=True)
+ else:
+ results_dir = None
+
+ log_dir = results_dir / "logs" if results_dir else None
+
+ output = run_loop(
+ eval_set=eval_set,
+ skill_path=skill_path,
+ description_override=args.description,
+ num_workers=args.num_workers,
+ timeout=args.timeout,
+ max_iterations=args.max_iterations,
+ runs_per_query=args.runs_per_query,
+ trigger_threshold=args.trigger_threshold,
+ holdout=args.holdout,
+ model=args.model,
+ verbose=args.verbose,
+ live_report_path=live_report_path,
+ log_dir=log_dir,
+ )
+
+ # Save JSON output
+ json_output = json.dumps(output, indent=2)
+ print(json_output)
+ if results_dir:
+ (results_dir / "results.json").write_text(json_output)
+
+ # Write final HTML report (without auto-refresh)
+ if live_report_path:
+ live_report_path.write_text(generate_html(output, auto_refresh=False, skill_name=name))
+ print(f"\nReport: {live_report_path}", file=sys.stderr)
+
+ if results_dir and live_report_path:
+ (results_dir / "report.html").write_text(generate_html(output, auto_refresh=False, skill_name=name))
+
+ if results_dir:
+ print(f"Results saved to: {results_dir}", file=sys.stderr)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.claude/skills/skill-creator/scripts/utils.py b/.claude/skills/skill-creator/scripts/utils.py
new file mode 100644
index 0000000..51b6a07
--- /dev/null
+++ b/.claude/skills/skill-creator/scripts/utils.py
@@ -0,0 +1,47 @@
+"""Shared utilities for skill-creator scripts."""
+
+from pathlib import Path
+
+
+
+def parse_skill_md(skill_path: Path) -> tuple[str, str, str]:
+ """Parse a SKILL.md file, returning (name, description, full_content)."""
+ content = (skill_path / "SKILL.md").read_text()
+ lines = content.split("\n")
+
+ if lines[0].strip() != "---":
+ raise ValueError("SKILL.md missing frontmatter (no opening ---)")
+
+ end_idx = None
+ for i, line in enumerate(lines[1:], start=1):
+ if line.strip() == "---":
+ end_idx = i
+ break
+
+ if end_idx is None:
+ raise ValueError("SKILL.md missing frontmatter (no closing ---)")
+
+ name = ""
+ description = ""
+ frontmatter_lines = lines[1:end_idx]
+ i = 0
+ while i < len(frontmatter_lines):
+ line = frontmatter_lines[i]
+ if line.startswith("name:"):
+ name = line[len("name:"):].strip().strip('"').strip("'")
+ elif line.startswith("description:"):
+ value = line[len("description:"):].strip()
+ # Handle YAML multiline indicators (>, |, >-, |-)
+ if value in (">", "|", ">-", "|-"):
+ continuation_lines: list[str] = []
+ i += 1
+ while i < len(frontmatter_lines) and (frontmatter_lines[i].startswith(" ") or frontmatter_lines[i].startswith("\t")):
+ continuation_lines.append(frontmatter_lines[i].strip())
+ i += 1
+ description = " ".join(continuation_lines)
+ continue
+ else:
+ description = value.strip('"').strip("'")
+ i += 1
+
+ return name, description, content
diff --git a/.claude/skills/slack-gif-creator/.openskills.json b/.claude/skills/slack-gif-creator/.openskills.json
new file mode 100644
index 0000000..2a64370
--- /dev/null
+++ b/.claude/skills/slack-gif-creator/.openskills.json
@@ -0,0 +1,7 @@
+{
+ "source": "anthropics/skills",
+ "sourceType": "git",
+ "repoUrl": "https://github.com/anthropics/skills",
+ "subpath": "skills\\slack-gif-creator",
+ "installedAt": "2026-03-02T09:19:50.159Z"
+}
\ No newline at end of file
diff --git a/.claude/skills/slack-gif-creator/LICENSE.txt b/.claude/skills/slack-gif-creator/LICENSE.txt
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/.claude/skills/slack-gif-creator/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/.claude/skills/slack-gif-creator/SKILL.md b/.claude/skills/slack-gif-creator/SKILL.md
new file mode 100644
index 0000000..16660d8
--- /dev/null
+++ b/.claude/skills/slack-gif-creator/SKILL.md
@@ -0,0 +1,254 @@
+---
+name: slack-gif-creator
+description: Knowledge and utilities for creating animated GIFs optimized for Slack. Provides constraints, validation tools, and animation concepts. Use when users request animated GIFs for Slack like "make me a GIF of X doing Y for Slack."
+license: Complete terms in LICENSE.txt
+---
+
+# Slack GIF Creator
+
+A toolkit providing utilities and knowledge for creating animated GIFs optimized for Slack.
+
+## Slack Requirements
+
+**Dimensions:**
+- Emoji GIFs: 128x128 (recommended)
+- Message GIFs: 480x480
+
+**Parameters:**
+- FPS: 10-30 (lower is smaller file size)
+- Colors: 48-128 (fewer = smaller file size)
+- Duration: Keep under 3 seconds for emoji GIFs
+
+## Core Workflow
+
+```python
+from core.gif_builder import GIFBuilder
+from PIL import Image, ImageDraw
+
+# 1. Create builder
+builder = GIFBuilder(width=128, height=128, fps=10)
+
+# 2. Generate frames
+for i in range(12):
+ frame = Image.new('RGB', (128, 128), (240, 248, 255))
+ draw = ImageDraw.Draw(frame)
+
+ # Draw your animation using PIL primitives
+ # (circles, polygons, lines, etc.)
+
+ builder.add_frame(frame)
+
+# 3. Save with optimization
+builder.save('output.gif', num_colors=48, optimize_for_emoji=True)
+```
+
+## Drawing Graphics
+
+### Working with User-Uploaded Images
+If a user uploads an image, consider whether they want to:
+- **Use it directly** (e.g., "animate this", "split this into frames")
+- **Use it as inspiration** (e.g., "make something like this")
+
+Load and work with images using PIL:
+```python
+from PIL import Image
+
+uploaded = Image.open('file.png')
+# Use directly, or just as reference for colors/style
+```
+
+### Drawing from Scratch
+When drawing graphics from scratch, use PIL ImageDraw primitives:
+
+```python
+from PIL import ImageDraw
+
+draw = ImageDraw.Draw(frame)
+
+# Circles/ovals
+draw.ellipse([x1, y1, x2, y2], fill=(r, g, b), outline=(r, g, b), width=3)
+
+# Stars, triangles, any polygon
+points = [(x1, y1), (x2, y2), (x3, y3), ...]
+draw.polygon(points, fill=(r, g, b), outline=(r, g, b), width=3)
+
+# Lines
+draw.line([(x1, y1), (x2, y2)], fill=(r, g, b), width=5)
+
+# Rectangles
+draw.rectangle([x1, y1, x2, y2], fill=(r, g, b), outline=(r, g, b), width=3)
+```
+
+**Don't:** use emoji fonts (they render unreliably across platforms) or assume that pre-packaged graphics exist in this skill.
+
+### Making Graphics Look Good
+
+Graphics should look polished and creative, not basic. Here's how:
+
+**Use thicker lines** - Always set `width=2` or higher for outlines and lines. Thin lines (width=1) look choppy and amateurish.
+
+**Add visual depth**:
+- Use gradients for backgrounds (`create_gradient_background`)
+- Layer multiple shapes for complexity (e.g., a star with a smaller star inside)
+
+**Make shapes more interesting**:
+- Don't just draw a plain circle - add highlights, rings, or patterns
+- Stars can have glows (draw larger, semi-transparent versions behind)
+- Combine multiple shapes (stars + sparkles, circles + rings)
+
+**Pay attention to colors**:
+- Use vibrant, complementary colors
+- Add contrast (dark outlines on light shapes, light outlines on dark shapes)
+- Consider the overall composition
+
+**For complex shapes** (hearts, snowflakes, etc.):
+- Use combinations of polygons and ellipses
+- Calculate points carefully for symmetry
+- Add details (a heart can have a highlight curve, snowflakes have intricate branches)
+
+Be creative and detailed! A good Slack GIF should look polished, not like placeholder graphics.
+
+## Available Utilities
+
+### GIFBuilder (`core.gif_builder`)
+Assembles frames and optimizes for Slack:
+```python
+builder = GIFBuilder(width=128, height=128, fps=10)
+builder.add_frame(frame) # Add PIL Image
+builder.add_frames(frames) # Add list of frames
+builder.save('out.gif', num_colors=48, optimize_for_emoji=True, remove_duplicates=True)
+```
+
+### Validators (`core.validators`)
+Check if GIF meets Slack requirements:
+```python
+from core.validators import validate_gif, is_slack_ready
+
+# Detailed validation
+passes, info = validate_gif('my.gif', is_emoji=True, verbose=True)
+
+# Quick check
+if is_slack_ready('my.gif'):
+ print("Ready!")
+```
+
+### Easing Functions (`core.easing`)
+Smooth motion instead of linear:
+```python
+from core.easing import interpolate
+
+# Progress from 0.0 to 1.0
+t = i / (num_frames - 1)
+
+# Apply easing
+y = interpolate(start=0, end=400, t=t, easing='ease_out')
+
+# Available: linear, ease_in, ease_out, ease_in_out,
+# bounce_out, elastic_out, back_out
+```
+
+### Frame Helpers (`core.frame_composer`)
+Convenience functions for common needs:
+```python
+from core.frame_composer import (
+ create_blank_frame, # Solid color background
+ create_gradient_background, # Vertical gradient
+ draw_circle, # Helper for circles
+ draw_text, # Simple text rendering
+ draw_star # 5-pointed star
+)
+```
+
+## Animation Concepts
+
+### Shake/Vibrate
+Offset object position with oscillation:
+- Use `math.sin()` or `math.cos()` with frame index
+- Add small random variations for natural feel
+- Apply to x and/or y position
+
+### Pulse/Heartbeat
+Scale object size rhythmically:
+- Use `math.sin(t * frequency * 2 * math.pi)` for smooth pulse
+- For heartbeat: two quick pulses then pause (adjust sine wave)
+- Scale between 0.8 and 1.2 of base size
+
+### Bounce
+Object falls and bounces:
+- Use `interpolate()` with `easing='bounce_out'` for landing
+- Use `easing='ease_in'` for falling (accelerating)
+- Apply gravity by increasing y velocity each frame
+
+### Spin/Rotate
+Rotate object around center:
+- PIL: `image.rotate(angle, resample=Image.BICUBIC)`
+- For wobble: use sine wave for angle instead of linear
+
+### Fade In/Out
+Gradually appear or disappear:
+- Create RGBA image, adjust alpha channel
+- Or use `Image.blend(image1, image2, alpha)`
+- Fade in: alpha from 0 to 1
+- Fade out: alpha from 1 to 0
+
+### Slide
+Move object from off-screen to position:
+- Start position: outside frame bounds
+- End position: target location
+- Use `interpolate()` with `easing='ease_out'` for smooth stop
+- For overshoot: use `easing='back_out'`
+
+### Zoom
+Scale and position for zoom effect:
+- Zoom in: scale from 0.1 to 2.0, crop center
+- Zoom out: scale from 2.0 to 1.0
+- Can add motion blur for drama (PIL filter)
+
+### Explode/Particle Burst
+Create particles radiating outward:
+- Generate particles with random angles and velocities
+- Update each particle: `x += vx`, `y += vy`
+- Add gravity: `vy += gravity_constant`
+- Fade out particles over time (reduce alpha)
+
+## Optimization Strategies
+
+Only when asked to reduce the file size, apply a few of the following methods:
+
+1. **Fewer frames** - Lower FPS (10 instead of 20) or shorter duration
+2. **Fewer colors** - `num_colors=48` instead of 128
+3. **Smaller dimensions** - 128x128 instead of 480x480
+4. **Remove duplicates** - `remove_duplicates=True` in save()
+5. **Emoji mode** - `optimize_for_emoji=True` auto-optimizes
+
+```python
+# Maximum optimization for emoji
+builder.save(
+ 'emoji.gif',
+ num_colors=48,
+ optimize_for_emoji=True,
+ remove_duplicates=True
+)
+```
+
+## Philosophy
+
+This skill provides:
+- **Knowledge**: Slack's requirements and animation concepts
+- **Utilities**: GIFBuilder, validators, easing functions
+- **Flexibility**: Create the animation logic using PIL primitives
+
+It does NOT provide:
+- Rigid animation templates or pre-made functions
+- Emoji font rendering (unreliable across platforms)
+- A library of pre-packaged graphics built into the skill
+
+**Note on user uploads**: This skill doesn't include pre-built graphics, but if a user uploads an image, use PIL to load and work with it - interpret based on their request whether they want it used directly or just as inspiration.
+
+Be creative! Combine concepts (bouncing + rotating, pulsing + sliding, etc.) and use PIL's full capabilities.
+
+## Dependencies
+
+```bash
+pip install pillow imageio numpy
+```
diff --git a/.claude/skills/slack-gif-creator/core/easing.py b/.claude/skills/slack-gif-creator/core/easing.py
new file mode 100644
index 0000000..772fa83
--- /dev/null
+++ b/.claude/skills/slack-gif-creator/core/easing.py
@@ -0,0 +1,234 @@
+#!/usr/bin/env python3
+"""
+Easing Functions - Timing functions for smooth animations.
+
+Provides various easing functions for natural motion and timing.
+All functions take a value t (0.0 to 1.0) and return eased value (0.0 to 1.0).
+"""
+
+import math
+
+
def linear(t: float) -> float:
    """Identity easing: progress maps straight through unchanged."""
    return t


def ease_in_quad(t: float) -> float:
    """Quadratic ease-in: starts slow, accelerates toward the end."""
    return t * t


def ease_out_quad(t: float) -> float:
    """Quadratic ease-out: starts fast, decelerates toward the end."""
    return t * (2 - t)


def ease_in_out_quad(t: float) -> float:
    """Quadratic ease-in-out: gentle at both ends, fastest mid-way."""
    return 2 * t * t if t < 0.5 else -1 + (4 - 2 * t) * t


def ease_in_cubic(t: float) -> float:
    """Cubic ease-in: a slower start than the quadratic variant."""
    return t * t * t


def ease_out_cubic(t: float) -> float:
    """Cubic ease-out: a faster start than the quadratic variant."""
    u = t - 1
    return u * u * u + 1


def ease_in_out_cubic(t: float) -> float:
    """Cubic ease-in-out: pronounced slow-fast-slow profile."""
    if t < 0.5:
        return 4 * t * t * t
    u = t - 1
    v = 2 * t - 2
    return u * v * v + 1
+
+
def ease_in_bounce(t: float) -> float:
    """Bounce ease-in: mirrors the ease-out bounce at the start."""
    return 1 - ease_out_bounce(1 - t)


def ease_out_bounce(t: float) -> float:
    """Bounce ease-out: a ball-drop settle with diminishing rebounds."""
    # Piecewise parabolas; 7.5625 and the /2.75 breakpoints are the
    # classic bounce constants.
    if t < 1 / 2.75:
        return 7.5625 * t * t
    if t < 2 / 2.75:
        u = t - 1.5 / 2.75
        return 7.5625 * u * u + 0.75
    if t < 2.5 / 2.75:
        u = t - 2.25 / 2.75
        return 7.5625 * u * u + 0.9375
    u = t - 2.625 / 2.75
    return 7.5625 * u * u + 0.984375


def ease_in_out_bounce(t: float) -> float:
    """Bounce ease-in-out: bouncy at both the start and the end."""
    if t < 0.5:
        return ease_in_bounce(t * 2) * 0.5
    return ease_out_bounce(t * 2 - 1) * 0.5 + 0.5
+
+
def ease_in_elastic(t: float) -> float:
    """Elastic ease-in: winds up like a spring before releasing."""
    if t in (0, 1):
        # Endpoints are exact; the formula below would drift slightly.
        return t
    return -math.pow(2, 10 * (t - 1)) * math.sin((t - 1.1) * 5 * math.pi)


def ease_out_elastic(t: float) -> float:
    """Elastic ease-out: overshoots, then oscillates into place."""
    if t in (0, 1):
        return t
    return math.pow(2, -10 * t) * math.sin((t - 0.1) * 5 * math.pi) + 1


def ease_in_out_elastic(t: float) -> float:
    """Elastic ease-in-out: spring behaviour on both halves."""
    if t in (0, 1):
        return t
    t = t * 2 - 1
    if t < 0:
        return -0.5 * math.pow(2, 10 * t) * math.sin((t - 0.1) * 5 * math.pi)
    return math.pow(2, -10 * t) * math.sin((t - 0.1) * 5 * math.pi) * 0.5 + 1
+
+
# Convenience mapping from short names to easing callables; extended with
# the back-easing aliases further down the module.
EASING_FUNCTIONS = dict(
    linear=linear,
    ease_in=ease_in_quad,
    ease_out=ease_out_quad,
    ease_in_out=ease_in_out_quad,
    bounce_in=ease_in_bounce,
    bounce_out=ease_out_bounce,
    bounce=ease_in_out_bounce,
    elastic_in=ease_in_elastic,
    elastic_out=ease_out_elastic,
    elastic=ease_in_out_elastic,
)
+
+
def get_easing(name: str = "linear"):
    """Return the easing callable registered under ``name``.

    Unknown names fall back to :func:`linear`.
    """
    try:
        return EASING_FUNCTIONS[name]
    except KeyError:
        return linear
+
+
def interpolate(start: float, end: float, t: float, easing: str = "linear") -> float:
    """
    Interpolate between two values with easing.

    Args:
        start: Start value
        end: End value
        t: Progress from 0.0 to 1.0
        easing: Name of easing function (see EASING_FUNCTIONS)

    Returns:
        Interpolated value
    """
    # Remap linear progress through the chosen easing curve, then lerp.
    eased = get_easing(easing)(t)
    return start + (end - start) * eased
+
+
def ease_back_in(t: float) -> float:
    """Back ease-in: pulls slightly backward before moving forward."""
    c1 = 1.70158  # classic overshoot constant (~10% overshoot)
    c3 = c1 + 1
    return c3 * t * t * t - c1 * t * t


def ease_back_out(t: float) -> float:
    """Back ease-out: overshoots the target, then settles back."""
    c1 = 1.70158
    c3 = c1 + 1
    return 1 + c3 * pow(t - 1, 3) + c1 * pow(t - 1, 2)


def ease_back_in_out(t: float) -> float:
    """Back ease-in-out: overshoot at both ends of the motion."""
    c1 = 1.70158
    c2 = c1 * 1.525  # scaled constant for the combined curve
    if t < 0.5:
        return (pow(2 * t, 2) * ((c2 + 1) * 2 * t - c2)) / 2
    return (pow(2 * t - 2, 2) * ((c2 + 1) * (t * 2 - 2) + c2) + 2) / 2
+
+
def apply_squash_stretch(
    base_scale: tuple[float, float], intensity: float, direction: str = "vertical"
) -> tuple[float, float]:
    """
    Calculate squash and stretch scales for more dynamic animation.

    For 'vertical'/'horizontal' the transform is volume-preserving: one
    axis compresses while the other expands by the same factor. Unknown
    directions return the base scales unchanged.

    Args:
        base_scale: (width_scale, height_scale) base scales
        intensity: Squash/stretch intensity (0.0-1.0)
        direction: 'vertical', 'horizontal', or 'both'

    Returns:
        (width_scale, height_scale) with squash/stretch applied
    """
    w, h = base_scale
    shift = intensity * 0.5
    if direction == "vertical":
        w, h = w * (1 + shift), h * (1 - shift)
    elif direction == "horizontal":
        w, h = w * (1 - shift), h * (1 + shift)
    elif direction == "both":
        # Uniform squash on both axes (not volume preserving).
        w, h = w * (1 - intensity * 0.3), h * (1 - intensity * 0.3)
    return (w, h)
+
+
def calculate_arc_motion(
    start: tuple[float, float], end: tuple[float, float], height: float, t: float
) -> tuple[float, float]:
    """
    Calculate position along a parabolic arc (natural motion path).

    Args:
        start: (x, y) starting position
        end: (x, y) ending position
        height: Arc height at midpoint (positive = upward)
        t: Progress (0.0-1.0)

    Returns:
        (x, y) position along arc
    """
    sx, sy = start
    ex, ey = end

    # x moves linearly; y gets a parabolic lift 4*h*t*(1-t), which is
    # zero at both endpoints and peaks at exactly h when t = 0.5.
    lift = 4 * height * t * (1 - t)
    return (sx + (ex - sx) * t, sy + (ey - sy) * t - lift)
+
+
# Register the back-easing curves (plus intuitive aliases) so that
# get_easing()/interpolate() can reach them by name.
EASING_FUNCTIONS.update(
    back_in=ease_back_in,
    back_out=ease_back_out,
    back_in_out=ease_back_in_out,
    anticipate=ease_back_in,  # alias for back_in
    overshoot=ease_back_out,  # alias for back_out
)
diff --git a/.claude/skills/slack-gif-creator/core/frame_composer.py b/.claude/skills/slack-gif-creator/core/frame_composer.py
new file mode 100644
index 0000000..1afe434
--- /dev/null
+++ b/.claude/skills/slack-gif-creator/core/frame_composer.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python3
+"""
+Frame Composer - Utilities for composing visual elements into frames.
+
+Provides functions for drawing shapes, text, emojis, and compositing elements
+together to create animation frames.
+"""
+
+from typing import Optional
+
+import numpy as np
+from PIL import Image, ImageDraw, ImageFont
+
+
def create_blank_frame(
    width: int, height: int, color: tuple[int, int, int] = (255, 255, 255)
) -> Image.Image:
    """
    Create a solid-color RGB frame.

    Args:
        width: Frame width in pixels
        height: Frame height in pixels
        color: RGB background color (default: white)

    Returns:
        New PIL Image filled entirely with ``color``
    """
    return Image.new("RGB", (width, height), color)
+
+
def draw_circle(
    frame: Image.Image,
    center: tuple[int, int],
    radius: int,
    fill_color: Optional[tuple[int, int, int]] = None,
    outline_color: Optional[tuple[int, int, int]] = None,
    outline_width: int = 1,
) -> Image.Image:
    """
    Draw a circle onto ``frame`` (in place) and return the frame.

    Args:
        frame: PIL Image to draw on
        center: (x, y) center position
        radius: Circle radius in pixels
        fill_color: RGB fill color, or None for no fill
        outline_color: RGB outline color, or None for no outline
        outline_width: Outline thickness in pixels

    Returns:
        The same frame, for call chaining
    """
    cx, cy = center
    # ellipse() takes a bounding box, so expand the center by the radius.
    ImageDraw.Draw(frame).ellipse(
        [cx - radius, cy - radius, cx + radius, cy + radius],
        fill=fill_color,
        outline=outline_color,
        width=outline_width,
    )
    return frame
+
+
def draw_text(
    frame: Image.Image,
    text: str,
    position: tuple[int, int],
    color: tuple[int, int, int] = (0, 0, 0),
    centered: bool = False,
) -> Image.Image:
    """
    Draw text onto ``frame`` (in place) and return the frame.

    Args:
        frame: PIL Image to draw on
        text: Text to render
        position: (x, y) anchor; top-left corner unless ``centered``
        color: RGB text color
        centered: If True, center the text on ``position``

    Returns:
        The same frame, for call chaining
    """
    draw = ImageDraw.Draw(frame)

    # Pillow's built-in bitmap font keeps this dependency-free; swap in
    # ImageFont.truetype(...) here if a different face is required.
    font = ImageFont.load_default()

    anchor_x, anchor_y = position
    if centered:
        # Shift the anchor left/up by half the rendered text extents.
        left, top, right, bottom = draw.textbbox((0, 0), text, font=font)
        anchor_x -= (right - left) // 2
        anchor_y -= (bottom - top) // 2

    draw.text((anchor_x, anchor_y), text, fill=color, font=font)
    return frame
+
+
def create_gradient_background(
    width: int,
    height: int,
    top_color: tuple[int, int, int],
    bottom_color: tuple[int, int, int],
) -> Image.Image:
    """
    Create a vertical linear gradient background.

    Args:
        width: Frame width
        height: Frame height
        top_color: RGB color of the first row
        bottom_color: RGB color approached by the last row

    Returns:
        PIL Image with the gradient painted one row at a time
    """
    frame = Image.new("RGB", (width, height))
    draw = ImageDraw.Draw(frame)

    for row in range(height):
        # Blend each channel linearly by the row's fractional position.
        ratio = row / height
        shade = tuple(
            int(a * (1 - ratio) + b * ratio)
            for a, b in zip(top_color, bottom_color)
        )
        draw.line([(0, row), (width, row)], fill=shade)

    return frame
+
+
def draw_star(
    frame: Image.Image,
    center: tuple[int, int],
    size: int,
    fill_color: tuple[int, int, int],
    outline_color: Optional[tuple[int, int, int]] = None,
    outline_width: int = 1,
) -> Image.Image:
    """
    Draw a 5-pointed star onto ``frame`` (in place) and return the frame.

    Args:
        frame: PIL Image to draw on
        center: (x, y) center position
        size: Outer radius of the star
        fill_color: RGB fill color
        outline_color: RGB outline color, or None for no outline
        outline_width: Outline thickness in pixels

    Returns:
        The same frame, for call chaining
    """
    import math

    cx, cy = center

    # Ten vertices alternate between the outer radius (points) and an
    # inner radius at 40% (notches), stepping 36 degrees per vertex and
    # starting from straight up (-90 degrees).
    vertices = []
    for step in range(10):
        angle = (step * 36 - 90) * math.pi / 180
        radius = size if step % 2 == 0 else size * 0.4
        vertices.append((cx + radius * math.cos(angle), cy + radius * math.sin(angle)))

    ImageDraw.Draw(frame).polygon(
        vertices, fill=fill_color, outline=outline_color, width=outline_width
    )
    return frame
diff --git a/.claude/skills/slack-gif-creator/core/gif_builder.py b/.claude/skills/slack-gif-creator/core/gif_builder.py
new file mode 100644
index 0000000..5759f14
--- /dev/null
+++ b/.claude/skills/slack-gif-creator/core/gif_builder.py
@@ -0,0 +1,269 @@
+#!/usr/bin/env python3
+"""
+GIF Builder - Core module for assembling frames into GIFs optimized for Slack.
+
+This module provides the main interface for creating GIFs from programmatically
+generated frames, with automatic optimization for Slack's requirements.
+"""
+
+from pathlib import Path
+from typing import Optional
+
+import imageio.v3 as imageio
+import numpy as np
+from PIL import Image
+
+
+class GIFBuilder:
+ """Builder for creating optimized GIFs from frames."""
+
+ def __init__(self, width: int = 480, height: int = 480, fps: int = 15):
+ """
+ Initialize GIF builder.
+
+ Args:
+ width: Frame width in pixels
+ height: Frame height in pixels
+ fps: Frames per second
+ """
+ self.width = width
+ self.height = height
+ self.fps = fps
+ self.frames: list[np.ndarray] = []
+
+ def add_frame(self, frame: np.ndarray | Image.Image):
+ """
+ Add a frame to the GIF.
+
+ Args:
+ frame: Frame as numpy array or PIL Image (will be converted to RGB)
+ """
+ if isinstance(frame, Image.Image):
+ frame = np.array(frame.convert("RGB"))
+
+ # Ensure frame is correct size
+ if frame.shape[:2] != (self.height, self.width):
+ pil_frame = Image.fromarray(frame)
+ pil_frame = pil_frame.resize(
+ (self.width, self.height), Image.Resampling.LANCZOS
+ )
+ frame = np.array(pil_frame)
+
+ self.frames.append(frame)
+
+ def add_frames(self, frames: list[np.ndarray | Image.Image]):
+ """Add multiple frames at once."""
+ for frame in frames:
+ self.add_frame(frame)
+
+ def optimize_colors(
+ self, num_colors: int = 128, use_global_palette: bool = True
+ ) -> list[np.ndarray]:
+ """
+ Reduce colors in all frames using quantization.
+
+ Args:
+ num_colors: Target number of colors (8-256)
+ use_global_palette: Use a single palette for all frames (better compression)
+
+ Returns:
+ List of color-optimized frames
+ """
+ optimized = []
+
+ if use_global_palette and len(self.frames) > 1:
+ # Create a global palette from all frames
+ # Sample frames to build palette
+ sample_size = min(5, len(self.frames))
+ sample_indices = [
+ int(i * len(self.frames) / sample_size) for i in range(sample_size)
+ ]
+ sample_frames = [self.frames[i] for i in sample_indices]
+
+ # Combine sample frames into a single image for palette generation
+ # Flatten each frame to get all pixels, then stack them
+ all_pixels = np.vstack(
+ [f.reshape(-1, 3) for f in sample_frames]
+ ) # (total_pixels, 3)
+
+ # Create a properly-shaped RGB image from the pixel data
+ # We'll make a roughly square image from all the pixels
+ total_pixels = len(all_pixels)
+ width = min(512, int(np.sqrt(total_pixels))) # Reasonable width, max 512
+ height = (total_pixels + width - 1) // width # Ceiling division
+
+ # Pad if necessary to fill the rectangle
+ pixels_needed = width * height
+ if pixels_needed > total_pixels:
+ padding = np.zeros((pixels_needed - total_pixels, 3), dtype=np.uint8)
+ all_pixels = np.vstack([all_pixels, padding])
+
+ # Reshape to proper RGB image format (H, W, 3)
+ img_array = (
+ all_pixels[:pixels_needed].reshape(height, width, 3).astype(np.uint8)
+ )
+ combined_img = Image.fromarray(img_array, mode="RGB")
+
+ # Generate global palette
+ global_palette = combined_img.quantize(colors=num_colors, method=2)
+
+ # Apply global palette to all frames
+ for frame in self.frames:
+ pil_frame = Image.fromarray(frame)
+ quantized = pil_frame.quantize(palette=global_palette, dither=1)
+ optimized.append(np.array(quantized.convert("RGB")))
+ else:
+ # Use per-frame quantization
+ for frame in self.frames:
+ pil_frame = Image.fromarray(frame)
+ quantized = pil_frame.quantize(colors=num_colors, method=2, dither=1)
+ optimized.append(np.array(quantized.convert("RGB")))
+
+ return optimized
+
+ def deduplicate_frames(self, threshold: float = 0.9995) -> int:
+ """
+ Remove duplicate or near-duplicate consecutive frames.
+
+ Args:
+ threshold: Similarity threshold (0.0-1.0). Higher = more strict (0.9995 = nearly identical).
+ Use 0.9995+ to preserve subtle animations, 0.98 for aggressive removal.
+
+ Returns:
+ Number of frames removed
+ """
+ if len(self.frames) < 2:
+ return 0
+
+ deduplicated = [self.frames[0]]
+ removed_count = 0
+
+ for i in range(1, len(self.frames)):
+ # Compare with previous frame
+ prev_frame = np.array(deduplicated[-1], dtype=np.float32)
+ curr_frame = np.array(self.frames[i], dtype=np.float32)
+
+ # Calculate similarity (normalized)
+ diff = np.abs(prev_frame - curr_frame)
+ similarity = 1.0 - (np.mean(diff) / 255.0)
+
+ # Keep frame if sufficiently different
+ # High threshold (0.9995+) means only remove nearly identical frames
+ if similarity < threshold:
+ deduplicated.append(self.frames[i])
+ else:
+ removed_count += 1
+
+ self.frames = deduplicated
+ return removed_count
+
+ def save(
+ self,
+ output_path: str | Path,
+ num_colors: int = 128,
+ optimize_for_emoji: bool = False,
+ remove_duplicates: bool = False,
+ ) -> dict:
+ """
+ Save frames as optimized GIF for Slack.
+
+ Args:
+ output_path: Where to save the GIF
+ num_colors: Number of colors to use (fewer = smaller file)
+ optimize_for_emoji: If True, optimize for emoji size (128x128, fewer colors)
+ remove_duplicates: If True, remove duplicate consecutive frames (opt-in)
+
+ Returns:
+ Dictionary with file info (path, size, dimensions, frame_count)
+ """
+ if not self.frames:
+ raise ValueError("No frames to save. Add frames with add_frame() first.")
+
+ output_path = Path(output_path)
+
+ # Remove duplicate frames to reduce file size
+ if remove_duplicates:
+ removed = self.deduplicate_frames(threshold=0.9995)
+ if removed > 0:
+ print(
+ f" Removed {removed} nearly identical frames (preserved subtle animations)"
+ )
+
+ # Optimize for emoji if requested
+ if optimize_for_emoji:
+ if self.width > 128 or self.height > 128:
+ print(
+ f" Resizing from {self.width}x{self.height} to 128x128 for emoji"
+ )
+ self.width = 128
+ self.height = 128
+ # Resize all frames
+ resized_frames = []
+ for frame in self.frames:
+ pil_frame = Image.fromarray(frame)
+ pil_frame = pil_frame.resize((128, 128), Image.Resampling.LANCZOS)
+ resized_frames.append(np.array(pil_frame))
+ self.frames = resized_frames
+ num_colors = min(num_colors, 48) # More aggressive color limit for emoji
+
+ # More aggressive FPS reduction for emoji
+ if len(self.frames) > 12:
+ print(
+ f" Reducing frames from {len(self.frames)} to ~12 for emoji size"
+ )
+ # Keep every nth frame to get close to 12 frames
+ keep_every = max(1, len(self.frames) // 12)
+ self.frames = [
+ self.frames[i] for i in range(0, len(self.frames), keep_every)
+ ]
+
+ # Optimize colors with global palette
+ optimized_frames = self.optimize_colors(num_colors, use_global_palette=True)
+
+ # Calculate frame duration in milliseconds
+ frame_duration = 1000 / self.fps
+
+ # Save GIF
+ imageio.imwrite(
+ output_path,
+ optimized_frames,
+ duration=frame_duration,
+ loop=0, # Infinite loop
+ )
+
+ # Get file info
+ file_size_kb = output_path.stat().st_size / 1024
+ file_size_mb = file_size_kb / 1024
+
+ info = {
+ "path": str(output_path),
+ "size_kb": file_size_kb,
+ "size_mb": file_size_mb,
+ "dimensions": f"{self.width}x{self.height}",
+ "frame_count": len(optimized_frames),
+ "fps": self.fps,
+ "duration_seconds": len(optimized_frames) / self.fps,
+ "colors": num_colors,
+ }
+
+ # Print info
+ print(f"\n✓ GIF created successfully!")
+ print(f" Path: {output_path}")
+ print(f" Size: {file_size_kb:.1f} KB ({file_size_mb:.2f} MB)")
+ print(f" Dimensions: {self.width}x{self.height}")
+ print(f" Frames: {len(optimized_frames)} @ {self.fps} fps")
+ print(f" Duration: {info['duration_seconds']:.1f}s")
+ print(f" Colors: {num_colors}")
+
+ # Size info
+ if optimize_for_emoji:
+ print(f" Optimized for emoji (128x128, reduced colors)")
+ if file_size_mb > 1.0:
+ print(f"\n Note: Large file size ({file_size_kb:.1f} KB)")
+ print(" Consider: fewer frames, smaller dimensions, or fewer colors")
+
+ return info
+
+ def clear(self):
+ """Clear all frames (useful for creating multiple GIFs)."""
+ self.frames = []
diff --git a/.claude/skills/slack-gif-creator/core/validators.py b/.claude/skills/slack-gif-creator/core/validators.py
new file mode 100644
index 0000000..a6f5bdf
--- /dev/null
+++ b/.claude/skills/slack-gif-creator/core/validators.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python3
+"""
+Validators - Check if GIFs meet Slack's requirements.
+
+These validators help ensure your GIFs meet Slack's size and dimension constraints.
+"""
+
+from pathlib import Path
+
+
+def validate_gif(
+ gif_path: str | Path, is_emoji: bool = True, verbose: bool = True
+) -> tuple[bool, dict]:
+ """
+ Validate GIF for Slack (dimensions, size, frame count).
+
+ Args:
+ gif_path: Path to GIF file
+ is_emoji: True for emoji (128x128 recommended), False for message GIF
+ verbose: Print validation details
+
+ Returns:
+ Tuple of (passes: bool, results: dict with all details)
+ """
+ from PIL import Image
+
+ gif_path = Path(gif_path)
+
+ if not gif_path.exists():
+ return False, {"error": f"File not found: {gif_path}"}
+
+ # Get file size
+ size_bytes = gif_path.stat().st_size
+ size_kb = size_bytes / 1024
+ size_mb = size_kb / 1024
+
+ # Get dimensions and frame info
+ try:
+ with Image.open(gif_path) as img:
+ width, height = img.size
+
+ # Count frames
+ frame_count = 0
+ try:
+ while True:
+ img.seek(frame_count)
+ frame_count += 1
+ except EOFError:
+ pass
+
+ # Get duration
+ try:
+ duration_ms = img.info.get("duration", 100)
+ total_duration = (duration_ms * frame_count) / 1000
+ fps = frame_count / total_duration if total_duration > 0 else 0
+ except:
+ total_duration = None
+ fps = None
+
+ except Exception as e:
+ return False, {"error": f"Failed to read GIF: {e}"}
+
+ # Validate dimensions
+ if is_emoji:
+ optimal = width == height == 128
+ acceptable = width == height and 64 <= width <= 128
+ dim_pass = acceptable
+ else:
+ aspect_ratio = (
+ max(width, height) / min(width, height)
+ if min(width, height) > 0
+ else float("inf")
+ )
+ dim_pass = aspect_ratio <= 2.0 and 320 <= min(width, height) <= 640
+
+ results = {
+ "file": str(gif_path),
+ "passes": dim_pass,
+ "width": width,
+ "height": height,
+ "size_kb": size_kb,
+ "size_mb": size_mb,
+ "frame_count": frame_count,
+ "duration_seconds": total_duration,
+ "fps": fps,
+ "is_emoji": is_emoji,
+ "optimal": optimal if is_emoji else None,
+ }
+
+ # Print if verbose
+ if verbose:
+ print(f"\nValidating {gif_path.name}:")
+ print(
+ f" Dimensions: {width}x{height}"
+ + (
+ f" ({'optimal' if optimal else 'acceptable'})"
+ if is_emoji and acceptable
+ else ""
+ )
+ )
+ print(
+ f" Size: {size_kb:.1f} KB"
+ + (f" ({size_mb:.2f} MB)" if size_mb >= 1.0 else "")
+ )
+ print(
+ f" Frames: {frame_count}"
+ + (f" @ {fps:.1f} fps ({total_duration:.1f}s)" if fps else "")
+ )
+
+ if not dim_pass:
+ print(
+ f" Note: {'Emoji should be 128x128' if is_emoji else 'Unusual dimensions for Slack'}"
+ )
+
+ if size_mb > 5.0:
+ print(f" Note: Large file size - consider fewer frames/colors")
+
+ return dim_pass, results
+
+
def is_slack_ready(
    gif_path: str | Path, is_emoji: bool = True, verbose: bool = True
) -> bool:
    """
    Quick check if GIF is ready for Slack.

    Args:
        gif_path: Path to GIF file
        is_emoji: True for emoji GIF, False for message GIF
        verbose: Print feedback

    Returns:
        True if dimensions are acceptable
    """
    # Delegate to the full validator and keep only the pass/fail verdict.
    verdict, _details = validate_gif(gif_path, is_emoji, verbose)
    return verdict
diff --git a/.claude/skills/slack-gif-creator/requirements.txt b/.claude/skills/slack-gif-creator/requirements.txt
new file mode 100644
index 0000000..8bc4493
--- /dev/null
+++ b/.claude/skills/slack-gif-creator/requirements.txt
@@ -0,0 +1,4 @@
+pillow>=10.0.0
+imageio>=2.31.0
+imageio-ffmpeg>=0.4.9
+numpy>=1.24.0
\ No newline at end of file
diff --git a/.claude/skills/template/.openskills.json b/.claude/skills/template/.openskills.json
new file mode 100644
index 0000000..58d5cbb
--- /dev/null
+++ b/.claude/skills/template/.openskills.json
@@ -0,0 +1,7 @@
+{
+ "source": "anthropics/skills",
+ "sourceType": "git",
+ "repoUrl": "https://github.com/anthropics/skills",
+ "subpath": "template",
+ "installedAt": "2026-03-02T09:19:50.198Z"
+}
\ No newline at end of file
diff --git a/.claude/skills/template/SKILL.md b/.claude/skills/template/SKILL.md
new file mode 100644
index 0000000..50a4f9b
--- /dev/null
+++ b/.claude/skills/template/SKILL.md
@@ -0,0 +1,6 @@
+---
+name: template-skill
+description: Replace with description of the skill and when Claude should use it.
+---
+
+# Insert instructions below
diff --git a/.claude/skills/theme-factory/.openskills.json b/.claude/skills/theme-factory/.openskills.json
new file mode 100644
index 0000000..a817fa9
--- /dev/null
+++ b/.claude/skills/theme-factory/.openskills.json
@@ -0,0 +1,7 @@
+{
+ "source": "anthropics/skills",
+ "sourceType": "git",
+ "repoUrl": "https://github.com/anthropics/skills",
+ "subpath": "skills\\theme-factory",
+ "installedAt": "2026-03-02T09:19:50.165Z"
+}
\ No newline at end of file
diff --git a/.claude/skills/theme-factory/LICENSE.txt b/.claude/skills/theme-factory/LICENSE.txt
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/.claude/skills/theme-factory/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/.claude/skills/theme-factory/SKILL.md b/.claude/skills/theme-factory/SKILL.md
new file mode 100644
index 0000000..90dfcea
--- /dev/null
+++ b/.claude/skills/theme-factory/SKILL.md
@@ -0,0 +1,59 @@
+---
+name: theme-factory
+description: Toolkit for styling artifacts with a theme. These artifacts can be slides, docs, reports, HTML landing pages, etc. There are 10 pre-set themes with colors/fonts that you can apply to any artifact that has been created, or can generate a new theme on-the-fly.
+license: Complete terms in LICENSE.txt
+---
+
+
+# Theme Factory Skill
+
+This skill provides a curated collection of professional font and color themes, each with carefully selected color palettes and font pairings. Once a theme is chosen, it can be applied to any artifact.
+
+## Purpose
+
+To apply consistent, professional styling to presentation slide decks, use this skill. Each theme includes:
+- A cohesive color palette with hex codes
+- Complementary font pairings for headers and body text
+- A distinct visual identity suitable for different contexts and audiences
+
+## Usage Instructions
+
+To apply styling to a slide deck or other artifact:
+
+1. **Show the theme showcase**: Display the `theme-showcase.pdf` file to allow users to see all available themes visually. Do not make any modifications to it; simply show the file for viewing.
+2. **Ask for their choice**: Ask which theme to apply to the deck
+3. **Wait for selection**: Get explicit confirmation about the chosen theme
+4. **Apply the theme**: Once a theme has been chosen, apply the selected theme's colors and fonts to the deck/artifact
+
+## Themes Available
+
+The following 10 themes are available, each showcased in `theme-showcase.pdf`:
+
+1. **Ocean Depths** - Professional and calming maritime theme
+2. **Sunset Boulevard** - Warm and vibrant sunset colors
+3. **Forest Canopy** - Natural and grounded earth tones
+4. **Modern Minimalist** - Clean and contemporary grayscale
+5. **Golden Hour** - Rich and warm autumnal palette
+6. **Arctic Frost** - Cool and crisp winter-inspired theme
+7. **Desert Rose** - Soft and sophisticated dusty tones
+8. **Tech Innovation** - Bold and modern tech aesthetic
+9. **Botanical Garden** - Fresh and organic garden colors
+10. **Midnight Galaxy** - Dramatic and cosmic deep tones
+
+## Theme Details
+
+Each theme is defined in the `themes/` directory with complete specifications including:
+- Cohesive color palette with hex codes
+- Complementary font pairings for headers and body text
+- Distinct visual identity suitable for different contexts and audiences
+
+## Application Process
+
+After a preferred theme is selected:
+1. Read the corresponding theme file from the `themes/` directory
+2. Apply the specified colors and fonts consistently throughout the deck
+3. Ensure proper contrast and readability
+4. Maintain the theme's visual identity across all slides
+
+## Create your Own Theme
+To handle cases where none of the existing themes work for an artifact, create a custom theme. Based on provided inputs, generate a new theme similar to the ones above. Give the theme a similar name describing what the font/color combinations represent. Use any basic description provided to choose appropriate colors/fonts. After generating the theme, show it for review and verification. Following that, apply the theme as described above.
diff --git a/.claude/skills/theme-factory/theme-showcase.pdf b/.claude/skills/theme-factory/theme-showcase.pdf
new file mode 100644
index 0000000..24495d1
Binary files /dev/null and b/.claude/skills/theme-factory/theme-showcase.pdf differ
diff --git a/.claude/skills/theme-factory/themes/arctic-frost.md b/.claude/skills/theme-factory/themes/arctic-frost.md
new file mode 100644
index 0000000..e9f1eb0
--- /dev/null
+++ b/.claude/skills/theme-factory/themes/arctic-frost.md
@@ -0,0 +1,19 @@
+# Arctic Frost
+
+A cool and crisp winter-inspired theme that conveys clarity, precision, and professionalism.
+
+## Color Palette
+
+- **Ice Blue**: `#d4e4f7` - Light backgrounds and highlights
+- **Steel Blue**: `#4a6fa5` - Primary accent color
+- **Silver**: `#c0c0c0` - Metallic accent elements
+- **Crisp White**: `#fafafa` - Clean backgrounds and text
+
+## Typography
+
+- **Headers**: DejaVu Sans Bold
+- **Body Text**: DejaVu Sans
+
+## Best Used For
+
+Healthcare presentations, technology solutions, winter sports, clean tech, pharmaceutical content.
diff --git a/.claude/skills/theme-factory/themes/botanical-garden.md b/.claude/skills/theme-factory/themes/botanical-garden.md
new file mode 100644
index 0000000..0c95bf7
--- /dev/null
+++ b/.claude/skills/theme-factory/themes/botanical-garden.md
@@ -0,0 +1,19 @@
+# Botanical Garden
+
+A fresh and organic theme featuring vibrant garden-inspired colors for lively presentations.
+
+## Color Palette
+
+- **Fern Green**: `#4a7c59` - Rich natural green
+- **Marigold**: `#f9a620` - Bright floral accent
+- **Terracotta**: `#b7472a` - Earthy warm tone
+- **Cream**: `#f5f3ed` - Soft neutral backgrounds
+
+## Typography
+
+- **Headers**: DejaVu Serif Bold
+- **Body Text**: DejaVu Sans
+
+## Best Used For
+
+Garden centers, food presentations, farm-to-table content, botanical brands, natural products.
diff --git a/.claude/skills/theme-factory/themes/desert-rose.md b/.claude/skills/theme-factory/themes/desert-rose.md
new file mode 100644
index 0000000..ea7c74e
--- /dev/null
+++ b/.claude/skills/theme-factory/themes/desert-rose.md
@@ -0,0 +1,19 @@
+# Desert Rose
+
+A soft and sophisticated theme with dusty, muted tones perfect for elegant presentations.
+
+## Color Palette
+
+- **Dusty Rose**: `#d4a5a5` - Soft primary color
+- **Clay**: `#b87d6d` - Earthy accent
+- **Sand**: `#e8d5c4` - Warm neutral backgrounds
+- **Deep Burgundy**: `#5d2e46` - Rich dark contrast
+
+## Typography
+
+- **Headers**: FreeSans Bold
+- **Body Text**: FreeSans
+
+## Best Used For
+
+Fashion presentations, beauty brands, wedding planning, interior design, boutique businesses.
diff --git a/.claude/skills/theme-factory/themes/forest-canopy.md b/.claude/skills/theme-factory/themes/forest-canopy.md
new file mode 100644
index 0000000..90c2b26
--- /dev/null
+++ b/.claude/skills/theme-factory/themes/forest-canopy.md
@@ -0,0 +1,19 @@
+# Forest Canopy
+
+A natural and grounded theme featuring earth tones inspired by dense forest environments.
+
+## Color Palette
+
+- **Forest Green**: `#2d4a2b` - Primary dark green
+- **Sage**: `#7d8471` - Muted green accent
+- **Olive**: `#a4ac86` - Light accent color
+- **Ivory**: `#faf9f6` - Backgrounds and text
+
+## Typography
+
+- **Headers**: FreeSerif Bold
+- **Body Text**: FreeSans
+
+## Best Used For
+
+Environmental presentations, sustainability reports, outdoor brands, wellness content, organic products.
diff --git a/.claude/skills/theme-factory/themes/golden-hour.md b/.claude/skills/theme-factory/themes/golden-hour.md
new file mode 100644
index 0000000..ed8fc25
--- /dev/null
+++ b/.claude/skills/theme-factory/themes/golden-hour.md
@@ -0,0 +1,19 @@
+# Golden Hour
+
+A rich and warm autumnal palette that creates an inviting and sophisticated atmosphere.
+
+## Color Palette
+
+- **Mustard Yellow**: `#f4a900` - Bold primary accent
+- **Terracotta**: `#c1666b` - Warm secondary color
+- **Warm Beige**: `#d4b896` - Neutral backgrounds
+- **Chocolate Brown**: `#4a403a` - Dark text and anchors
+
+## Typography
+
+- **Headers**: FreeSans Bold
+- **Body Text**: FreeSans
+
+## Best Used For
+
+Restaurant presentations, hospitality brands, fall campaigns, cozy lifestyle content, artisan products.
diff --git a/.claude/skills/theme-factory/themes/midnight-galaxy.md b/.claude/skills/theme-factory/themes/midnight-galaxy.md
new file mode 100644
index 0000000..97e1c5f
--- /dev/null
+++ b/.claude/skills/theme-factory/themes/midnight-galaxy.md
@@ -0,0 +1,19 @@
+# Midnight Galaxy
+
+A dramatic and cosmic theme with deep purples and mystical tones for impactful presentations.
+
+## Color Palette
+
+- **Deep Purple**: `#2b1e3e` - Rich dark base
+- **Cosmic Blue**: `#4a4e8f` - Mystical mid-tone
+- **Lavender**: `#a490c2` - Soft accent color
+- **Silver**: `#e6e6fa` - Light highlights and text
+
+## Typography
+
+- **Headers**: FreeSans Bold
+- **Body Text**: FreeSans
+
+## Best Used For
+
+Entertainment industry, gaming presentations, nightlife venues, luxury brands, creative agencies.
diff --git a/.claude/skills/theme-factory/themes/modern-minimalist.md b/.claude/skills/theme-factory/themes/modern-minimalist.md
new file mode 100644
index 0000000..6bd26a2
--- /dev/null
+++ b/.claude/skills/theme-factory/themes/modern-minimalist.md
@@ -0,0 +1,19 @@
+# Modern Minimalist
+
+A clean and contemporary theme with a sophisticated grayscale palette for maximum versatility.
+
+## Color Palette
+
+- **Charcoal**: `#36454f` - Primary dark color
+- **Slate Gray**: `#708090` - Medium gray for accents
+- **Light Gray**: `#d3d3d3` - Backgrounds and dividers
+- **White**: `#ffffff` - Text and clean backgrounds
+
+## Typography
+
+- **Headers**: DejaVu Sans Bold
+- **Body Text**: DejaVu Sans
+
+## Best Used For
+
+Tech presentations, architecture portfolios, design showcases, modern business proposals, data visualization.
diff --git a/.claude/skills/theme-factory/themes/ocean-depths.md b/.claude/skills/theme-factory/themes/ocean-depths.md
new file mode 100644
index 0000000..b675126
--- /dev/null
+++ b/.claude/skills/theme-factory/themes/ocean-depths.md
@@ -0,0 +1,19 @@
+# Ocean Depths
+
+A professional and calming maritime theme that evokes the serenity of deep ocean waters.
+
+## Color Palette
+
+- **Deep Navy**: `#1a2332` - Primary background color
+- **Teal**: `#2d8b8b` - Accent color for highlights and emphasis
+- **Seafoam**: `#a8dadc` - Secondary accent for lighter elements
+- **Cream**: `#f1faee` - Text and light backgrounds
+
+## Typography
+
+- **Headers**: DejaVu Sans Bold
+- **Body Text**: DejaVu Sans
+
+## Best Used For
+
+Corporate presentations, financial reports, professional consulting decks, trust-building content.
diff --git a/.claude/skills/theme-factory/themes/sunset-boulevard.md b/.claude/skills/theme-factory/themes/sunset-boulevard.md
new file mode 100644
index 0000000..df799a0
--- /dev/null
+++ b/.claude/skills/theme-factory/themes/sunset-boulevard.md
@@ -0,0 +1,19 @@
+# Sunset Boulevard
+
+A warm and vibrant theme inspired by golden hour sunsets, perfect for energetic and creative presentations.
+
+## Color Palette
+
+- **Burnt Orange**: `#e76f51` - Primary accent color
+- **Coral**: `#f4a261` - Secondary warm accent
+- **Warm Sand**: `#e9c46a` - Highlighting and backgrounds
+- **Deep Purple**: `#264653` - Dark contrast and text
+
+## Typography
+
+- **Headers**: DejaVu Serif Bold
+- **Body Text**: DejaVu Sans
+
+## Best Used For
+
+Creative pitches, marketing presentations, lifestyle brands, event promotions, inspirational content.
diff --git a/.claude/skills/theme-factory/themes/tech-innovation.md b/.claude/skills/theme-factory/themes/tech-innovation.md
new file mode 100644
index 0000000..e029a43
--- /dev/null
+++ b/.claude/skills/theme-factory/themes/tech-innovation.md
@@ -0,0 +1,19 @@
+# Tech Innovation
+
+A bold and modern theme with high-contrast colors perfect for cutting-edge technology presentations.
+
+## Color Palette
+
+- **Electric Blue**: `#0066ff` - Vibrant primary accent
+- **Neon Cyan**: `#00ffff` - Bright highlight color
+- **Dark Gray**: `#1e1e1e` - Deep backgrounds
+- **White**: `#ffffff` - Clean text and contrast
+
+## Typography
+
+- **Headers**: DejaVu Sans Bold
+- **Body Text**: DejaVu Sans
+
+## Best Used For
+
+Tech startups, software launches, innovation showcases, AI/ML presentations, digital transformation content.
diff --git a/.claude/skills/web-artifacts-builder/.openskills.json b/.claude/skills/web-artifacts-builder/.openskills.json
new file mode 100644
index 0000000..52e986f
--- /dev/null
+++ b/.claude/skills/web-artifacts-builder/.openskills.json
@@ -0,0 +1,7 @@
+{
+ "source": "anthropics/skills",
+ "sourceType": "git",
+ "repoUrl": "https://github.com/anthropics/skills",
+ "subpath": "skills\\web-artifacts-builder",
+ "installedAt": "2026-03-02T09:19:50.169Z"
+}
\ No newline at end of file
diff --git a/.claude/skills/web-artifacts-builder/LICENSE.txt b/.claude/skills/web-artifacts-builder/LICENSE.txt
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/.claude/skills/web-artifacts-builder/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/.claude/skills/web-artifacts-builder/SKILL.md b/.claude/skills/web-artifacts-builder/SKILL.md
new file mode 100644
index 0000000..8b39b19
--- /dev/null
+++ b/.claude/skills/web-artifacts-builder/SKILL.md
@@ -0,0 +1,74 @@
+---
+name: web-artifacts-builder
+description: Suite of tools for creating elaborate, multi-component claude.ai HTML artifacts using modern frontend web technologies (React, Tailwind CSS, shadcn/ui). Use for complex artifacts requiring state management, routing, or shadcn/ui components - not for simple single-file HTML/JSX artifacts.
+license: Complete terms in LICENSE.txt
+---
+
+# Web Artifacts Builder
+
+To build powerful frontend claude.ai artifacts, follow these steps:
+1. Initialize the frontend repo using `scripts/init-artifact.sh`
+2. Develop your artifact by editing the generated code
+3. Bundle all code into a single HTML file using `scripts/bundle-artifact.sh`
+4. Display artifact to user
+5. (Optional) Test the artifact
+
+**Stack**: React 18 + TypeScript + Vite + Parcel (bundling) + Tailwind CSS + shadcn/ui
+
+## Design & Style Guidelines
+
+VERY IMPORTANT: To avoid what is often referred to as "AI slop", avoid using excessive centered layouts, purple gradients, uniform rounded corners, and Inter font.
+
+## Quick Start
+
+### Step 1: Initialize Project
+
+Run the initialization script to create a new React project:
+```bash
+bash scripts/init-artifact.sh
+cd <project-name>
+```
+
+This creates a fully configured project with:
+- ✅ React + TypeScript (via Vite)
+- ✅ Tailwind CSS 3.4.1 with shadcn/ui theming system
+- ✅ Path aliases (`@/`) configured
+- ✅ 40+ shadcn/ui components pre-installed
+- ✅ All Radix UI dependencies included
+- ✅ Parcel configured for bundling (via .parcelrc)
+- ✅ Node 18+ compatibility (auto-detects and pins Vite version)
+
+### Step 2: Develop Your Artifact
+
+To build the artifact, edit the generated files. See **Common Development Tasks** below for guidance.
+
+### Step 3: Bundle to Single HTML File
+
+To bundle the React app into a single HTML artifact:
+```bash
+bash scripts/bundle-artifact.sh
+```
+
+This creates `bundle.html` - a self-contained artifact with all JavaScript, CSS, and dependencies inlined. This file can be directly shared in Claude conversations as an artifact.
+
+**Requirements**: Your project must have an `index.html` in the root directory.
+
+**What the script does**:
+- Installs bundling dependencies (parcel, @parcel/config-default, parcel-resolver-tspaths, html-inline)
+- Creates `.parcelrc` config with path alias support
+- Builds with Parcel (no source maps)
+- Inlines all assets into single HTML using html-inline
+
+### Step 4: Share Artifact with User
+
+Finally, share the bundled HTML file in conversation with the user so they can view it as an artifact.
+
+### Step 5: Testing/Visualizing the Artifact (Optional)
+
+Note: This is a completely optional step. Only perform if necessary or requested.
+
+To test/visualize the artifact, use available tools (including other Skills or built-in tools like Playwright or Puppeteer). In general, avoid testing the artifact upfront as it adds latency between the request and when the finished artifact can be seen. Test later, after presenting the artifact, if requested or if issues arise.
+
+## Reference
+
+- **shadcn/ui components**: https://ui.shadcn.com/docs/components
\ No newline at end of file
diff --git a/.claude/skills/web-artifacts-builder/scripts/bundle-artifact.sh b/.claude/skills/web-artifacts-builder/scripts/bundle-artifact.sh
new file mode 100644
index 0000000..c13d229
--- /dev/null
+++ b/.claude/skills/web-artifacts-builder/scripts/bundle-artifact.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+set -e
+
+echo "📦 Bundling React app to single HTML artifact..."
+
+# Check if we're in a project directory
+if [ ! -f "package.json" ]; then
+ echo "❌ Error: No package.json found. Run this script from your project root."
+ exit 1
+fi
+
+# Check if index.html exists
+if [ ! -f "index.html" ]; then
+ echo "❌ Error: No index.html found in project root."
+ echo " This script requires an index.html entry point."
+ exit 1
+fi
+
+# Install bundling dependencies
+echo "📦 Installing bundling dependencies..."
+pnpm add -D parcel @parcel/config-default parcel-resolver-tspaths html-inline
+
+# Create Parcel config with tspaths resolver
+if [ ! -f ".parcelrc" ]; then
+ echo "🔧 Creating Parcel configuration with path alias support..."
+ cat > .parcelrc << 'EOF'
+{
+ "extends": "@parcel/config-default",
+ "resolvers": ["parcel-resolver-tspaths", "..."]
+}
+EOF
+fi
+
+# Clean previous build
+echo "🧹 Cleaning previous build..."
+rm -rf dist bundle.html
+
+# Build with Parcel
+echo "🔨 Building with Parcel..."
+pnpm exec parcel build index.html --dist-dir dist --no-source-maps
+
+# Inline everything into single HTML
+echo "🎯 Inlining all assets into single HTML file..."
+pnpm exec html-inline dist/index.html > bundle.html
+
+# Get file size
+FILE_SIZE=$(du -h bundle.html | cut -f1)
+
+echo ""
+echo "✅ Bundle complete!"
+echo "📄 Output: bundle.html ($FILE_SIZE)"
+echo ""
+echo "You can now use this single HTML file as an artifact in Claude conversations."
+echo "To test locally: open bundle.html in your browser"
\ No newline at end of file
diff --git a/.claude/skills/web-artifacts-builder/scripts/init-artifact.sh b/.claude/skills/web-artifacts-builder/scripts/init-artifact.sh
new file mode 100644
index 0000000..7d1022d
--- /dev/null
+++ b/.claude/skills/web-artifacts-builder/scripts/init-artifact.sh
@@ -0,0 +1,322 @@
+#!/bin/bash
+
+# Exit on error
+set -e
+
+# Detect Node version
+NODE_VERSION=$(node -v | cut -d'v' -f2 | cut -d'.' -f1)
+
+echo "🔍 Detected Node.js version: $NODE_VERSION"
+
+if [ "$NODE_VERSION" -lt 18 ]; then
+ echo "❌ Error: Node.js 18 or higher is required"
+ echo " Current version: $(node -v)"
+ exit 1
+fi
+
+# Set Vite version based on Node version
+if [ "$NODE_VERSION" -ge 20 ]; then
+ VITE_VERSION="latest"
+ echo "✅ Using Vite latest (Node 20+)"
+else
+ VITE_VERSION="5.4.11"
+ echo "✅ Using Vite $VITE_VERSION (Node 18 compatible)"
+fi
+
+# Detect OS and set sed syntax
+if [[ "$OSTYPE" == "darwin"* ]]; then
+ SED_INPLACE="sed -i ''"
+else
+ SED_INPLACE="sed -i"
+fi
+
+# Check if pnpm is installed
+if ! command -v pnpm &> /dev/null; then
+ echo "📦 pnpm not found. Installing pnpm..."
+ npm install -g pnpm
+fi
+
+# Check if project name is provided
+if [ -z "$1" ]; then
+ echo "❌ Usage: ./create-react-shadcn-complete.sh <project-name>"
+ exit 1
+fi
+
+PROJECT_NAME="$1"
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+COMPONENTS_TARBALL="$SCRIPT_DIR/shadcn-components.tar.gz"
+
+# Check if components tarball exists
+if [ ! -f "$COMPONENTS_TARBALL" ]; then
+ echo "❌ Error: shadcn-components.tar.gz not found in script directory"
+ echo " Expected location: $COMPONENTS_TARBALL"
+ exit 1
+fi
+
+echo "🚀 Creating new React + Vite project: $PROJECT_NAME"
+
+# Create new Vite project (always use latest create-vite, pin vite version later)
+pnpm create vite "$PROJECT_NAME" --template react-ts
+
+# Navigate into project directory
+cd "$PROJECT_NAME"
+
+echo "🧹 Cleaning up Vite template..."
+$SED_INPLACE 's/<title>.*<\/title>/<title>'"$PROJECT_NAME"'<\/title>/' index.html
+
+echo "📦 Installing base dependencies..."
+pnpm install
+
+# Pin Vite version for Node 18
+if [ "$NODE_VERSION" -lt 20 ]; then
+ echo "📌 Pinning Vite to $VITE_VERSION for Node 18 compatibility..."
+ pnpm add -D vite@$VITE_VERSION
+fi
+
+echo "📦 Installing Tailwind CSS and dependencies..."
+pnpm install -D tailwindcss@3.4.1 postcss autoprefixer @types/node tailwindcss-animate
+pnpm install class-variance-authority clsx tailwind-merge lucide-react next-themes
+
+echo "⚙️ Creating Tailwind and PostCSS configuration..."
+cat > postcss.config.js << 'EOF'
+export default {
+ plugins: {
+ tailwindcss: {},
+ autoprefixer: {},
+ },
+}
+EOF
+
+echo "📝 Configuring Tailwind with shadcn theme..."
+cat > tailwind.config.js << 'EOF'
+/** @type {import('tailwindcss').Config} */
+module.exports = {
+ darkMode: ["class"],
+ content: [
+ "./index.html",
+ "./src/**/*.{js,ts,jsx,tsx}",
+ ],
+ theme: {
+ extend: {
+ colors: {
+ border: "hsl(var(--border))",
+ input: "hsl(var(--input))",
+ ring: "hsl(var(--ring))",
+ background: "hsl(var(--background))",
+ foreground: "hsl(var(--foreground))",
+ primary: {
+ DEFAULT: "hsl(var(--primary))",
+ foreground: "hsl(var(--primary-foreground))",
+ },
+ secondary: {
+ DEFAULT: "hsl(var(--secondary))",
+ foreground: "hsl(var(--secondary-foreground))",
+ },
+ destructive: {
+ DEFAULT: "hsl(var(--destructive))",
+ foreground: "hsl(var(--destructive-foreground))",
+ },
+ muted: {
+ DEFAULT: "hsl(var(--muted))",
+ foreground: "hsl(var(--muted-foreground))",
+ },
+ accent: {
+ DEFAULT: "hsl(var(--accent))",
+ foreground: "hsl(var(--accent-foreground))",
+ },
+ popover: {
+ DEFAULT: "hsl(var(--popover))",
+ foreground: "hsl(var(--popover-foreground))",
+ },
+ card: {
+ DEFAULT: "hsl(var(--card))",
+ foreground: "hsl(var(--card-foreground))",
+ },
+ },
+ borderRadius: {
+ lg: "var(--radius)",
+ md: "calc(var(--radius) - 2px)",
+ sm: "calc(var(--radius) - 4px)",
+ },
+ keyframes: {
+ "accordion-down": {
+ from: { height: "0" },
+ to: { height: "var(--radix-accordion-content-height)" },
+ },
+ "accordion-up": {
+ from: { height: "var(--radix-accordion-content-height)" },
+ to: { height: "0" },
+ },
+ },
+ animation: {
+ "accordion-down": "accordion-down 0.2s ease-out",
+ "accordion-up": "accordion-up 0.2s ease-out",
+ },
+ },
+ },
+ plugins: [require("tailwindcss-animate")],
+}
+EOF
+
+# Add Tailwind directives and CSS variables to index.css
+echo "🎨 Adding Tailwind directives and CSS variables..."
+cat > src/index.css << 'EOF'
+@tailwind base;
+@tailwind components;
+@tailwind utilities;
+
+@layer base {
+ :root {
+ --background: 0 0% 100%;
+ --foreground: 0 0% 3.9%;
+ --card: 0 0% 100%;
+ --card-foreground: 0 0% 3.9%;
+ --popover: 0 0% 100%;
+ --popover-foreground: 0 0% 3.9%;
+ --primary: 0 0% 9%;
+ --primary-foreground: 0 0% 98%;
+ --secondary: 0 0% 96.1%;
+ --secondary-foreground: 0 0% 9%;
+ --muted: 0 0% 96.1%;
+ --muted-foreground: 0 0% 45.1%;
+ --accent: 0 0% 96.1%;
+ --accent-foreground: 0 0% 9%;
+ --destructive: 0 84.2% 60.2%;
+ --destructive-foreground: 0 0% 98%;
+ --border: 0 0% 89.8%;
+ --input: 0 0% 89.8%;
+ --ring: 0 0% 3.9%;
+ --radius: 0.5rem;
+ }
+
+ .dark {
+ --background: 0 0% 3.9%;
+ --foreground: 0 0% 98%;
+ --card: 0 0% 3.9%;
+ --card-foreground: 0 0% 98%;
+ --popover: 0 0% 3.9%;
+ --popover-foreground: 0 0% 98%;
+ --primary: 0 0% 98%;
+ --primary-foreground: 0 0% 9%;
+ --secondary: 0 0% 14.9%;
+ --secondary-foreground: 0 0% 98%;
+ --muted: 0 0% 14.9%;
+ --muted-foreground: 0 0% 63.9%;
+ --accent: 0 0% 14.9%;
+ --accent-foreground: 0 0% 98%;
+ --destructive: 0 62.8% 30.6%;
+ --destructive-foreground: 0 0% 98%;
+ --border: 0 0% 14.9%;
+ --input: 0 0% 14.9%;
+ --ring: 0 0% 83.1%;
+ }
+}
+
+@layer base {
+ * {
+ @apply border-border;
+ }
+ body {
+ @apply bg-background text-foreground;
+ }
+}
+EOF
+
+# Add path aliases to tsconfig.json
+echo "🔧 Adding path aliases to tsconfig.json..."
+node -e "
+const fs = require('fs');
+const config = JSON.parse(fs.readFileSync('tsconfig.json', 'utf8'));
+config.compilerOptions = config.compilerOptions || {};
+config.compilerOptions.baseUrl = '.';
+config.compilerOptions.paths = { '@/*': ['./src/*'] };
+fs.writeFileSync('tsconfig.json', JSON.stringify(config, null, 2));
+"
+
+# Add path aliases to tsconfig.app.json
+echo "🔧 Adding path aliases to tsconfig.app.json..."
+node -e "
+const fs = require('fs');
+const path = 'tsconfig.app.json';
+const content = fs.readFileSync(path, 'utf8');
+// Remove comments manually
+const lines = content.split('\n').filter(line => !line.trim().startsWith('//'));
+const jsonContent = lines.join('\n');
+const config = JSON.parse(jsonContent.replace(/\/\*[\s\S]*?\*\//g, '').replace(/,(\s*[}\]])/g, '\$1'));
+config.compilerOptions = config.compilerOptions || {};
+config.compilerOptions.baseUrl = '.';
+config.compilerOptions.paths = { '@/*': ['./src/*'] };
+fs.writeFileSync(path, JSON.stringify(config, null, 2));
+"
+
+# Update vite.config.ts
+echo "⚙️ Updating Vite configuration..."
+cat > vite.config.ts << 'EOF'
+import path from "path";
+import react from "@vitejs/plugin-react";
+import { defineConfig } from "vite";
+
+export default defineConfig({
+ plugins: [react()],
+ resolve: {
+ alias: {
+ "@": path.resolve(__dirname, "./src"),
+ },
+ },
+});
+EOF
+
+# Install all shadcn/ui dependencies
+echo "📦 Installing shadcn/ui dependencies..."
+pnpm install @radix-ui/react-accordion @radix-ui/react-aspect-ratio @radix-ui/react-avatar @radix-ui/react-checkbox @radix-ui/react-collapsible @radix-ui/react-context-menu @radix-ui/react-dialog @radix-ui/react-dropdown-menu @radix-ui/react-hover-card @radix-ui/react-label @radix-ui/react-menubar @radix-ui/react-navigation-menu @radix-ui/react-popover @radix-ui/react-progress @radix-ui/react-radio-group @radix-ui/react-scroll-area @radix-ui/react-select @radix-ui/react-separator @radix-ui/react-slider @radix-ui/react-slot @radix-ui/react-switch @radix-ui/react-tabs @radix-ui/react-toast @radix-ui/react-toggle @radix-ui/react-toggle-group @radix-ui/react-tooltip
+pnpm install sonner cmdk vaul embla-carousel-react react-day-picker react-resizable-panels date-fns react-hook-form @hookform/resolvers zod
+
+# Extract shadcn components from tarball
+echo "📦 Extracting shadcn/ui components..."
+tar -xzf "$COMPONENTS_TARBALL" -C src/
+
+# Create components.json for reference
+echo "📝 Creating components.json config..."
+cat > components.json << 'EOF'
+{
+ "$schema": "https://ui.shadcn.com/schema.json",
+ "style": "default",
+ "rsc": false,
+ "tsx": true,
+ "tailwind": {
+ "config": "tailwind.config.js",
+ "css": "src/index.css",
+ "baseColor": "slate",
+ "cssVariables": true,
+ "prefix": ""
+ },
+ "aliases": {
+ "components": "@/components",
+ "utils": "@/lib/utils",
+ "ui": "@/components/ui",
+ "lib": "@/lib",
+ "hooks": "@/hooks"
+ }
+}
+EOF
+
+echo "✅ Setup complete! You can now use Tailwind CSS and shadcn/ui in your project."
+echo ""
+echo "📦 Included components (40+ total):"
+echo " - accordion, alert, aspect-ratio, avatar, badge, breadcrumb"
+echo " - button, calendar, card, carousel, checkbox, collapsible"
+echo " - command, context-menu, dialog, drawer, dropdown-menu"
+echo " - form, hover-card, input, label, menubar, navigation-menu"
+echo " - popover, progress, radio-group, resizable, scroll-area"
+echo " - select, separator, sheet, skeleton, slider, sonner"
+echo " - switch, table, tabs, textarea, toast, toggle, toggle-group, tooltip"
+echo ""
+echo "To start developing:"
+echo " cd $PROJECT_NAME"
+echo " pnpm dev"
+echo ""
+echo "📚 Import components like:"
+echo " import { Button } from '@/components/ui/button'"
+echo " import { Card, CardHeader, CardTitle, CardContent } from '@/components/ui/card'"
+echo " import { Dialog, DialogContent, DialogTrigger } from '@/components/ui/dialog'"
diff --git a/.claude/skills/web-artifacts-builder/scripts/shadcn-components.tar.gz b/.claude/skills/web-artifacts-builder/scripts/shadcn-components.tar.gz
new file mode 100644
index 0000000..cdbe7cd
Binary files /dev/null and b/.claude/skills/web-artifacts-builder/scripts/shadcn-components.tar.gz differ
diff --git a/.claude/skills/webapp-testing/.openskills.json b/.claude/skills/webapp-testing/.openskills.json
new file mode 100644
index 0000000..b7a90bc
--- /dev/null
+++ b/.claude/skills/webapp-testing/.openskills.json
@@ -0,0 +1,7 @@
+{
+ "source": "anthropics/skills",
+ "sourceType": "git",
+ "repoUrl": "https://github.com/anthropics/skills",
+ "subpath": "skills\\webapp-testing",
+ "installedAt": "2026-03-02T09:19:50.172Z"
+}
\ No newline at end of file
diff --git a/.claude/skills/webapp-testing/LICENSE.txt b/.claude/skills/webapp-testing/LICENSE.txt
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/.claude/skills/webapp-testing/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/.claude/skills/webapp-testing/SKILL.md b/.claude/skills/webapp-testing/SKILL.md
new file mode 100644
index 0000000..4726215
--- /dev/null
+++ b/.claude/skills/webapp-testing/SKILL.md
@@ -0,0 +1,96 @@
+---
+name: webapp-testing
+description: Toolkit for interacting with and testing local web applications using Playwright. Supports verifying frontend functionality, debugging UI behavior, capturing browser screenshots, and viewing browser logs.
+license: Complete terms in LICENSE.txt
+---
+
+# Web Application Testing
+
+To test local web applications, write native Python Playwright scripts.
+
+**Helper Scripts Available**:
+- `scripts/with_server.py` - Manages server lifecycle (supports multiple servers)
+
+**Always run scripts with `--help` first** to see usage. DO NOT read the source until you try running the script first and find that a customized solution is absolutely necessary. These scripts can be very large and thus pollute your context window. They exist to be called directly as black-box scripts rather than ingested into your context window.
+
+## Decision Tree: Choosing Your Approach
+
+```
+User task → Is it static HTML?
+ ├─ Yes → Read HTML file directly to identify selectors
+ │ ├─ Success → Write Playwright script using selectors
+ │ └─ Fails/Incomplete → Treat as dynamic (below)
+ │
+ └─ No (dynamic webapp) → Is the server already running?
+ ├─ No → Run: python scripts/with_server.py --help
+ │ Then use the helper + write simplified Playwright script
+ │
+ └─ Yes → Reconnaissance-then-action:
+ 1. Navigate and wait for networkidle
+ 2. Take screenshot or inspect DOM
+ 3. Identify selectors from rendered state
+ 4. Execute actions with discovered selectors
+```
+
+## Example: Using with_server.py
+
+To start a server, run `--help` first, then use the helper:
+
+**Single server:**
+```bash
+python scripts/with_server.py --server "npm run dev" --port 5173 -- python your_automation.py
+```
+
+**Multiple servers (e.g., backend + frontend):**
+```bash
+python scripts/with_server.py \
+ --server "cd backend && python server.py" --port 3000 \
+ --server "cd frontend && npm run dev" --port 5173 \
+ -- python your_automation.py
+```
+
+To create an automation script, include only Playwright logic (servers are managed automatically):
+```python
+from playwright.sync_api import sync_playwright
+
+with sync_playwright() as p:
+ browser = p.chromium.launch(headless=True) # Always launch chromium in headless mode
+ page = browser.new_page()
+ page.goto('http://localhost:5173') # Server already running and ready
+ page.wait_for_load_state('networkidle') # CRITICAL: Wait for JS to execute
+ # ... your automation logic
+ browser.close()
+```
+
+## Reconnaissance-Then-Action Pattern
+
+1. **Inspect rendered DOM**:
+ ```python
+ page.screenshot(path='/tmp/inspect.png', full_page=True)
+ content = page.content()
+ page.locator('button').all()
+ ```
+
+2. **Identify selectors** from inspection results
+
+3. **Execute actions** using discovered selectors
+
+## Common Pitfall
+
+❌ **Don't** inspect the DOM before waiting for `networkidle` on dynamic apps
+✅ **Do** wait for `page.wait_for_load_state('networkidle')` before inspection
+
+## Best Practices
+
+- **Use bundled scripts as black boxes** - To accomplish a task, consider whether one of the scripts available in `scripts/` can help. These scripts handle common, complex workflows reliably without cluttering the context window. Use `--help` to see usage, then invoke directly.
+- Use `sync_playwright()` for synchronous scripts
+- Always close the browser when done
+- Use descriptive selectors: `text=`, `role=`, CSS selectors, or IDs
+- Add appropriate waits: `page.wait_for_selector()` or `page.wait_for_timeout()`
+
+## Reference Files
+
+- **examples/** - Examples showing common patterns:
+ - `element_discovery.py` - Discovering buttons, links, and inputs on a page
+ - `static_html_automation.py` - Using file:// URLs for local HTML
+ - `console_logging.py` - Capturing console logs during automation
\ No newline at end of file
diff --git a/.claude/skills/webapp-testing/examples/console_logging.py b/.claude/skills/webapp-testing/examples/console_logging.py
new file mode 100644
index 0000000..9329b5e
--- /dev/null
+++ b/.claude/skills/webapp-testing/examples/console_logging.py
@@ -0,0 +1,35 @@
+from playwright.sync_api import sync_playwright
+
+# Example: Capturing console logs during browser automation
+
+url = 'http://localhost:5173' # Replace with your URL
+
+console_logs = []
+
+with sync_playwright() as p:
+ browser = p.chromium.launch(headless=True)
+ page = browser.new_page(viewport={'width': 1920, 'height': 1080})
+
+ # Set up console log capture
+ def handle_console_message(msg):
+ console_logs.append(f"[{msg.type}] {msg.text}")
+ print(f"Console: [{msg.type}] {msg.text}")
+
+ page.on("console", handle_console_message)
+
+ # Navigate to page
+ page.goto(url)
+ page.wait_for_load_state('networkidle')
+
+ # Interact with the page (triggers console logs)
+ page.click('text=Dashboard')
+ page.wait_for_timeout(1000)
+
+ browser.close()
+
+# Save console logs to file
+with open('/mnt/user-data/outputs/console.log', 'w') as f:
+ f.write('\n'.join(console_logs))
+
+print(f"\nCaptured {len(console_logs)} console messages")
+print(f"Logs saved to: /mnt/user-data/outputs/console.log")
\ No newline at end of file
diff --git a/.claude/skills/webapp-testing/examples/element_discovery.py b/.claude/skills/webapp-testing/examples/element_discovery.py
new file mode 100644
index 0000000..917ba72
--- /dev/null
+++ b/.claude/skills/webapp-testing/examples/element_discovery.py
@@ -0,0 +1,40 @@
+from playwright.sync_api import sync_playwright
+
+# Example: Discovering buttons and other elements on a page
+
+with sync_playwright() as p:
+ browser = p.chromium.launch(headless=True)
+ page = browser.new_page()
+
+ # Navigate to page and wait for it to fully load
+ page.goto('http://localhost:5173')
+ page.wait_for_load_state('networkidle')
+
+ # Discover all buttons on the page
+ buttons = page.locator('button').all()
+ print(f"Found {len(buttons)} buttons:")
+ for i, button in enumerate(buttons):
+ text = button.inner_text() if button.is_visible() else "[hidden]"
+ print(f" [{i}] {text}")
+
+ # Discover links
+ links = page.locator('a[href]').all()
+ print(f"\nFound {len(links)} links:")
+ for link in links[:5]: # Show first 5
+ text = link.inner_text().strip()
+ href = link.get_attribute('href')
+ print(f" - {text} -> {href}")
+
+ # Discover input fields
+ inputs = page.locator('input, textarea, select').all()
+ print(f"\nFound {len(inputs)} input fields:")
+ for input_elem in inputs:
+ name = input_elem.get_attribute('name') or input_elem.get_attribute('id') or "[unnamed]"
+ input_type = input_elem.get_attribute('type') or 'text'
+ print(f" - {name} ({input_type})")
+
+ # Take screenshot for visual reference
+ page.screenshot(path='/tmp/page_discovery.png', full_page=True)
+ print("\nScreenshot saved to /tmp/page_discovery.png")
+
+ browser.close()
\ No newline at end of file
diff --git a/.claude/skills/webapp-testing/examples/static_html_automation.py b/.claude/skills/webapp-testing/examples/static_html_automation.py
new file mode 100644
index 0000000..90bbedc
--- /dev/null
+++ b/.claude/skills/webapp-testing/examples/static_html_automation.py
@@ -0,0 +1,33 @@
+from playwright.sync_api import sync_playwright
+import os
+
+# Example: Automating interaction with static HTML files using file:// URLs
+
+html_file_path = os.path.abspath('path/to/your/file.html')
+file_url = f'file://{html_file_path}'
+
+with sync_playwright() as p:
+ browser = p.chromium.launch(headless=True)
+ page = browser.new_page(viewport={'width': 1920, 'height': 1080})
+
+ # Navigate to local HTML file
+ page.goto(file_url)
+
+ # Take screenshot
+ page.screenshot(path='/mnt/user-data/outputs/static_page.png', full_page=True)
+
+ # Interact with elements
+ page.click('text=Click Me')
+ page.fill('#name', 'John Doe')
+ page.fill('#email', 'john@example.com')
+
+ # Submit form
+ page.click('button[type="submit"]')
+ page.wait_for_timeout(500)
+
+ # Take final screenshot
+ page.screenshot(path='/mnt/user-data/outputs/after_submit.png', full_page=True)
+
+ browser.close()
+
+print("Static HTML automation completed!")
\ No newline at end of file
diff --git a/.claude/skills/webapp-testing/scripts/with_server.py b/.claude/skills/webapp-testing/scripts/with_server.py
new file mode 100644
index 0000000..431f2eb
--- /dev/null
+++ b/.claude/skills/webapp-testing/scripts/with_server.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python3
+"""
+Start one or more servers, wait for them to be ready, run a command, then clean up.
+
+Usage:
+ # Single server
+ python scripts/with_server.py --server "npm run dev" --port 5173 -- python automation.py
+ python scripts/with_server.py --server "npm start" --port 3000 -- python test.py
+
+ # Multiple servers
+ python scripts/with_server.py \
+ --server "cd backend && python server.py" --port 3000 \
+ --server "cd frontend && npm run dev" --port 5173 \
+ -- python test.py
+"""
+
+import subprocess
+import socket
+import time
+import sys
+import argparse
+
+def is_server_ready(port, timeout=30):
+ """Wait for server to be ready by polling the port."""
+ start_time = time.time()
+ while time.time() - start_time < timeout:
+ try:
+ with socket.create_connection(('localhost', port), timeout=1):
+ return True
+ except (socket.error, ConnectionRefusedError):
+ time.sleep(0.5)
+ return False
+
+
+def main():
+ parser = argparse.ArgumentParser(description='Run command with one or more servers')
+ parser.add_argument('--server', action='append', dest='servers', required=True, help='Server command (can be repeated)')
+ parser.add_argument('--port', action='append', dest='ports', type=int, required=True, help='Port for each server (must match --server count)')
+ parser.add_argument('--timeout', type=int, default=30, help='Timeout in seconds per server (default: 30)')
+ parser.add_argument('command', nargs=argparse.REMAINDER, help='Command to run after server(s) ready')
+
+ args = parser.parse_args()
+
+ # Remove the '--' separator if present
+ if args.command and args.command[0] == '--':
+ args.command = args.command[1:]
+
+ if not args.command:
+ print("Error: No command specified to run")
+ sys.exit(1)
+
+ # Parse server configurations
+ if len(args.servers) != len(args.ports):
+ print("Error: Number of --server and --port arguments must match")
+ sys.exit(1)
+
+ servers = []
+ for cmd, port in zip(args.servers, args.ports):
+ servers.append({'cmd': cmd, 'port': port})
+
+ server_processes = []
+
+ try:
+ # Start all servers
+ for i, server in enumerate(servers):
+ print(f"Starting server {i+1}/{len(servers)}: {server['cmd']}")
+
+ # Use shell=True to support commands with cd and &&
+ process = subprocess.Popen(
+ server['cmd'],
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
+ server_processes.append(process)
+
+ # Wait for this server to be ready
+ print(f"Waiting for server on port {server['port']}...")
+ if not is_server_ready(server['port'], timeout=args.timeout):
+ raise RuntimeError(f"Server failed to start on port {server['port']} within {args.timeout}s")
+
+ print(f"Server ready on port {server['port']}")
+
+ print(f"\nAll {len(servers)} server(s) ready")
+
+ # Run the command
+ print(f"Running: {' '.join(args.command)}\n")
+ result = subprocess.run(args.command)
+ sys.exit(result.returncode)
+
+ finally:
+ # Clean up all servers
+ print(f"\nStopping {len(server_processes)} server(s)...")
+ for i, process in enumerate(server_processes):
+ try:
+ process.terminate()
+ process.wait(timeout=5)
+ except subprocess.TimeoutExpired:
+ process.kill()
+ process.wait()
+ print(f"Server {i+1} stopped")
+ print("All servers stopped")
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/.claude/skills/xlsx/.openskills.json b/.claude/skills/xlsx/.openskills.json
new file mode 100644
index 0000000..91628a7
--- /dev/null
+++ b/.claude/skills/xlsx/.openskills.json
@@ -0,0 +1,7 @@
+{
+ "source": "anthropics/skills",
+ "sourceType": "git",
+ "repoUrl": "https://github.com/anthropics/skills",
+ "subpath": "skills\\xlsx",
+ "installedAt": "2026-03-02T09:19:50.195Z"
+}
\ No newline at end of file
diff --git a/.claude/skills/xlsx/LICENSE.txt b/.claude/skills/xlsx/LICENSE.txt
new file mode 100644
index 0000000..c55ab42
--- /dev/null
+++ b/.claude/skills/xlsx/LICENSE.txt
@@ -0,0 +1,30 @@
+© 2025 Anthropic, PBC. All rights reserved.
+
+LICENSE: Use of these materials (including all code, prompts, assets, files,
+and other components of this Skill) is governed by your agreement with
+Anthropic regarding use of Anthropic's services. If no separate agreement
+exists, use is governed by Anthropic's Consumer Terms of Service or
+Commercial Terms of Service, as applicable:
+https://www.anthropic.com/legal/consumer-terms
+https://www.anthropic.com/legal/commercial-terms
+Your applicable agreement is referred to as the "Agreement." "Services" are
+as defined in the Agreement.
+
+ADDITIONAL RESTRICTIONS: Notwithstanding anything in the Agreement to the
+contrary, users may not:
+
+- Extract these materials from the Services or retain copies of these
+ materials outside the Services
+- Reproduce or copy these materials, except for temporary copies created
+ automatically during authorized use of the Services
+- Create derivative works based on these materials
+- Distribute, sublicense, or transfer these materials to any third party
+- Make, offer to sell, sell, or import any inventions embodied in these
+ materials
+- Reverse engineer, decompile, or disassemble these materials
+
+The receipt, viewing, or possession of these materials does not convey or
+imply any license or right beyond those expressly granted above.
+
+Anthropic retains all right, title, and interest in these materials,
+including all copyrights, patents, and other intellectual property rights.
diff --git a/.claude/skills/xlsx/SKILL.md b/.claude/skills/xlsx/SKILL.md
new file mode 100644
index 0000000..c5c881b
--- /dev/null
+++ b/.claude/skills/xlsx/SKILL.md
@@ -0,0 +1,292 @@
+---
+name: xlsx
+description: "Use this skill any time a spreadsheet file is the primary input or output. This means any task where the user wants to: open, read, edit, or fix an existing .xlsx, .xlsm, .csv, or .tsv file (e.g., adding columns, computing formulas, formatting, charting, cleaning messy data); create a new spreadsheet from scratch or from other data sources; or convert between tabular file formats. Trigger especially when the user references a spreadsheet file by name or path — even casually (like \"the xlsx in my downloads\") — and wants something done to it or produced from it. Also trigger for cleaning or restructuring messy tabular data files (malformed rows, misplaced headers, junk data) into proper spreadsheets. The deliverable must be a spreadsheet file. Do NOT trigger when the primary deliverable is a Word document, HTML report, standalone Python script, database pipeline, or Google Sheets API integration, even if tabular data is involved."
+license: Proprietary. LICENSE.txt has complete terms
+---
+
+# Requirements for Outputs
+
+## All Excel files
+
+### Professional Font
+- Use a consistent, professional font (e.g., Arial, Times New Roman) for all deliverables unless otherwise instructed by the user
+
+### Zero Formula Errors
+- Every Excel model MUST be delivered with ZERO formula errors (#REF!, #DIV/0!, #VALUE!, #N/A, #NAME?)
+
+### Preserve Existing Templates (when updating templates)
+- Study and EXACTLY match existing format, style, and conventions when modifying files
+- Never impose standardized formatting on files with established patterns
+- Existing template conventions ALWAYS override these guidelines
+
+## Financial models
+
+### Color Coding Standards
+Unless otherwise stated by the user or existing template
+
+#### Industry-Standard Color Conventions
+- **Blue text (RGB: 0,0,255)**: Hardcoded inputs, and numbers users will change for scenarios
+- **Black text (RGB: 0,0,0)**: ALL formulas and calculations
+- **Green text (RGB: 0,128,0)**: Links pulling from other worksheets within same workbook
+- **Red text (RGB: 255,0,0)**: External links to other files
+- **Yellow background (RGB: 255,255,0)**: Key assumptions needing attention or cells that need to be updated
+
+### Number Formatting Standards
+
+#### Required Format Rules
+- **Years**: Format as text strings (e.g., "2024" not "2,024")
+- **Currency**: Use $#,##0 format; ALWAYS specify units in headers ("Revenue ($mm)")
+- **Zeros**: Use number formatting to make all zeros "-", including percentages (e.g., "$#,##0;($#,##0);-")
+- **Percentages**: Default to 0.0% format (one decimal)
+- **Multiples**: Format as 0.0x for valuation multiples (EV/EBITDA, P/E)
+- **Negative numbers**: Use parentheses (123) not minus -123
+
+### Formula Construction Rules
+
+#### Assumptions Placement
+- Place ALL assumptions (growth rates, margins, multiples, etc.) in separate assumption cells
+- Use cell references instead of hardcoded values in formulas
+- Example: Use =B5*(1+$B$6) instead of =B5*1.05
+
+#### Formula Error Prevention
+- Verify all cell references are correct
+- Check for off-by-one errors in ranges
+- Ensure consistent formulas across all projection periods
+- Test with edge cases (zero values, negative numbers)
+- Verify no unintended circular references
+
+#### Documentation Requirements for Hardcodes
+- Add a cell comment, or a note in the adjacent cell (if at the end of a table). Format: "Source: [System/Document], [Date], [Specific Reference], [URL if applicable]"
+- Examples:
+ - "Source: Company 10-K, FY2024, Page 45, Revenue Note, [SEC EDGAR URL]"
+ - "Source: Company 10-Q, Q2 2025, Exhibit 99.1, [SEC EDGAR URL]"
+ - "Source: Bloomberg Terminal, 8/15/2025, AAPL US Equity"
+ - "Source: FactSet, 8/20/2025, Consensus Estimates Screen"
+
+# XLSX creation, editing, and analysis
+
+## Overview
+
+A user may ask you to create, edit, or analyze the contents of an .xlsx file. You have different tools and workflows available for different tasks.
+
+## Important Requirements
+
+**LibreOffice Required for Formula Recalculation**: You can assume LibreOffice is installed for recalculating formula values using the `scripts/recalc.py` script. The script automatically configures LibreOffice on first run, including in sandboxed environments where Unix sockets are restricted (handled by `scripts/office/soffice.py`)
+
+## Reading and analyzing data
+
+### Data analysis with pandas
+For data analysis, visualization, and basic operations, use **pandas** which provides powerful data manipulation capabilities:
+
+```python
+import pandas as pd
+
+# Read Excel
+df = pd.read_excel('file.xlsx') # Default: first sheet
+all_sheets = pd.read_excel('file.xlsx', sheet_name=None) # All sheets as dict
+
+# Analyze
+df.head() # Preview data
+df.info() # Column info
+df.describe() # Statistics
+
+# Write Excel
+df.to_excel('output.xlsx', index=False)
+```
+
+## Excel File Workflows
+
+## CRITICAL: Use Formulas, Not Hardcoded Values
+
+**Always use Excel formulas instead of calculating values in Python and hardcoding them.** This ensures the spreadsheet remains dynamic and updateable.
+
+### ❌ WRONG - Hardcoding Calculated Values
+```python
+# Bad: Calculating in Python and hardcoding result
+total = df['Sales'].sum()
+sheet['B10'] = total # Hardcodes 5000
+
+# Bad: Computing growth rate in Python
+growth = (df.iloc[-1]['Revenue'] - df.iloc[0]['Revenue']) / df.iloc[0]['Revenue']
+sheet['C5'] = growth # Hardcodes 0.15
+
+# Bad: Python calculation for average
+avg = sum(values) / len(values)
+sheet['D20'] = avg # Hardcodes 42.5
+```
+
+### ✅ CORRECT - Using Excel Formulas
+```python
+# Good: Let Excel calculate the sum
+sheet['B10'] = '=SUM(B2:B9)'
+
+# Good: Growth rate as Excel formula
+sheet['C5'] = '=(C4-C2)/C2'
+
+# Good: Average using Excel function
+sheet['D20'] = '=AVERAGE(D2:D19)'
+```
+
+This applies to ALL calculations - totals, percentages, ratios, differences, etc. The spreadsheet should be able to recalculate when source data changes.
+
+## Common Workflow
+1. **Choose tool**: pandas for data, openpyxl for formulas/formatting
+2. **Create/Load**: Create new workbook or load existing file
+3. **Modify**: Add/edit data, formulas, and formatting
+4. **Save**: Write to file
+5. **Recalculate formulas (MANDATORY IF USING FORMULAS)**: Use the scripts/recalc.py script
+ ```bash
+ python scripts/recalc.py output.xlsx
+ ```
+6. **Verify and fix any errors**:
+ - The script returns JSON with error details
+ - If `status` is `errors_found`, check `error_summary` for specific error types and locations
+ - Fix the identified errors and recalculate again
+ - Common errors to fix:
+ - `#REF!`: Invalid cell references
+ - `#DIV/0!`: Division by zero
+ - `#VALUE!`: Wrong data type in formula
+ - `#NAME?`: Unrecognized formula name
+
+### Creating new Excel files
+
+```python
+# Using openpyxl for formulas and formatting
+from openpyxl import Workbook
+from openpyxl.styles import Font, PatternFill, Alignment
+
+wb = Workbook()
+sheet = wb.active
+
+# Add data
+sheet['A1'] = 'Hello'
+sheet['B1'] = 'World'
+sheet.append(['Row', 'of', 'data'])
+
+# Add formula
+sheet['B2'] = '=SUM(A1:A10)'
+
+# Formatting
+sheet['A1'].font = Font(bold=True, color='FF0000')
+sheet['A1'].fill = PatternFill('solid', start_color='FFFF00')
+sheet['A1'].alignment = Alignment(horizontal='center')
+
+# Column width
+sheet.column_dimensions['A'].width = 20
+
+wb.save('output.xlsx')
+```
+
+### Editing existing Excel files
+
+```python
+# Using openpyxl to preserve formulas and formatting
+from openpyxl import load_workbook
+
+# Load existing file
+wb = load_workbook('existing.xlsx')
+sheet = wb.active # or wb['SheetName'] for specific sheet
+
+# Working with multiple sheets
+for sheet_name in wb.sheetnames:
+ sheet = wb[sheet_name]
+ print(f"Sheet: {sheet_name}")
+
+# Modify cells
+sheet['A1'] = 'New Value'
+sheet.insert_rows(2) # Insert row at position 2
+sheet.delete_cols(3) # Delete column 3
+
+# Add new sheet
+new_sheet = wb.create_sheet('NewSheet')
+new_sheet['A1'] = 'Data'
+
+wb.save('modified.xlsx')
+```
+
+## Recalculating formulas
+
+Excel files created or modified by openpyxl contain formulas as strings but not calculated values. Use the provided `scripts/recalc.py` script to recalculate formulas:
+
+```bash
+python scripts/recalc.py <file.xlsx> [timeout_seconds]
+```
+
+Example:
+```bash
+python scripts/recalc.py output.xlsx 30
+```
+
+The script:
+- Automatically sets up LibreOffice macro on first run
+- Recalculates all formulas in all sheets
+- Scans ALL cells for Excel errors (#REF!, #DIV/0!, etc.)
+- Returns JSON with detailed error locations and counts
+- Works on both Linux and macOS
+
+## Formula Verification Checklist
+
+Quick checks to ensure formulas work correctly:
+
+### Essential Verification
+- [ ] **Test 2-3 sample references**: Verify they pull correct values before building full model
+- [ ] **Column mapping**: Confirm Excel columns match (e.g., column 64 = BL, not BK)
+- [ ] **Row offset**: Remember Excel rows are 1-indexed (DataFrame row 5 = Excel row 6)
+
+### Common Pitfalls
+- [ ] **NaN handling**: Check for null values with `pd.notna()`
+- [ ] **Far-right columns**: FY data often in columns 50+
+- [ ] **Multiple matches**: Search all occurrences, not just first
+- [ ] **Division by zero**: Check denominators before using `/` in formulas (#DIV/0!)
+- [ ] **Wrong references**: Verify all cell references point to intended cells (#REF!)
+- [ ] **Cross-sheet references**: Use correct format (Sheet1!A1) for linking sheets
+
+### Formula Testing Strategy
+- [ ] **Start small**: Test formulas on 2-3 cells before applying broadly
+- [ ] **Verify dependencies**: Check all cells referenced in formulas exist
+- [ ] **Test edge cases**: Include zero, negative, and very large values
+
+### Interpreting scripts/recalc.py Output
+The script returns JSON with error details:
+```json
+{
+ "status": "success", // or "errors_found"
+ "total_errors": 0, // Total error count
+ "total_formulas": 42, // Number of formulas in file
+ "error_summary": { // Only present if errors found
+ "#REF!": {
+ "count": 2,
+ "locations": ["Sheet1!B5", "Sheet1!C10"]
+ }
+ }
+}
+```
+
+## Best Practices
+
+### Library Selection
+- **pandas**: Best for data analysis, bulk operations, and simple data export
+- **openpyxl**: Best for complex formatting, formulas, and Excel-specific features
+
+### Working with openpyxl
+- Cell indices are 1-based (row=1, column=1 refers to cell A1)
+- Use `data_only=True` to read calculated values: `load_workbook('file.xlsx', data_only=True)`
+- **Warning**: If opened with `data_only=True` and saved, formulas are replaced with values and permanently lost
+- For large files: Use `read_only=True` for reading or `write_only=True` for writing
+- Formulas are preserved but not evaluated - use scripts/recalc.py to update values
+
+### Working with pandas
+- Specify data types to avoid inference issues: `pd.read_excel('file.xlsx', dtype={'id': str})`
+- For large files, read specific columns: `pd.read_excel('file.xlsx', usecols=['A', 'C', 'E'])`
+- Handle dates properly: `pd.read_excel('file.xlsx', parse_dates=['date_column'])`
+
+## Code Style Guidelines
+**IMPORTANT**: When generating Python code for Excel operations:
+- Write minimal, concise Python code without unnecessary comments
+- Avoid verbose variable names and redundant operations
+- Avoid unnecessary print statements
+
+**For Excel files themselves**:
+- Add comments to cells with complex formulas or important assumptions
+- Document data sources for hardcoded values
+- Include notes for key calculations and model sections
\ No newline at end of file
diff --git a/.claude/skills/xlsx/scripts/office/helpers/__init__.py b/.claude/skills/xlsx/scripts/office/helpers/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/.claude/skills/xlsx/scripts/office/helpers/merge_runs.py b/.claude/skills/xlsx/scripts/office/helpers/merge_runs.py
new file mode 100644
index 0000000..ad7c25e
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/helpers/merge_runs.py
@@ -0,0 +1,199 @@
+"""Merge adjacent runs with identical formatting in DOCX.
+
+Merges adjacent elements that have identical properties.
+Works on runs in paragraphs and inside tracked changes (w:ins, w:del).
+
+Also:
+- Removes rsid attributes from runs (revision metadata that doesn't affect rendering)
+- Removes proofErr elements (spell/grammar markers that block merging)
+"""
+
+from pathlib import Path
+
+import defusedxml.minidom
+
+
def merge_runs(input_dir: str) -> tuple[int, str]:
    """Merge adjacent identically-formatted runs in word/document.xml.

    Returns a (merge_count, message) pair; on any failure the count is 0
    and the message describes the error.
    """
    doc_xml = Path(input_dir) / "word" / "document.xml"

    if not doc_xml.exists():
        return 0, f"Error: {doc_xml} not found"

    try:
        document = defusedxml.minidom.parseString(doc_xml.read_text(encoding="utf-8"))
        doc_root = document.documentElement

        # Proofing markers and rsid attributes would otherwise keep
        # otherwise-identical runs from comparing equal.
        _remove_elements(doc_root, "proofErr")
        _strip_run_rsid_attrs(doc_root)

        # Every distinct parent of a run is a container whose child runs
        # are candidates for merging.
        parents = {node.parentNode for node in _find_elements(doc_root, "r")}
        total = sum(_merge_runs_in(parent) for parent in parents)

        doc_xml.write_bytes(document.toxml(encoding="UTF-8"))
        return total, f"Merged {total} runs"

    except Exception as e:
        return 0, f"Error: {e}"
+
+
+
+
+def _find_elements(root, tag: str) -> list:
+ results = []
+
+ def traverse(node):
+ if node.nodeType == node.ELEMENT_NODE:
+ name = node.localName or node.tagName
+ if name == tag or name.endswith(f":{tag}"):
+ results.append(node)
+ for child in node.childNodes:
+ traverse(child)
+
+ traverse(root)
+ return results
+
+
+def _get_child(parent, tag: str):
+ for child in parent.childNodes:
+ if child.nodeType == child.ELEMENT_NODE:
+ name = child.localName or child.tagName
+ if name == tag or name.endswith(f":{tag}"):
+ return child
+ return None
+
+
+def _get_children(parent, tag: str) -> list:
+ results = []
+ for child in parent.childNodes:
+ if child.nodeType == child.ELEMENT_NODE:
+ name = child.localName or child.tagName
+ if name == tag or name.endswith(f":{tag}"):
+ results.append(child)
+ return results
+
+
+def _is_adjacent(elem1, elem2) -> bool:
+ node = elem1.nextSibling
+ while node:
+ if node == elem2:
+ return True
+ if node.nodeType == node.ELEMENT_NODE:
+ return False
+ if node.nodeType == node.TEXT_NODE and node.data.strip():
+ return False
+ node = node.nextSibling
+ return False
+
+
+
+
def _remove_elements(root, tag: str):
    """Detach every element matching *tag* from its parent, document-wide."""
    for node in _find_elements(root, tag):
        parent = node.parentNode
        if parent:
            parent.removeChild(node)
+
+
def _strip_run_rsid_attrs(root):
    """Drop revision-save-id (rsid) attributes from every run element.

    rsid values are revision metadata only; removing them lets runs that
    render identically compare equal.
    """
    for run in _find_elements(root, "r"):
        # Collect names first: removing while iterating the live
        # attribute map would skip entries.
        doomed = [a.name for a in run.attributes.values() if "rsid" in a.name.lower()]
        for attr_name in doomed:
            run.removeAttribute(attr_name)
+
+
+
+
def _merge_runs_in(container) -> int:
    # Merge every mergeable chain of adjacent runs directly under
    # *container*; returns the number of merges performed.
    merge_count = 0
    run = _first_child_run(container)

    while run:
        # Greedily absorb following runs into `run` while their
        # formatting matches and only whitespace separates them.
        while True:
            next_elem = _next_element_sibling(run)
            if next_elem and _is_run(next_elem) and _can_merge(run, next_elem):
                _merge_run_content(run, next_elem)
                container.removeChild(next_elem)
                merge_count += 1
            else:
                break

        # Collapse the absorbed text children into a single text element,
        # then continue from the next surviving run.
        _consolidate_text(run)
        run = _next_sibling_run(run)

    return merge_count
+
+
def _first_child_run(container):
    """Return the first direct child of *container* that is a run, or None."""
    return next(
        (
            node
            for node in container.childNodes
            if node.nodeType == node.ELEMENT_NODE and _is_run(node)
        ),
        None,
    )
+
+
+def _next_element_sibling(node):
+ sibling = node.nextSibling
+ while sibling:
+ if sibling.nodeType == sibling.ELEMENT_NODE:
+ return sibling
+ sibling = sibling.nextSibling
+ return None
+
+
def _next_sibling_run(node):
    """Return the next sibling element that is a run, or None."""
    candidate = node.nextSibling
    while candidate is not None:
        if candidate.nodeType == candidate.ELEMENT_NODE and _is_run(candidate):
            return candidate
        candidate = candidate.nextSibling
    return None
+
+
+def _is_run(node) -> bool:
+ name = node.localName or node.tagName
+ return name == "r" or name.endswith(":r")
+
+
def _can_merge(run1, run2) -> bool:
    """Two runs merge only when their run-properties (rPr) are absent in
    both, or serialize to exactly the same XML."""
    props1 = _get_child(run1, "rPr")
    props2 = _get_child(run2, "rPr")

    if props1 is None and props2 is None:
        return True
    if props1 is None or props2 is None:
        return False
    return props1.toxml() == props2.toxml()
+
+
+def _merge_run_content(target, source):
+ for child in list(source.childNodes):
+ if child.nodeType == child.ELEMENT_NODE:
+ name = child.localName or child.tagName
+ if name != "rPr" and not name.endswith(":rPr"):
+ target.appendChild(child)
+
+
def _consolidate_text(run):
    # Fuse the run's text (t) children pairwise so a merged run ends up
    # with one text element instead of several fragments.
    t_elements = _get_children(run, "t")

    # Walk right-to-left so removing the current element never disturbs
    # the indices of elements still to be visited.
    for i in range(len(t_elements) - 1, 0, -1):
        curr, prev = t_elements[i], t_elements[i - 1]

        # Only fuse when nothing significant sits between the two.
        if _is_adjacent(prev, curr):
            prev_text = prev.firstChild.data if prev.firstChild else ""
            curr_text = curr.firstChild.data if curr.firstChild else ""
            merged = prev_text + curr_text

            if prev.firstChild:
                prev.firstChild.data = merged
            else:
                prev.appendChild(run.ownerDocument.createTextNode(merged))

            # Leading/trailing spaces survive in WordprocessingML only
            # with xml:space="preserve"; keep the attribute in sync.
            if merged.startswith(" ") or merged.endswith(" "):
                prev.setAttribute("xml:space", "preserve")
            elif prev.hasAttribute("xml:space"):
                prev.removeAttribute("xml:space")

            run.removeChild(curr)
diff --git a/.claude/skills/xlsx/scripts/office/helpers/simplify_redlines.py b/.claude/skills/xlsx/scripts/office/helpers/simplify_redlines.py
new file mode 100644
index 0000000..db963bb
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/helpers/simplify_redlines.py
@@ -0,0 +1,197 @@
+"""Simplify tracked changes by merging adjacent w:ins or w:del elements.
+
+Merges adjacent w:ins elements from the same author into a single element.
+Same for w:del elements. This makes heavily-redlined documents easier to
+work with by reducing the number of tracked change wrappers.
+
+Rules:
+- Only merges w:ins with w:ins, w:del with w:del (same element type)
+- Only merges if same author (ignores timestamp differences)
+- Only merges if truly adjacent (only whitespace between them)
+"""
+
+import xml.etree.ElementTree as ET
+import zipfile
+from pathlib import Path
+
+import defusedxml.minidom
+
+WORD_NS = "http://schemas.openxmlformats.org/wordprocessingml/2006/main"
+
+
def simplify_redlines(input_dir: str) -> tuple[int, str]:
    """Collapse adjacent same-author tracked-change wrappers in document.xml.

    Returns (merge_count, message); on failure the count is 0 and the
    message carries the error text.
    """
    doc_xml = Path(input_dir) / "word" / "document.xml"

    if not doc_xml.exists():
        return 0, f"Error: {doc_xml} not found"

    try:
        document = defusedxml.minidom.parseString(doc_xml.read_text(encoding="utf-8"))
        doc_root = document.documentElement

        # Tracked-change wrappers sit directly inside paragraphs and
        # table cells, so those are the only containers to scan.
        containers = _find_elements(doc_root, "p") + _find_elements(doc_root, "tc")

        total = sum(
            _merge_tracked_changes_in(container, kind)
            for container in containers
            for kind in ("ins", "del")
        )

        doc_xml.write_bytes(document.toxml(encoding="UTF-8"))
        return total, f"Simplified {total} tracked changes"

    except Exception as e:
        return 0, f"Error: {e}"
+
+
def _merge_tracked_changes_in(container, tag: str) -> int:
    # Merge adjacent same-author tracked-change elements (*tag* is "ins"
    # or "del") that are direct children of *container*. Returns how many
    # merges were performed.
    merge_count = 0

    tracked = [
        child
        for child in container.childNodes
        if child.nodeType == child.ELEMENT_NODE and _is_element(child, tag)
    ]

    if len(tracked) < 2:
        return 0

    # `i` advances only when a pair cannot merge; on a merge the list
    # shrinks in place so the same `curr` is retried against its new
    # successor, collapsing whole chains into one element.
    i = 0
    while i < len(tracked) - 1:
        curr = tracked[i]
        next_elem = tracked[i + 1]

        if _can_merge_tracked(curr, next_elem):
            _merge_tracked_content(curr, next_elem)
            container.removeChild(next_elem)
            tracked.pop(i + 1)
            merge_count += 1
        else:
            i += 1

    return merge_count
+
+
+def _is_element(node, tag: str) -> bool:
+ name = node.localName or node.tagName
+ return name == tag or name.endswith(f":{tag}")
+
+
+def _get_author(elem) -> str:
+ author = elem.getAttribute("w:author")
+ if not author:
+ for attr in elem.attributes.values():
+ if attr.localName == "author" or attr.name.endswith(":author"):
+ return attr.value
+ return author
+
+
def _can_merge_tracked(elem1, elem2) -> bool:
    """Tracked changes merge when both carry the same author and only
    insignificant whitespace (or comments) lies between them."""
    if _get_author(elem1) != _get_author(elem2):
        return False

    between = elem1.nextSibling
    while between is not None and between != elem2:
        if between.nodeType == between.ELEMENT_NODE:
            return False
        if between.nodeType == between.TEXT_NODE and between.data.strip():
            return False
        between = between.nextSibling

    return True
+
+
+def _merge_tracked_content(target, source):
+ while source.firstChild:
+ child = source.firstChild
+ source.removeChild(child)
+ target.appendChild(child)
+
+
+def _find_elements(root, tag: str) -> list:
+ results = []
+
+ def traverse(node):
+ if node.nodeType == node.ELEMENT_NODE:
+ name = node.localName or node.tagName
+ if name == tag or name.endswith(f":{tag}"):
+ results.append(node)
+ for child in node.childNodes:
+ traverse(child)
+
+ traverse(root)
+ return results
+
+
def get_tracked_change_authors(doc_xml_path: Path) -> dict[str, int]:
    """Count w:ins / w:del tracked changes per author in a document.xml.

    Returns {} when the file is missing or is not parseable XML.
    """
    if not doc_xml_path.exists():
        return {}

    try:
        root = ET.parse(doc_xml_path).getroot()
    except ET.ParseError:
        return {}

    author_attr = f"{{{WORD_NS}}}author"
    counts: dict[str, int] = {}

    for xpath in (".//w:ins", ".//w:del"):
        for elem in root.findall(xpath, {"w": WORD_NS}):
            who = elem.get(author_attr)
            if who:
                counts[who] = counts.get(who, 0) + 1

    return counts
+
+
def _get_authors_from_docx(docx_path: Path) -> dict[str, int]:
    """Count w:ins / w:del tracked changes per author inside a .docx archive.

    Returns {} when the file is missing or unreadable, is not a zip,
    lacks word/document.xml, or contains unparseable XML — mirroring the
    {}-on-missing behavior of get_tracked_change_authors().
    """
    try:
        with zipfile.ZipFile(docx_path, "r") as zf:
            if "word/document.xml" not in zf.namelist():
                return {}
            with zf.open("word/document.xml") as f:
                tree = ET.parse(f)
                root = tree.getroot()

        namespaces = {"w": WORD_NS}
        author_attr = f"{{{WORD_NS}}}author"

        authors: dict[str, int] = {}
        for tag in ["ins", "del"]:
            for elem in root.findall(f".//w:{tag}", namespaces):
                author = elem.get(author_attr)
                if author:
                    authors[author] = authors.get(author, 0) + 1
        return authors
    except (OSError, zipfile.BadZipFile, ET.ParseError):
        # OSError (incl. FileNotFoundError) was previously uncaught:
        # zipfile.ZipFile raises it for a missing path, which crashed
        # infer_author() when the original .docx was absent.
        return {}
+
+
def infer_author(modified_dir: Path, original_docx: Path, default: str = "Claude") -> str:
    """Infer which author produced the new tracked changes.

    Compares per-author change counts in the modified (unpacked) document
    against the original .docx. Returns *default* when no new changes are
    found, the single author whose count grew otherwise, and raises
    ValueError when several authors added changes.
    """
    modified_counts = get_tracked_change_authors(modified_dir / "word" / "document.xml")

    if not modified_counts:
        return default

    baseline = _get_authors_from_docx(original_docx)

    # Authors whose change count grew relative to the original.
    added = {
        author: count - baseline.get(author, 0)
        for author, count in modified_counts.items()
        if count - baseline.get(author, 0) > 0
    }

    if not added:
        return default
    if len(added) == 1:
        return next(iter(added))

    raise ValueError(
        f"Multiple authors added new changes: {added}. "
        "Cannot infer which author to validate."
    )
diff --git a/.claude/skills/xlsx/scripts/office/pack.py b/.claude/skills/xlsx/scripts/office/pack.py
new file mode 100644
index 0000000..db29ed8
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/pack.py
@@ -0,0 +1,159 @@
+"""Pack a directory into a DOCX, PPTX, or XLSX file.
+
+Validates with auto-repair, condenses XML formatting, and creates the Office file.
+
+Usage:
+ python pack.py <input_dir> <output_file> [--original <file>] [--validate true|false]
+
+Examples:
+ python pack.py unpacked/ output.docx --original input.docx
+ python pack.py unpacked/ output.pptx --validate false
+"""
+
+import argparse
+import sys
+import shutil
+import tempfile
+import zipfile
+from pathlib import Path
+
+import defusedxml.minidom
+
+from validators import DOCXSchemaValidator, PPTXSchemaValidator, RedliningValidator
+
def pack(
    input_directory: str,
    output_file: str,
    original_file: str | None = None,
    validate: bool = True,
    infer_author_func=None,
) -> tuple[None, str]:
    """Pack an unpacked Office directory into a .docx/.pptx/.xlsx file.

    Optionally validates (with auto-repair) against *original_file*
    first, condenses all XML, then zips the directory. Returns
    (None, message); messages starting with "Error" signal failure.
    """
    input_dir = Path(input_directory)
    output_path = Path(output_file)
    suffix = output_path.suffix.lower()

    if not input_dir.is_dir():
        return None, f"Error: {input_dir} is not a directory"

    if suffix not in {".docx", ".pptx", ".xlsx"}:
        return None, f"Error: {output_file} must be a .docx, .pptx, or .xlsx file"

    # Validation is best-effort: it runs only when requested AND the
    # original file actually exists on disk.
    if validate and original_file:
        original_path = Path(original_file)
        if original_path.exists():
            success, output = _run_validation(
                input_dir, original_path, suffix, infer_author_func
            )
            if output:
                print(output)
            if not success:
                return None, f"Error: Validation failed for {input_dir}"

    # Work on a copy so condensing never mutates the caller's directory.
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_content_dir = Path(temp_dir) / "content"
        shutil.copytree(input_dir, temp_content_dir)

        # Strip pretty-printing whitespace from every XML part.
        for pattern in ["*.xml", "*.rels"]:
            for xml_file in temp_content_dir.rglob(pattern):
                _condense_xml(xml_file)

        output_path.parent.mkdir(parents=True, exist_ok=True)
        # Archive paths must be relative to the content root so the
        # Office package structure is preserved.
        with zipfile.ZipFile(output_path, "w", zipfile.ZIP_DEFLATED) as zf:
            for f in temp_content_dir.rglob("*"):
                if f.is_file():
                    zf.write(f, f.relative_to(temp_content_dir))

    return None, f"Successfully packed {input_dir} to {output_file}"
+
+
def _run_validation(
    unpacked_dir: Path,
    original_file: Path,
    suffix: str,
    infer_author_func=None,
) -> tuple[bool, str | None]:
    """Run format-specific validators (with auto-repair) over *unpacked_dir*.

    Returns (success, human-readable output or None). Only .docx and
    .pptx have validators; other suffixes pass trivially.
    """
    output_lines = []
    validators = []

    if suffix == ".docx":
        # Redlining validation needs to know which author's tracked
        # changes to check; fall back to "Claude" if inference fails.
        author = "Claude"
        if infer_author_func:
            try:
                author = infer_author_func(unpacked_dir, original_file)
            except ValueError as e:
                print(f"Warning: {e} Using default author 'Claude'.", file=sys.stderr)

        validators = [
            DOCXSchemaValidator(unpacked_dir, original_file),
            RedliningValidator(unpacked_dir, original_file, author=author),
        ]
    elif suffix == ".pptx":
        validators = [PPTXSchemaValidator(unpacked_dir, original_file)]

    if not validators:
        return True, None

    # Repair first, then validate the (possibly repaired) content.
    total_repairs = sum(v.repair() for v in validators)
    if total_repairs:
        output_lines.append(f"Auto-repaired {total_repairs} issue(s)")

    success = all(v.validate() for v in validators)

    if success:
        output_lines.append("All validations PASSED!")

    return success, "\n".join(output_lines) if output_lines else None
+
+
def _condense_xml(xml_file: Path) -> None:
    """Strip ignorable whitespace text nodes and comments from an XML
    file, rewriting it in place.

    Children of text (:t) elements are left untouched because their
    whitespace is document content. Parse failures are reported to
    stderr and re-raised.
    """
    try:
        with open(xml_file, encoding="utf-8") as f:
            dom = defusedxml.minidom.parse(f)

        for element in dom.getElementsByTagName("*"):
            if element.tagName.endswith(":t"):
                continue

            removable = [
                child
                for child in element.childNodes
                if child.nodeType == child.COMMENT_NODE
                or (
                    child.nodeType == child.TEXT_NODE
                    and child.nodeValue
                    and not child.nodeValue.strip()
                )
            ]
            for child in removable:
                element.removeChild(child)

        xml_file.write_bytes(dom.toxml(encoding="UTF-8"))
    except Exception as e:
        print(f"ERROR: Failed to parse {xml_file.name}: {e}", file=sys.stderr)
        raise
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ description="Pack a directory into a DOCX, PPTX, or XLSX file"
+ )
+ parser.add_argument("input_directory", help="Unpacked Office document directory")
+ parser.add_argument("output_file", help="Output Office file (.docx/.pptx/.xlsx)")
+ parser.add_argument(
+ "--original",
+ help="Original file for validation comparison",
+ )
+ parser.add_argument(
+ "--validate",
+ type=lambda x: x.lower() == "true",
+ default=True,
+ metavar="true|false",
+ help="Run validation with auto-repair (default: true)",
+ )
+ args = parser.parse_args()
+
+ _, message = pack(
+ args.input_directory,
+ args.output_file,
+ original_file=args.original,
+ validate=args.validate,
+ )
+ print(message)
+
+ if "Error" in message:
+ sys.exit(1)
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chart.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chart.xsd
new file mode 100644
index 0000000..6454ef9
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chart.xsd
@@ -0,0 +1,1499 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd
new file mode 100644
index 0000000..afa4f46
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-chartDrawing.xsd
@@ -0,0 +1,146 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd
new file mode 100644
index 0000000..64e66b8
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-diagram.xsd
@@ -0,0 +1,1085 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd
new file mode 100644
index 0000000..687eea8
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-lockedCanvas.xsd
@@ -0,0 +1,11 @@
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-main.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-main.xsd
new file mode 100644
index 0000000..6ac81b0
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-main.xsd
@@ -0,0 +1,3081 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-picture.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-picture.xsd
new file mode 100644
index 0000000..1dbf051
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-picture.xsd
@@ -0,0 +1,23 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd
new file mode 100644
index 0000000..f1af17d
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-spreadsheetDrawing.xsd
@@ -0,0 +1,185 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd
new file mode 100644
index 0000000..0a185ab
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/dml-wordprocessingDrawing.xsd
@@ -0,0 +1,287 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/pml.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/pml.xsd
new file mode 100644
index 0000000..14ef488
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/pml.xsd
@@ -0,0 +1,1676 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd
new file mode 100644
index 0000000..c20f3bf
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-additionalCharacteristics.xsd
@@ -0,0 +1,28 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd
new file mode 100644
index 0000000..ac60252
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-bibliography.xsd
@@ -0,0 +1,144 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd
new file mode 100644
index 0000000..424b8ba
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-commonSimpleTypes.xsd
@@ -0,0 +1,174 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd
new file mode 100644
index 0000000..2bddce2
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlDataProperties.xsd
@@ -0,0 +1,25 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd
new file mode 100644
index 0000000..8a8c18b
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-customXmlSchemaProperties.xsd
@@ -0,0 +1,18 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd
new file mode 100644
index 0000000..5c42706
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd
@@ -0,0 +1,59 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd
new file mode 100644
index 0000000..853c341
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd
@@ -0,0 +1,56 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd
new file mode 100644
index 0000000..da835ee
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-documentPropertiesVariantTypes.xsd
@@ -0,0 +1,195 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-math.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-math.xsd
new file mode 100644
index 0000000..87ad265
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-math.xsd
@@ -0,0 +1,582 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd
new file mode 100644
index 0000000..9e86f1b
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/shared-relationshipReference.xsd
@@ -0,0 +1,25 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/sml.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/sml.xsd
new file mode 100644
index 0000000..d0be42e
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/sml.xsd
@@ -0,0 +1,4439 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-main.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-main.xsd
new file mode 100644
index 0000000..8821dd1
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-main.xsd
@@ -0,0 +1,570 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd
new file mode 100644
index 0000000..ca2575c
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-officeDrawing.xsd
@@ -0,0 +1,509 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd
new file mode 100644
index 0000000..dd079e6
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-presentationDrawing.xsd
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd
new file mode 100644
index 0000000..3dd6cf6
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-spreadsheetDrawing.xsd
@@ -0,0 +1,108 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd
new file mode 100644
index 0000000..f1041e3
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/vml-wordprocessingDrawing.xsd
@@ -0,0 +1,96 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/wml.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/wml.xsd
new file mode 100644
index 0000000..9c5b7a6
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/wml.xsd
@@ -0,0 +1,3646 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/xml.xsd b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/xml.xsd
new file mode 100644
index 0000000..0f13678
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ISO-IEC29500-4_2016/xml.xsd
@@ -0,0 +1,116 @@
+
+
+
+
+
+ See http://www.w3.org/XML/1998/namespace.html and
+ http://www.w3.org/TR/REC-xml for information about this namespace.
+
+ This schema document describes the XML namespace, in a form
+ suitable for import by other schema documents.
+
+ Note that local names in this namespace are intended to be defined
+ only by the World Wide Web Consortium or its subgroups. The
+ following names are currently defined in this namespace and should
+ not be used with conflicting semantics by any Working Group,
+ specification, or document instance:
+
+ base (as an attribute name): denotes an attribute whose value
+ provides a URI to be used as the base for interpreting any
+ relative URIs in the scope of the element on which it
+ appears; its value is inherited. This name is reserved
+ by virtue of its definition in the XML Base specification.
+
+ lang (as an attribute name): denotes an attribute whose value
+ is a language code for the natural language of the content of
+ any element; its value is inherited. This name is reserved
+ by virtue of its definition in the XML specification.
+
+ space (as an attribute name): denotes an attribute whose
+ value is a keyword indicating what whitespace processing
+ discipline is intended for the content of the element; its
+ value is inherited. This name is reserved by virtue of its
+ definition in the XML specification.
+
+ Father (in any context at all): denotes Jon Bosak, the chair of
+ the original XML Working Group. This name is reserved by
+ the following decision of the W3C XML Plenary and
+ XML Coordination groups:
+
+ In appreciation for his vision, leadership and dedication
+ the W3C XML Plenary on this 10th day of February, 2000
+ reserves for Jon Bosak in perpetuity the XML name
+ xml:Father
+
+
+
+
+ This schema defines attributes and an attribute group
+ suitable for use by
+ schemas wishing to allow xml:base, xml:lang or xml:space attributes
+ on elements they define.
+
+ To enable this, such a schema must import this schema
+ for the XML namespace, e.g. as follows:
+ <schema . . .>
+ . . .
+ <import namespace="http://www.w3.org/XML/1998/namespace"
+ schemaLocation="http://www.w3.org/2001/03/xml.xsd"/>
+
+ Subsequently, qualified reference to any of the attributes
+ or the group defined below will have the desired effect, e.g.
+
+ <type . . .>
+ . . .
+ <attributeGroup ref="xml:specialAttrs"/>
+
+ will define a type which will schema-validate an instance
+ element with any of those attributes
+
+
+
+ In keeping with the XML Schema WG's standard versioning
+ policy, this schema document will persist at
+ http://www.w3.org/2001/03/xml.xsd.
+ At the date of issue it can also be found at
+ http://www.w3.org/2001/xml.xsd.
+ The schema document at that URI may however change in the future,
+ in order to remain compatible with the latest version of XML Schema
+ itself. In other words, if the XML Schema namespace changes, the version
+ of this document at
+ http://www.w3.org/2001/xml.xsd will change
+ accordingly; the version at
+ http://www.w3.org/2001/03/xml.xsd will not change.
+
+
+
+
+
+ In due course, we should install the relevant ISO 2- and 3-letter
+ codes as the enumerated possible values . . .
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ See http://www.w3.org/TR/xmlbase/ for
+ information about this attribute.
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ecma/fouth-edition/opc-contentTypes.xsd b/.claude/skills/xlsx/scripts/office/schemas/ecma/fouth-edition/opc-contentTypes.xsd
new file mode 100644
index 0000000..a6de9d2
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ecma/fouth-edition/opc-contentTypes.xsd
@@ -0,0 +1,42 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ecma/fouth-edition/opc-coreProperties.xsd b/.claude/skills/xlsx/scripts/office/schemas/ecma/fouth-edition/opc-coreProperties.xsd
new file mode 100644
index 0000000..10e978b
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ecma/fouth-edition/opc-coreProperties.xsd
@@ -0,0 +1,50 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ecma/fouth-edition/opc-digSig.xsd b/.claude/skills/xlsx/scripts/office/schemas/ecma/fouth-edition/opc-digSig.xsd
new file mode 100644
index 0000000..4248bf7
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ecma/fouth-edition/opc-digSig.xsd
@@ -0,0 +1,49 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/ecma/fouth-edition/opc-relationships.xsd b/.claude/skills/xlsx/scripts/office/schemas/ecma/fouth-edition/opc-relationships.xsd
new file mode 100644
index 0000000..5649746
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/ecma/fouth-edition/opc-relationships.xsd
@@ -0,0 +1,33 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/mce/mc.xsd b/.claude/skills/xlsx/scripts/office/schemas/mce/mc.xsd
new file mode 100644
index 0000000..ef72545
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/mce/mc.xsd
@@ -0,0 +1,75 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/microsoft/wml-2010.xsd b/.claude/skills/xlsx/scripts/office/schemas/microsoft/wml-2010.xsd
new file mode 100644
index 0000000..f65f777
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/microsoft/wml-2010.xsd
@@ -0,0 +1,560 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/microsoft/wml-2012.xsd b/.claude/skills/xlsx/scripts/office/schemas/microsoft/wml-2012.xsd
new file mode 100644
index 0000000..6b00755
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/microsoft/wml-2012.xsd
@@ -0,0 +1,67 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/microsoft/wml-2018.xsd b/.claude/skills/xlsx/scripts/office/schemas/microsoft/wml-2018.xsd
new file mode 100644
index 0000000..f321d33
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/microsoft/wml-2018.xsd
@@ -0,0 +1,14 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/microsoft/wml-cex-2018.xsd b/.claude/skills/xlsx/scripts/office/schemas/microsoft/wml-cex-2018.xsd
new file mode 100644
index 0000000..364c6a9
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/microsoft/wml-cex-2018.xsd
@@ -0,0 +1,20 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/microsoft/wml-cid-2016.xsd b/.claude/skills/xlsx/scripts/office/schemas/microsoft/wml-cid-2016.xsd
new file mode 100644
index 0000000..fed9d15
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/microsoft/wml-cid-2016.xsd
@@ -0,0 +1,13 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/microsoft/wml-sdtdatahash-2020.xsd b/.claude/skills/xlsx/scripts/office/schemas/microsoft/wml-sdtdatahash-2020.xsd
new file mode 100644
index 0000000..680cf15
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/microsoft/wml-sdtdatahash-2020.xsd
@@ -0,0 +1,4 @@
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/schemas/microsoft/wml-symex-2015.xsd b/.claude/skills/xlsx/scripts/office/schemas/microsoft/wml-symex-2015.xsd
new file mode 100644
index 0000000..89ada90
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/schemas/microsoft/wml-symex-2015.xsd
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
diff --git a/.claude/skills/xlsx/scripts/office/soffice.py b/.claude/skills/xlsx/scripts/office/soffice.py
new file mode 100644
index 0000000..c7f7e32
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/soffice.py
@@ -0,0 +1,183 @@
+"""
+Helper for running LibreOffice (soffice) in environments where AF_UNIX
+sockets may be blocked (e.g., sandboxed VMs). Detects the restriction
+at runtime and applies an LD_PRELOAD shim if needed.
+
+Usage:
+ from office.soffice import run_soffice, get_soffice_env
+
+ # Option 1 – run soffice directly
+ result = run_soffice(["--headless", "--convert-to", "pdf", "input.docx"])
+
+ # Option 2 – get env dict for your own subprocess calls
+ env = get_soffice_env()
+ subprocess.run(["soffice", ...], env=env)
+"""
+
+import os
+import socket
+import subprocess
+import tempfile
+from pathlib import Path
+
+
def get_soffice_env() -> dict:
    """Build an environment dict suitable for launching ``soffice``.

    Always forces the headless "svp" VCL plugin; additionally, when AF_UNIX
    sockets are blocked in the current environment, points LD_PRELOAD at the
    compiled shim so LibreOffice can still start.
    """
    environment = dict(os.environ)
    environment["SAL_USE_VCLPLUGIN"] = "svp"
    if _needs_shim():
        environment["LD_PRELOAD"] = str(_ensure_shim())
    return environment
+
+
def run_soffice(args: list[str], **kwargs) -> subprocess.CompletedProcess:
    """Run ``soffice`` with *args* using the shim-aware environment.

    Extra keyword arguments are forwarded to ``subprocess.run`` unchanged.
    """
    command = ["soffice", *args]
    return subprocess.run(command, env=get_soffice_env(), **kwargs)
+
+
+
+_SHIM_SO = Path(tempfile.gettempdir()) / "lo_socket_shim.so"
+
+
+def _needs_shim() -> bool:
+ try:
+ s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ s.close()
+ return False
+ except OSError:
+ return True
+
+
def _ensure_shim() -> Path:
    """Compile (once) and return the LD_PRELOAD shim shared object.

    The C source in ``_SHIM_SOURCE`` is written to a temp file and built
    with gcc on first use; subsequent calls reuse the cached ``.so``.

    Raises:
        subprocess.CalledProcessError: if gcc fails (compiler stderr is
            available on the exception because output is captured).
    """
    if _SHIM_SO.exists():
        return _SHIM_SO

    src = Path(tempfile.gettempdir()) / "lo_socket_shim.c"
    src.write_text(_SHIM_SOURCE)
    try:
        subprocess.run(
            ["gcc", "-shared", "-fPIC", "-o", str(_SHIM_SO), str(src), "-ldl"],
            check=True,
            capture_output=True,
        )
    finally:
        # Unlink in a finally block so a failed compile does not leave the
        # temp source behind (the original only removed it on success).
        src.unlink(missing_ok=True)
    return _SHIM_SO
+
+
+
+_SHIM_SOURCE = r"""
+#define _GNU_SOURCE
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+static int (*real_socket)(int, int, int);
+static int (*real_socketpair)(int, int, int, int[2]);
+static int (*real_listen)(int, int);
+static int (*real_accept)(int, struct sockaddr *, socklen_t *);
+static int (*real_close)(int);
+static int (*real_read)(int, void *, size_t);
+
+/* Per-FD bookkeeping (FDs >= 1024 are passed through unshimmed). */
+static int is_shimmed[1024];
+static int peer_of[1024];
+static int wake_r[1024]; /* accept() blocks reading this */
+static int wake_w[1024]; /* close() writes to this */
+static int listener_fd = -1; /* FD that received listen() */
+
+__attribute__((constructor))
+static void init(void) {
+ real_socket = dlsym(RTLD_NEXT, "socket");
+ real_socketpair = dlsym(RTLD_NEXT, "socketpair");
+ real_listen = dlsym(RTLD_NEXT, "listen");
+ real_accept = dlsym(RTLD_NEXT, "accept");
+ real_close = dlsym(RTLD_NEXT, "close");
+ real_read = dlsym(RTLD_NEXT, "read");
+ for (int i = 0; i < 1024; i++) {
+ peer_of[i] = -1;
+ wake_r[i] = -1;
+ wake_w[i] = -1;
+ }
+}
+
+/* ---- socket ---------------------------------------------------------- */
+int socket(int domain, int type, int protocol) {
+ if (domain == AF_UNIX) {
+ int fd = real_socket(domain, type, protocol);
+ if (fd >= 0) return fd;
+ /* socket(AF_UNIX) blocked – fall back to socketpair(). */
+ int sv[2];
+ if (real_socketpair(domain, type, protocol, sv) == 0) {
+ if (sv[0] >= 0 && sv[0] < 1024) {
+ is_shimmed[sv[0]] = 1;
+ peer_of[sv[0]] = sv[1];
+ int wp[2];
+ if (pipe(wp) == 0) {
+ wake_r[sv[0]] = wp[0];
+ wake_w[sv[0]] = wp[1];
+ }
+ }
+ return sv[0];
+ }
+ errno = EPERM;
+ return -1;
+ }
+ return real_socket(domain, type, protocol);
+}
+
+/* ---- listen ---------------------------------------------------------- */
+int listen(int sockfd, int backlog) {
+ if (sockfd >= 0 && sockfd < 1024 && is_shimmed[sockfd]) {
+ listener_fd = sockfd;
+ return 0;
+ }
+ return real_listen(sockfd, backlog);
+}
+
+/* ---- accept ---------------------------------------------------------- */
+int accept(int sockfd, struct sockaddr *addr, socklen_t *addrlen) {
+ if (sockfd >= 0 && sockfd < 1024 && is_shimmed[sockfd]) {
+ /* Block until close() writes to the wake pipe. */
+ if (wake_r[sockfd] >= 0) {
+ char buf;
+ real_read(wake_r[sockfd], &buf, 1);
+ }
+ errno = ECONNABORTED;
+ return -1;
+ }
+ return real_accept(sockfd, addr, addrlen);
+}
+
+/* ---- close ----------------------------------------------------------- */
+int close(int fd) {
+ if (fd >= 0 && fd < 1024 && is_shimmed[fd]) {
+ int was_listener = (fd == listener_fd);
+ is_shimmed[fd] = 0;
+
+ if (wake_w[fd] >= 0) { /* unblock accept() */
+ char c = 0;
+ write(wake_w[fd], &c, 1);
+ real_close(wake_w[fd]);
+ wake_w[fd] = -1;
+ }
+ if (wake_r[fd] >= 0) { real_close(wake_r[fd]); wake_r[fd] = -1; }
+ if (peer_of[fd] >= 0) { real_close(peer_of[fd]); peer_of[fd] = -1; }
+
+ if (was_listener)
+ _exit(0); /* conversion done – exit */
+ }
+ return real_close(fd);
+}
+"""
+
+
+
if __name__ == "__main__":
    # CLI passthrough: forward every argument to soffice (with the
    # shim-aware environment) and mirror its exit status, e.g.
    #   python soffice.py --headless --convert-to pdf input.docx
    import sys
    result = run_soffice(sys.argv[1:])
    sys.exit(result.returncode)
diff --git a/.claude/skills/xlsx/scripts/office/unpack.py b/.claude/skills/xlsx/scripts/office/unpack.py
new file mode 100644
index 0000000..0015253
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/unpack.py
@@ -0,0 +1,132 @@
+"""Unpack Office files (DOCX, PPTX, XLSX) for editing.
+
+Extracts the ZIP archive, pretty-prints XML files, and optionally:
+- Merges adjacent runs with identical formatting (DOCX only)
+- Simplifies adjacent tracked changes from same author (DOCX only)
+
+Usage:
+ python unpack.py [options]
+
+Examples:
+ python unpack.py document.docx unpacked/
+ python unpack.py presentation.pptx unpacked/
+ python unpack.py document.docx unpacked/ --merge-runs false
+"""
+
+import argparse
+import sys
+import zipfile
+from pathlib import Path
+
+import defusedxml.minidom
+
+from helpers.merge_runs import merge_runs as do_merge_runs
+from helpers.simplify_redlines import simplify_redlines as do_simplify_redlines
+
# Map smart (curly) quote characters to XML numeric character references so
# the unpacked XML stays ASCII-safe for editing tools.
# NOTE(review): as committed, every value was the same character as its key
# (an identity mapping), which made _escape_smart_quotes a no-op — almost
# certainly entity references decoded during extraction. Restored to numeric
# character references here; confirm exact entity form against upstream.
SMART_QUOTE_REPLACEMENTS = {
    "\u201c": "&#8220;",  # left double quotation mark
    "\u201d": "&#8221;",  # right double quotation mark
    "\u2018": "&#8216;",  # left single quotation mark
    "\u2019": "&#8217;",  # right single quotation mark
}
+
+
def unpack(
    input_file: str,
    output_directory: str,
    merge_runs: bool = True,
    simplify_redlines: bool = True,
) -> tuple[None, str]:
    """Extract an Office ZIP into *output_directory* and normalize its XML.

    Pretty-prints every ``.xml``/``.rels`` part. For DOCX inputs, optionally
    simplifies same-author tracked changes and merges equivalent adjacent
    runs; finally applies smart-quote escaping to all XML parts.

    Returns:
        ``(None, message)`` — the message describes the outcome, and starts
        with ``"Error"`` on any failure (no exception escapes this function
        for bad input or bad archives).
    """
    src = Path(input_file)
    dest = Path(output_directory)
    kind = src.suffix.lower()

    # Guard clauses: missing file or unsupported extension.
    if not src.exists():
        return None, f"Error: {input_file} does not exist"
    if kind not in {".docx", ".pptx", ".xlsx"}:
        return None, f"Error: {input_file} must be a .docx, .pptx, or .xlsx file"

    try:
        dest.mkdir(parents=True, exist_ok=True)

        with zipfile.ZipFile(src, "r") as archive:
            archive.extractall(dest)

        xml_parts = [*dest.rglob("*.xml"), *dest.rglob("*.rels")]
        for part in xml_parts:
            _pretty_print_xml(part)

        message = f"Unpacked {input_file} ({len(xml_parts)} XML files)"

        # DOCX-only post-processing; order matters (simplify before merge).
        if kind == ".docx" and simplify_redlines:
            simplified, _ = do_simplify_redlines(str(dest))
            message += f", simplified {simplified} tracked changes"
        if kind == ".docx" and merge_runs:
            merged, _ = do_merge_runs(str(dest))
            message += f", merged {merged} runs"

        # Smart-quote escaping runs for every file type, after all rewrites.
        for part in xml_parts:
            _escape_smart_quotes(part)

        return None, message

    except zipfile.BadZipFile:
        return None, f"Error: {input_file} is not a valid Office file"
    except Exception as e:
        return None, f"Error unpacking: {e}"
+
+
def _pretty_print_xml(xml_file: Path) -> None:
    """Re-indent *xml_file* in place; files that fail to parse are skipped."""
    try:
        dom = defusedxml.minidom.parseString(xml_file.read_text(encoding="utf-8"))
        pretty = dom.toprettyxml(indent="  ", encoding="utf-8")
        xml_file.write_bytes(pretty)
    except Exception:
        # Best effort: leave malformed or non-XML parts untouched.
        pass
+
+
def _escape_smart_quotes(xml_file: Path) -> None:
    """Apply ``SMART_QUOTE_REPLACEMENTS`` to *xml_file* in place (best effort)."""
    try:
        text = xml_file.read_text(encoding="utf-8")
        for smart_char, replacement in SMART_QUOTE_REPLACEMENTS.items():
            text = text.replace(smart_char, replacement)
        xml_file.write_text(text, encoding="utf-8")
    except Exception:
        # Best effort: unreadable/unwritable parts are silently skipped.
        pass
+
+
if __name__ == "__main__":
    # CLI entry point: unpack one Office file and print a summary message.
    parser = argparse.ArgumentParser(
        description="Unpack an Office file (DOCX, PPTX, XLSX) for editing"
    )
    parser.add_argument("input_file", help="Office file to unpack")
    parser.add_argument("output_directory", help="Output directory")
    parser.add_argument(
        "--merge-runs",
        # Any value other than the literal string "true" (case-insensitive)
        # parses as False — argparse has no built-in bool type.
        type=lambda x: x.lower() == "true",
        default=True,
        metavar="true|false",
        help="Merge adjacent runs with identical formatting (DOCX only, default: true)",
    )
    parser.add_argument(
        "--simplify-redlines",
        type=lambda x: x.lower() == "true",
        default=True,
        metavar="true|false",
        help="Merge adjacent tracked changes from same author (DOCX only, default: true)",
    )
    args = parser.parse_args()

    # unpack() never raises for bad input; failures come back as a message
    # starting with "Error", which we turn into a non-zero exit status.
    _, message = unpack(
        args.input_file,
        args.output_directory,
        merge_runs=args.merge_runs,
        simplify_redlines=args.simplify_redlines,
    )
    print(message)

    if "Error" in message:
        sys.exit(1)
diff --git a/.claude/skills/xlsx/scripts/office/validate.py b/.claude/skills/xlsx/scripts/office/validate.py
new file mode 100644
index 0000000..03b01f6
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/validate.py
@@ -0,0 +1,111 @@
+"""
+Command line tool to validate Office document XML files against XSD schemas and tracked changes.
+
+Usage:
+ python validate.py [--original ] [--auto-repair] [--author NAME]
+
+The first argument can be either:
+- An unpacked directory containing the Office document XML files
+- A packed Office file (.docx/.pptx/.xlsx) which will be unpacked to a temp directory
+
+Auto-repair fixes:
+- paraId/durableId values that exceed OOXML limits
+- Missing xml:space="preserve" on w:t elements with whitespace
+"""
+
+import argparse
+import sys
+import tempfile
+import zipfile
+from pathlib import Path
+
+from validators import DOCXSchemaValidator, PPTXSchemaValidator, RedliningValidator
+
+
+def main():
+ parser = argparse.ArgumentParser(description="Validate Office document XML files")
+ parser.add_argument(
+ "path",
+ help="Path to unpacked directory or packed Office file (.docx/.pptx/.xlsx)",
+ )
+ parser.add_argument(
+ "--original",
+ required=False,
+ default=None,
+ help="Path to original file (.docx/.pptx/.xlsx). If omitted, all XSD errors are reported and redlining validation is skipped.",
+ )
+ parser.add_argument(
+ "-v",
+ "--verbose",
+ action="store_true",
+ help="Enable verbose output",
+ )
+ parser.add_argument(
+ "--auto-repair",
+ action="store_true",
+ help="Automatically repair common issues (hex IDs, whitespace preservation)",
+ )
+ parser.add_argument(
+ "--author",
+ default="Claude",
+ help="Author name for redlining validation (default: Claude)",
+ )
+ args = parser.parse_args()
+
+ path = Path(args.path)
+ assert path.exists(), f"Error: {path} does not exist"
+
+ original_file = None
+ if args.original:
+ original_file = Path(args.original)
+ assert original_file.is_file(), f"Error: {original_file} is not a file"
+ assert original_file.suffix.lower() in [".docx", ".pptx", ".xlsx"], (
+ f"Error: {original_file} must be a .docx, .pptx, or .xlsx file"
+ )
+
+ file_extension = (original_file or path).suffix.lower()
+ assert file_extension in [".docx", ".pptx", ".xlsx"], (
+ f"Error: Cannot determine file type from {path}. Use --original or provide a .docx/.pptx/.xlsx file."
+ )
+
+ if path.is_file() and path.suffix.lower() in [".docx", ".pptx", ".xlsx"]:
+ temp_dir = tempfile.mkdtemp()
+ with zipfile.ZipFile(path, "r") as zf:
+ zf.extractall(temp_dir)
+ unpacked_dir = Path(temp_dir)
+ else:
+ assert path.is_dir(), f"Error: {path} is not a directory or Office file"
+ unpacked_dir = path
+
+ match file_extension:
+ case ".docx":
+ validators = [
+ DOCXSchemaValidator(unpacked_dir, original_file, verbose=args.verbose),
+ ]
+ if original_file:
+ validators.append(
+ RedliningValidator(unpacked_dir, original_file, verbose=args.verbose, author=args.author)
+ )
+ case ".pptx":
+ validators = [
+ PPTXSchemaValidator(unpacked_dir, original_file, verbose=args.verbose),
+ ]
+ case _:
+ print(f"Error: Validation not supported for file type {file_extension}")
+ sys.exit(1)
+
+ if args.auto_repair:
+ total_repairs = sum(v.repair() for v in validators)
+ if total_repairs:
+ print(f"Auto-repaired {total_repairs} issue(s)")
+
+ success = all(v.validate() for v in validators)
+
+ if success:
+ print("All validations PASSED!")
+
+ sys.exit(0 if success else 1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.claude/skills/xlsx/scripts/office/validators/__init__.py b/.claude/skills/xlsx/scripts/office/validators/__init__.py
new file mode 100644
index 0000000..db092ec
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/validators/__init__.py
@@ -0,0 +1,15 @@
+"""
+Validation modules for Word document processing.
+"""
+
+from .base import BaseSchemaValidator
+from .docx import DOCXSchemaValidator
+from .pptx import PPTXSchemaValidator
+from .redlining import RedliningValidator
+
+__all__ = [
+ "BaseSchemaValidator",
+ "DOCXSchemaValidator",
+ "PPTXSchemaValidator",
+ "RedliningValidator",
+]
diff --git a/.claude/skills/xlsx/scripts/office/validators/base.py b/.claude/skills/xlsx/scripts/office/validators/base.py
new file mode 100644
index 0000000..db4a06a
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/validators/base.py
@@ -0,0 +1,847 @@
+"""
+Base validator with common validation logic for document files.
+"""
+
+import re
+from pathlib import Path
+
+import defusedxml.minidom
+import lxml.etree
+
+
class BaseSchemaValidator:
    """Shared validation machinery for unpacked OOXML (docx/pptx/xlsx) packages."""

    # Substrings of XSD error messages that are known noise; matching errors
    # are suppressed when comparing against the original document.
    IGNORED_VALIDATION_ERRORS = [
        "hyphenationZone",
        "purl.org/dc/terms",
    ]

    # element local-name (lowercase) -> (id attribute local-name, scope).
    # Scope "file": ID must be unique within one XML part;
    # scope "global": ID must be unique across the whole package.
    UNIQUE_ID_REQUIREMENTS = {
        "comment": ("id", "file"),
        "commentrangestart": ("id", "file"),
        "commentrangeend": ("id", "file"),
        "bookmarkstart": ("id", "file"),
        "bookmarkend": ("id", "file"),
        "sldid": ("id", "file"),
        "sldmasterid": ("id", "global"),
        "sldlayoutid": ("id", "global"),
        "cm": ("authorid", "file"),
        "sheet": ("sheetid", "file"),
        "definedname": ("id", "file"),
        "cxnsp": ("id", "file"),
        "sp": ("id", "file"),
        "pic": ("id", "file"),
        "grpsp": ("id", "file"),
    }

    # Ancestor element local-names whose descendants are exempt from the
    # ID-uniqueness checks above.
    EXCLUDED_ID_CONTAINERS = {
        "sectionlst",
    }

    # element local-name -> expected relationship-type fragment. Empty here;
    # subclasses may override to enable type checking of r:id references.
    ELEMENT_RELATIONSHIP_TYPES = {}

    # Filename / folder / suffix -> XSD schema file (relative to self.schemas_dir).
    SCHEMA_MAPPINGS = {
        "word": "ISO-IEC29500-4_2016/wml.xsd",
        "ppt": "ISO-IEC29500-4_2016/pml.xsd",
        "xl": "ISO-IEC29500-4_2016/sml.xsd",
        "[Content_Types].xml": "ecma/fouth-edition/opc-contentTypes.xsd",
        "app.xml": "ISO-IEC29500-4_2016/shared-documentPropertiesExtended.xsd",
        "core.xml": "ecma/fouth-edition/opc-coreProperties.xsd",
        "custom.xml": "ISO-IEC29500-4_2016/shared-documentPropertiesCustom.xsd",
        ".rels": "ecma/fouth-edition/opc-relationships.xsd",
        "people.xml": "microsoft/wml-2012.xsd",
        "commentsIds.xml": "microsoft/wml-cid-2016.xsd",
        "commentsExtensible.xml": "microsoft/wml-cex-2018.xsd",
        "commentsExtended.xml": "microsoft/wml-2012.xsd",
        "chart": "ISO-IEC29500-4_2016/dml-chart.xsd",
        "theme": "ISO-IEC29500-4_2016/dml-main.xsd",
        "drawing": "ISO-IEC29500-4_2016/dml-main.xsd",
    }

    # Markup-compatibility namespace (mc:AlternateContent / mc:Ignorable)
    # and the core xml: namespace (xml:space etc.).
    MC_NAMESPACE = "http://schemas.openxmlformats.org/markup-compatibility/2006"
    XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace"

    PACKAGE_RELATIONSHIPS_NAMESPACE = (
        "http://schemas.openxmlformats.org/package/2006/relationships"
    )
    OFFICE_RELATIONSHIPS_NAMESPACE = (
        "http://schemas.openxmlformats.org/officeDocument/2006/relationships"
    )
    CONTENT_TYPES_NAMESPACE = (
        "http://schemas.openxmlformats.org/package/2006/content-types"
    )

    # Top-level package folders holding the main content for each format.
    MAIN_CONTENT_FOLDERS = {"word", "ppt", "xl"}

    # Namespaces considered "real" OOXML; attributes/elements from any other
    # namespace are stripped before strict XSD validation.
    OOXML_NAMESPACES = {
        "http://schemas.openxmlformats.org/officeDocument/2006/math",
        "http://schemas.openxmlformats.org/officeDocument/2006/relationships",
        "http://schemas.openxmlformats.org/schemaLibrary/2006/main",
        "http://schemas.openxmlformats.org/drawingml/2006/main",
        "http://schemas.openxmlformats.org/drawingml/2006/chart",
        "http://schemas.openxmlformats.org/drawingml/2006/chartDrawing",
        "http://schemas.openxmlformats.org/drawingml/2006/diagram",
        "http://schemas.openxmlformats.org/drawingml/2006/picture",
        "http://schemas.openxmlformats.org/drawingml/2006/spreadsheetDrawing",
        "http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing",
        "http://schemas.openxmlformats.org/wordprocessingml/2006/main",
        "http://schemas.openxmlformats.org/presentationml/2006/main",
        "http://schemas.openxmlformats.org/spreadsheetml/2006/main",
        "http://schemas.openxmlformats.org/officeDocument/2006/sharedTypes",
        "http://www.w3.org/XML/1998/namespace",
    }
+
+ def __init__(self, unpacked_dir, original_file=None, verbose=False):
+ self.unpacked_dir = Path(unpacked_dir).resolve()
+ self.original_file = Path(original_file) if original_file else None
+ self.verbose = verbose
+
+ self.schemas_dir = Path(__file__).parent.parent / "schemas"
+
+ patterns = ["*.xml", "*.rels"]
+ self.xml_files = [
+ f for pattern in patterns for f in self.unpacked_dir.rglob(pattern)
+ ]
+
+ if not self.xml_files:
+ print(f"Warning: No XML files found in {self.unpacked_dir}")
+
    def validate(self):
        """Run the full validation pipeline; concrete subclasses implement this."""
        raise NotImplementedError("Subclasses must implement the validate method")
+
    def repair(self) -> int:
        """Run all available auto-repairs; returns the number of fixes applied."""
        return self.repair_whitespace_preservation()
+
+ def repair_whitespace_preservation(self) -> int:
+ repairs = 0
+
+ for xml_file in self.xml_files:
+ try:
+ content = xml_file.read_text(encoding="utf-8")
+ dom = defusedxml.minidom.parseString(content)
+ modified = False
+
+ for elem in dom.getElementsByTagName("*"):
+ if elem.tagName.endswith(":t") and elem.firstChild:
+ text = elem.firstChild.nodeValue
+ if text and (text.startswith((' ', '\t')) or text.endswith((' ', '\t'))):
+ if elem.getAttribute("xml:space") != "preserve":
+ elem.setAttribute("xml:space", "preserve")
+ text_preview = repr(text[:30]) + "..." if len(text) > 30 else repr(text)
+ print(f" Repaired: {xml_file.name}: Added xml:space='preserve' to {elem.tagName}: {text_preview}")
+ repairs += 1
+ modified = True
+
+ if modified:
+ xml_file.write_bytes(dom.toxml(encoding="UTF-8"))
+
+ except Exception:
+ pass
+
+ return repairs
+
+ def validate_xml(self):
+ errors = []
+
+ for xml_file in self.xml_files:
+ try:
+ lxml.etree.parse(str(xml_file))
+ except lxml.etree.XMLSyntaxError as e:
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: "
+ f"Line {e.lineno}: {e.msg}"
+ )
+ except Exception as e:
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: "
+ f"Unexpected error: {str(e)}"
+ )
+
+ if errors:
+ print(f"FAILED - Found {len(errors)} XML violations:")
+ for error in errors:
+ print(error)
+ return False
+ else:
+ if self.verbose:
+ print("PASSED - All XML files are well-formed")
+ return True
+
+ def validate_namespaces(self):
+ errors = []
+
+ for xml_file in self.xml_files:
+ try:
+ root = lxml.etree.parse(str(xml_file)).getroot()
+ declared = set(root.nsmap.keys()) - {None}
+
+ for attr_val in [
+ v for k, v in root.attrib.items() if k.endswith("Ignorable")
+ ]:
+ undeclared = set(attr_val.split()) - declared
+ errors.extend(
+ f" {xml_file.relative_to(self.unpacked_dir)}: "
+ f"Namespace '{ns}' in Ignorable but not declared"
+ for ns in undeclared
+ )
+ except lxml.etree.XMLSyntaxError:
+ continue
+
+ if errors:
+ print(f"FAILED - {len(errors)} namespace issues:")
+ for error in errors:
+ print(error)
+ return False
+ if self.verbose:
+ print("PASSED - All namespace prefixes properly declared")
+ return True
+
+ def validate_unique_ids(self):
+ errors = []
+ global_ids = {}
+
+ for xml_file in self.xml_files:
+ try:
+ root = lxml.etree.parse(str(xml_file)).getroot()
+ file_ids = {}
+
+ mc_elements = root.xpath(
+ ".//mc:AlternateContent", namespaces={"mc": self.MC_NAMESPACE}
+ )
+ for elem in mc_elements:
+ elem.getparent().remove(elem)
+
+ for elem in root.iter():
+ tag = (
+ elem.tag.split("}")[-1].lower()
+ if "}" in elem.tag
+ else elem.tag.lower()
+ )
+
+ if tag in self.UNIQUE_ID_REQUIREMENTS:
+ in_excluded_container = any(
+ ancestor.tag.split("}")[-1].lower() in self.EXCLUDED_ID_CONTAINERS
+ for ancestor in elem.iterancestors()
+ )
+ if in_excluded_container:
+ continue
+
+ attr_name, scope = self.UNIQUE_ID_REQUIREMENTS[tag]
+
+ id_value = None
+ for attr, value in elem.attrib.items():
+ attr_local = (
+ attr.split("}")[-1].lower()
+ if "}" in attr
+ else attr.lower()
+ )
+ if attr_local == attr_name:
+ id_value = value
+ break
+
+ if id_value is not None:
+ if scope == "global":
+ if id_value in global_ids:
+ prev_file, prev_line, prev_tag = global_ids[
+ id_value
+ ]
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: "
+ f"Line {elem.sourceline}: Global ID '{id_value}' in <{tag}> "
+ f"already used in {prev_file} at line {prev_line} in <{prev_tag}>"
+ )
+ else:
+ global_ids[id_value] = (
+ xml_file.relative_to(self.unpacked_dir),
+ elem.sourceline,
+ tag,
+ )
+ elif scope == "file":
+ key = (tag, attr_name)
+ if key not in file_ids:
+ file_ids[key] = {}
+
+ if id_value in file_ids[key]:
+ prev_line = file_ids[key][id_value]
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: "
+ f"Line {elem.sourceline}: Duplicate {attr_name}='{id_value}' in <{tag}> "
+ f"(first occurrence at line {prev_line})"
+ )
+ else:
+ file_ids[key][id_value] = elem.sourceline
+
+ except (lxml.etree.XMLSyntaxError, Exception) as e:
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}"
+ )
+
+ if errors:
+ print(f"FAILED - Found {len(errors)} ID uniqueness violations:")
+ for error in errors:
+ print(error)
+ return False
+ else:
+ if self.verbose:
+ print("PASSED - All required IDs are unique")
+ return True
+
    def validate_file_references(self):
        """Cross-check .rels relationship targets against files on disk.

        Reports two failure modes: relationships whose Target does not exist
        (broken references) and files that no relationship points at
        (unreferenced files).  Either one makes Office treat the package as
        corrupt, so both fail validation.
        """
        errors = []

        rels_files = list(self.unpacked_dir.rglob("*.rels"))

        if not rels_files:
            if self.verbose:
                print("PASSED - No .rels files found")
            return True

        # Every content file should be referenced by some relationship;
        # [Content_Types].xml and the .rels files themselves are exempt.
        all_files = []
        for file_path in self.unpacked_dir.rglob("*"):
            if (
                file_path.is_file()
                and file_path.name != "[Content_Types].xml"
                and not file_path.name.endswith(".rels")
            ):
                all_files.append(file_path.resolve())

        all_referenced_files = set()

        if self.verbose:
            print(
                f"Found {len(rels_files)} .rels files and {len(all_files)} target files"
            )

        for rels_file in rels_files:
            try:
                rels_root = lxml.etree.parse(str(rels_file)).getroot()

                rels_dir = rels_file.parent

                referenced_files = set()
                broken_refs = []

                for rel in rels_root.findall(
                    ".//ns:Relationship",
                    namespaces={"ns": self.PACKAGE_RELATIONSHIPS_NAMESPACE},
                ):
                    target = rel.get("Target")
                    # External targets (URLs, mailto:) are not package files.
                    if target and not target.startswith(
                        ("http", "mailto:")
                    ):
                        if target.startswith("/"):
                            # Package-absolute target path.
                            target_path = self.unpacked_dir / target.lstrip("/")
                        elif rels_file.name == ".rels":
                            # Root relationships resolve from the package root.
                            target_path = self.unpacked_dir / target
                        else:
                            # Part relationships resolve from the part's own
                            # folder (the parent of its _rels directory).
                            base_dir = rels_dir.parent
                            target_path = base_dir / target

                        try:
                            target_path = target_path.resolve()
                            if target_path.exists() and target_path.is_file():
                                referenced_files.add(target_path)
                                all_referenced_files.add(target_path)
                            else:
                                broken_refs.append((target, rel.sourceline))
                        except (OSError, ValueError):
                            # Unresolvable path counts as a broken reference.
                            broken_refs.append((target, rel.sourceline))

                if broken_refs:
                    rel_path = rels_file.relative_to(self.unpacked_dir)
                    for broken_ref, line_num in broken_refs:
                        errors.append(
                            f" {rel_path}: Line {line_num}: Broken reference to {broken_ref}"
                        )

            except Exception as e:
                rel_path = rels_file.relative_to(self.unpacked_dir)
                errors.append(f" Error parsing {rel_path}: {e}")

        unreferenced_files = set(all_files) - all_referenced_files

        if unreferenced_files:
            for unref_file in sorted(unreferenced_files):
                unref_rel_path = unref_file.relative_to(self.unpacked_dir)
                errors.append(f" Unreferenced file: {unref_rel_path}")

        if errors:
            print(f"FAILED - Found {len(errors)} relationship validation errors:")
            for error in errors:
                print(error)
            print(
                "CRITICAL: These errors will cause the document to appear corrupt. "
                + "Broken references MUST be fixed, "
                + "and unreferenced files MUST be referenced or removed."
            )
            return False
        else:
            if self.verbose:
                print(
                    "PASSED - All references are valid and all files are properly referenced"
                )
            return True
+
+ def validate_all_relationship_ids(self):
+ import lxml.etree
+
+ errors = []
+
+ for xml_file in self.xml_files:
+ if xml_file.suffix == ".rels":
+ continue
+
+ rels_dir = xml_file.parent / "_rels"
+ rels_file = rels_dir / f"{xml_file.name}.rels"
+
+ if not rels_file.exists():
+ continue
+
+ try:
+ rels_root = lxml.etree.parse(str(rels_file)).getroot()
+ rid_to_type = {}
+
+ for rel in rels_root.findall(
+ f".//{{{self.PACKAGE_RELATIONSHIPS_NAMESPACE}}}Relationship"
+ ):
+ rid = rel.get("Id")
+ rel_type = rel.get("Type", "")
+ if rid:
+ if rid in rid_to_type:
+ rels_rel_path = rels_file.relative_to(self.unpacked_dir)
+ errors.append(
+ f" {rels_rel_path}: Line {rel.sourceline}: "
+ f"Duplicate relationship ID '{rid}' (IDs must be unique)"
+ )
+ type_name = (
+ rel_type.split("/")[-1] if "/" in rel_type else rel_type
+ )
+ rid_to_type[rid] = type_name
+
+ xml_root = lxml.etree.parse(str(xml_file)).getroot()
+
+ r_ns = self.OFFICE_RELATIONSHIPS_NAMESPACE
+ rid_attrs_to_check = ["id", "embed", "link"]
+ for elem in xml_root.iter():
+ for attr_name in rid_attrs_to_check:
+ rid_attr = elem.get(f"{{{r_ns}}}{attr_name}")
+ if not rid_attr:
+ continue
+ xml_rel_path = xml_file.relative_to(self.unpacked_dir)
+ elem_name = (
+ elem.tag.split("}")[-1] if "}" in elem.tag else elem.tag
+ )
+
+ if rid_attr not in rid_to_type:
+ errors.append(
+ f" {xml_rel_path}: Line {elem.sourceline}: "
+ f"<{elem_name}> r:{attr_name} references non-existent relationship '{rid_attr}' "
+ f"(valid IDs: {', '.join(sorted(rid_to_type.keys())[:5])}{'...' if len(rid_to_type) > 5 else ''})"
+ )
+ elif attr_name == "id" and self.ELEMENT_RELATIONSHIP_TYPES:
+ expected_type = self._get_expected_relationship_type(
+ elem_name
+ )
+ if expected_type:
+ actual_type = rid_to_type[rid_attr]
+ if expected_type not in actual_type.lower():
+ errors.append(
+ f" {xml_rel_path}: Line {elem.sourceline}: "
+ f"<{elem_name}> references '{rid_attr}' which points to '{actual_type}' "
+ f"but should point to a '{expected_type}' relationship"
+ )
+
+ except Exception as e:
+ xml_rel_path = xml_file.relative_to(self.unpacked_dir)
+ errors.append(f" Error processing {xml_rel_path}: {e}")
+
+ if errors:
+ print(f"FAILED - Found {len(errors)} relationship ID reference errors:")
+ for error in errors:
+ print(error)
+ print("\nThese ID mismatches will cause the document to appear corrupt!")
+ return False
+ else:
+ if self.verbose:
+ print("PASSED - All relationship ID references are valid")
+ return True
+
+ def _get_expected_relationship_type(self, element_name):
+ elem_lower = element_name.lower()
+
+ if elem_lower in self.ELEMENT_RELATIONSHIP_TYPES:
+ return self.ELEMENT_RELATIONSHIP_TYPES[elem_lower]
+
+ if elem_lower.endswith("id") and len(elem_lower) > 2:
+ prefix = elem_lower[:-2]
+ if prefix.endswith("master"):
+ return prefix.lower()
+ elif prefix.endswith("layout"):
+ return prefix.lower()
+ else:
+ if prefix == "sld":
+ return "slide"
+ return prefix.lower()
+
+ if elem_lower.endswith("reference") and len(elem_lower) > 9:
+ prefix = elem_lower[:-9]
+ return prefix.lower()
+
+ return None
+
+ def validate_content_types(self):
+ errors = []
+
+ content_types_file = self.unpacked_dir / "[Content_Types].xml"
+ if not content_types_file.exists():
+ print("FAILED - [Content_Types].xml file not found")
+ return False
+
+ try:
+ root = lxml.etree.parse(str(content_types_file)).getroot()
+ declared_parts = set()
+ declared_extensions = set()
+
+ for override in root.findall(
+ f".//{{{self.CONTENT_TYPES_NAMESPACE}}}Override"
+ ):
+ part_name = override.get("PartName")
+ if part_name is not None:
+ declared_parts.add(part_name.lstrip("/"))
+
+ for default in root.findall(
+ f".//{{{self.CONTENT_TYPES_NAMESPACE}}}Default"
+ ):
+ extension = default.get("Extension")
+ if extension is not None:
+ declared_extensions.add(extension.lower())
+
+ declarable_roots = {
+ "sld",
+ "sldLayout",
+ "sldMaster",
+ "presentation",
+ "document",
+ "workbook",
+ "worksheet",
+ "theme",
+ }
+
+ media_extensions = {
+ "png": "image/png",
+ "jpg": "image/jpeg",
+ "jpeg": "image/jpeg",
+ "gif": "image/gif",
+ "bmp": "image/bmp",
+ "tiff": "image/tiff",
+ "wmf": "image/x-wmf",
+ "emf": "image/x-emf",
+ }
+
+ all_files = list(self.unpacked_dir.rglob("*"))
+ all_files = [f for f in all_files if f.is_file()]
+
+ for xml_file in self.xml_files:
+ path_str = str(xml_file.relative_to(self.unpacked_dir)).replace(
+ "\\", "/"
+ )
+
+ if any(
+ skip in path_str
+ for skip in [".rels", "[Content_Types]", "docProps/", "_rels/"]
+ ):
+ continue
+
+ try:
+ root_tag = lxml.etree.parse(str(xml_file)).getroot().tag
+ root_name = root_tag.split("}")[-1] if "}" in root_tag else root_tag
+
+ if root_name in declarable_roots and path_str not in declared_parts:
+ errors.append(
+ f" {path_str}: File with <{root_name}> root not declared in [Content_Types].xml"
+ )
+
+ except Exception:
+ continue
+
+ for file_path in all_files:
+ if file_path.suffix.lower() in {".xml", ".rels"}:
+ continue
+ if file_path.name == "[Content_Types].xml":
+ continue
+ if "_rels" in file_path.parts or "docProps" in file_path.parts:
+ continue
+
+ extension = file_path.suffix.lstrip(".").lower()
+ if extension and extension not in declared_extensions:
+ if extension in media_extensions:
+ relative_path = file_path.relative_to(self.unpacked_dir)
+ errors.append(
+ f' {relative_path}: File with extension \'{extension}\' not declared in [Content_Types].xml - should add: '
+ )
+
+ except Exception as e:
+ errors.append(f" Error parsing [Content_Types].xml: {e}")
+
+ if errors:
+ print(f"FAILED - Found {len(errors)} content type declaration errors:")
+ for error in errors:
+ print(error)
+ return False
+ else:
+ if self.verbose:
+ print(
+ "PASSED - All content files are properly declared in [Content_Types].xml"
+ )
+ return True
+
    def validate_file_against_xsd(self, xml_file, verbose=False):
        """Validate one XML part against its XSD, ignoring pre-existing errors.

        Returns:
            (None, set())  when no schema is mapped to this part;
            (True, set())  when the part is valid, or only has errors the
                           original document already had;
            (False, new_errors) when the part introduces new schema errors.
        """
        xml_file = Path(xml_file).resolve()
        unpacked_dir = self.unpacked_dir.resolve()

        is_valid, current_errors = self._validate_single_file_xsd(
            xml_file, unpacked_dir
        )

        if is_valid is None:
            return None, set()
        elif is_valid:
            return True, set()

        # Subtract errors already present in the pristine original (if given),
        # so only regressions introduced by editing are reported.
        original_errors = self._get_original_file_errors(xml_file)

        assert current_errors is not None
        new_errors = current_errors - original_errors

        # Drop known-noise error messages.
        new_errors = {
            e for e in new_errors
            if not any(pattern in e for pattern in self.IGNORED_VALIDATION_ERRORS)
        }

        if new_errors:
            if verbose:
                relative_path = xml_file.relative_to(unpacked_dir)
                print(f"FAILED - {relative_path}: {len(new_errors)} new error(s)")
                for error in list(new_errors)[:3]:
                    truncated = error[:250] + "..." if len(error) > 250 else error
                    print(f" - {truncated}")
            return False, new_errors
        else:
            if verbose:
                print(
                    f"PASSED - No new errors (original had {len(current_errors)} errors)"
                )
            return True, set()
+
+ def validate_against_xsd(self):
+ new_errors = []
+ original_error_count = 0
+ valid_count = 0
+ skipped_count = 0
+
+ for xml_file in self.xml_files:
+ relative_path = str(xml_file.relative_to(self.unpacked_dir))
+ is_valid, new_file_errors = self.validate_file_against_xsd(
+ xml_file, verbose=False
+ )
+
+ if is_valid is None:
+ skipped_count += 1
+ continue
+ elif is_valid and not new_file_errors:
+ valid_count += 1
+ continue
+ elif is_valid:
+ original_error_count += 1
+ valid_count += 1
+ continue
+
+ new_errors.append(f" {relative_path}: {len(new_file_errors)} new error(s)")
+ for error in list(new_file_errors)[:3]:
+ new_errors.append(
+ f" - {error[:250]}..." if len(error) > 250 else f" - {error}"
+ )
+
+ if self.verbose:
+ print(f"Validated {len(self.xml_files)} files:")
+ print(f" - Valid: {valid_count}")
+ print(f" - Skipped (no schema): {skipped_count}")
+ if original_error_count:
+ print(f" - With original errors (ignored): {original_error_count}")
+ print(
+ f" - With NEW errors: {len(new_errors) > 0 and len([e for e in new_errors if not e.startswith(' ')]) or 0}"
+ )
+
+ if new_errors:
+ print("\nFAILED - Found NEW validation errors:")
+ for error in new_errors:
+ print(error)
+ return False
+ else:
+ if self.verbose:
+ print("\nPASSED - No new XSD validation errors introduced")
+ return True
+
+ def _get_schema_path(self, xml_file):
+ if xml_file.name in self.SCHEMA_MAPPINGS:
+ return self.schemas_dir / self.SCHEMA_MAPPINGS[xml_file.name]
+
+ if xml_file.suffix == ".rels":
+ return self.schemas_dir / self.SCHEMA_MAPPINGS[".rels"]
+
+ if "charts/" in str(xml_file) and xml_file.name.startswith("chart"):
+ return self.schemas_dir / self.SCHEMA_MAPPINGS["chart"]
+
+ if "theme/" in str(xml_file) and xml_file.name.startswith("theme"):
+ return self.schemas_dir / self.SCHEMA_MAPPINGS["theme"]
+
+ if xml_file.parent.name in self.MAIN_CONTENT_FOLDERS:
+ return self.schemas_dir / self.SCHEMA_MAPPINGS[xml_file.parent.name]
+
+ return None
+
    def _clean_ignorable_namespaces(self, xml_doc):
        """Return a copy of *xml_doc* stripped of non-OOXML attributes/elements.

        Works on a serialized copy so the caller's tree is never mutated.
        Used before strict XSD validation of main-content parts, which would
        otherwise fail on harmless extension markup.
        """
        # Round-trip through a string to get an independent copy of the tree.
        xml_string = lxml.etree.tostring(xml_doc, encoding="unicode")
        xml_copy = lxml.etree.fromstring(xml_string)

        for elem in xml_copy.iter():
            attrs_to_remove = []

            for attr in elem.attrib:
                if "{" in attr:
                    # Namespaced attribute: drop it unless the namespace is OOXML.
                    ns = attr.split("}")[0][1:]
                    if ns not in self.OOXML_NAMESPACES:
                        attrs_to_remove.append(attr)

            for attr in attrs_to_remove:
                del elem.attrib[attr]

        self._remove_ignorable_elements(xml_copy)

        return lxml.etree.ElementTree(xml_copy)
+
+ def _remove_ignorable_elements(self, root):
+ elements_to_remove = []
+
+ for elem in list(root):
+ if not hasattr(elem, "tag") or callable(elem.tag):
+ continue
+
+ tag_str = str(elem.tag)
+ if tag_str.startswith("{"):
+ ns = tag_str.split("}")[0][1:]
+ if ns not in self.OOXML_NAMESPACES:
+ elements_to_remove.append(elem)
+ continue
+
+ self._remove_ignorable_elements(elem)
+
+ for elem in elements_to_remove:
+ root.remove(elem)
+
+ def _preprocess_for_mc_ignorable(self, xml_doc):
+ root = xml_doc.getroot()
+
+ if f"{{{self.MC_NAMESPACE}}}Ignorable" in root.attrib:
+ del root.attrib[f"{{{self.MC_NAMESPACE}}}Ignorable"]
+
+ return xml_doc
+
    def _validate_single_file_xsd(self, xml_file, base_path):
        """Validate one part against its mapped XSD schema.

        Returns:
            (None, None)   when no schema is mapped to this part;
            (True, set())  when the part validates;
            (False, {msg, ...}) on failure.  Any exception (unreadable file,
            broken schema, ...) becomes a single error string.
        """
        schema_path = self._get_schema_path(xml_file)
        if not schema_path:
            return None, None

        try:
            with open(schema_path, "rb") as xsd_file:
                parser = lxml.etree.XMLParser()
                # base_url lets the schema resolve its relative includes/imports.
                xsd_doc = lxml.etree.parse(
                    xsd_file, parser=parser, base_url=str(schema_path)
                )
                schema = lxml.etree.XMLSchema(xsd_doc)

            with open(xml_file, "r") as f:
                xml_doc = lxml.etree.parse(f)

            # Normalize before validating: drop {{template}} placeholders and
            # the mc:Ignorable attribute, neither of which is schema-valid.
            xml_doc, _ = self._remove_template_tags_from_text_nodes(xml_doc)
            xml_doc = self._preprocess_for_mc_ignorable(xml_doc)

            # Main-content parts additionally get extension markup stripped.
            relative_path = xml_file.relative_to(base_path)
            if (
                relative_path.parts
                and relative_path.parts[0] in self.MAIN_CONTENT_FOLDERS
            ):
                xml_doc = self._clean_ignorable_namespaces(xml_doc)

            if schema.validate(xml_doc):
                return True, set()
            else:
                errors = set()
                for error in schema.error_log:
                    errors.add(error.message)
                return False, errors

        except Exception as e:
            return False, {str(e)}
+
    def _get_original_file_errors(self, xml_file):
        """Collect the XSD errors the ORIGINAL document already had for a part.

        Extracts the pristine package into a temp dir and validates the
        corresponding part there, so pre-existing errors can be subtracted
        from the edited document's error set.  Returns an empty set when no
        original was supplied or the part did not exist in it.
        """
        if self.original_file is None:
            return set()

        import tempfile
        import zipfile

        xml_file = Path(xml_file).resolve()
        unpacked_dir = self.unpacked_dir.resolve()
        relative_path = xml_file.relative_to(unpacked_dir)

        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)

            with zipfile.ZipFile(self.original_file, "r") as zip_ref:
                zip_ref.extractall(temp_path)

            original_xml_file = temp_path / relative_path

            if not original_xml_file.exists():
                # Part was newly added by the edit; nothing to subtract.
                return set()

            is_valid, errors = self._validate_single_file_xsd(
                original_xml_file, temp_path
            )
            return errors if errors else set()
+
    def _remove_template_tags_from_text_nodes(self, xml_doc):
        """Strip {{...}} template tags from structural (non-<t>) text nodes.

        Template placeholders are legitimate inside text runs (<w:t>, <a:t>,
        bare <t>) but break XSD validation elsewhere, so they are removed
        from a COPY of the tree.

        Returns:
            (cleaned ElementTree, list of warning strings describing each
            removed tag).
        """
        warnings = []
        template_pattern = re.compile(r"\{\{[^}]*\}\}")

        # Work on an independent copy; the caller's tree is untouched.
        xml_string = lxml.etree.tostring(xml_doc, encoding="unicode")
        xml_copy = lxml.etree.fromstring(xml_string)

        def process_text_content(text, content_type):
            # Record a warning per match, then strip all matches from *text*.
            if not text:
                return text
            matches = list(template_pattern.finditer(text))
            if matches:
                for match in matches:
                    warnings.append(
                        f"Found template tag in {content_type}: {match.group()}"
                    )
                return template_pattern.sub("", text)
            return text

        for elem in xml_copy.iter():
            if not hasattr(elem, "tag") or callable(elem.tag):
                continue  # skip comments / processing instructions
            tag_str = str(elem.tag)
            # Text-run elements may legitimately carry template tags; skip.
            if tag_str.endswith("}t") or tag_str == "t":
                continue

            elem.text = process_text_content(elem.text, "text content")
            elem.tail = process_text_content(elem.tail, "tail content")

        return lxml.etree.ElementTree(xml_copy), warnings
+
+
if __name__ == "__main__":
    # Library module only; the CLI entry point lives elsewhere.
    raise RuntimeError("This module should not be run directly.")
diff --git a/.claude/skills/xlsx/scripts/office/validators/docx.py b/.claude/skills/xlsx/scripts/office/validators/docx.py
new file mode 100644
index 0000000..fec405e
--- /dev/null
+++ b/.claude/skills/xlsx/scripts/office/validators/docx.py
@@ -0,0 +1,446 @@
+"""
+Validator for Word document XML files against XSD schemas.
+"""
+
+import random
+import re
+import tempfile
+import zipfile
+
+import defusedxml.minidom
+import lxml.etree
+
+from .base import BaseSchemaValidator
+
+
class DOCXSchemaValidator(BaseSchemaValidator):
    """Validator for unpacked DOCX packages: base checks plus DOCX-specific rules."""

    # Core WordprocessingML namespace plus Microsoft extension namespaces.
    WORD_2006_NAMESPACE = "http://schemas.openxmlformats.org/wordprocessingml/2006/main"
    W14_NAMESPACE = "http://schemas.microsoft.com/office/word/2010/wordml"
    W16CID_NAMESPACE = "http://schemas.microsoft.com/office/word/2016/wordml/cid"

    # No element->relationship-type expectations beyond the base heuristics.
    ELEMENT_RELATIONSHIP_TYPES = {}
+
+ def validate(self):
+ if not self.validate_xml():
+ return False
+
+ all_valid = True
+ if not self.validate_namespaces():
+ all_valid = False
+
+ if not self.validate_unique_ids():
+ all_valid = False
+
+ if not self.validate_file_references():
+ all_valid = False
+
+ if not self.validate_content_types():
+ all_valid = False
+
+ if not self.validate_against_xsd():
+ all_valid = False
+
+ if not self.validate_whitespace_preservation():
+ all_valid = False
+
+ if not self.validate_deletions():
+ all_valid = False
+
+ if not self.validate_insertions():
+ all_valid = False
+
+ if not self.validate_all_relationship_ids():
+ all_valid = False
+
+ if not self.validate_id_constraints():
+ all_valid = False
+
+ if not self.validate_comment_markers():
+ all_valid = False
+
+ self.compare_paragraph_counts()
+
+ return all_valid
+
    def validate_whitespace_preservation(self):
        """Check that w:t runs with edge whitespace carry xml:space="preserve".

        Word trims leading/trailing spaces, tabs and newlines from <w:t>
        text unless xml:space="preserve" is set, silently altering the
        document.  Only word/document.xml is checked.
        """
        errors = []

        for xml_file in self.xml_files:
            if xml_file.name != "document.xml":
                continue

            try:
                root = lxml.etree.parse(str(xml_file)).getroot()

                for elem in root.iter(f"{{{self.WORD_2006_NAMESPACE}}}t"):
                    if elem.text:
                        text = elem.text
                        # Leading or trailing whitespace requires preservation.
                        if re.search(r"^[ \t\n\r]", text) or re.search(
                            r"[ \t\n\r]$", text
                        ):
                            xml_space_attr = f"{{{self.XML_NAMESPACE}}}space"
                            if (
                                xml_space_attr not in elem.attrib
                                or elem.attrib[xml_space_attr] != "preserve"
                            ):
                                text_preview = (
                                    repr(text)[:50] + "..."
                                    if len(repr(text)) > 50
                                    else repr(text)
                                )
                                errors.append(
                                    f" {xml_file.relative_to(self.unpacked_dir)}: "
                                    f"Line {elem.sourceline}: w:t element with whitespace missing xml:space='preserve': {text_preview}"
                                )

            except (lxml.etree.XMLSyntaxError, Exception) as e:
                errors.append(
                    f" {xml_file.relative_to(self.unpacked_dir)}: Error: {e}"
                )

        if errors:
            print(f"FAILED - Found {len(errors)} whitespace preservation violations:")
            for error in errors:
                print(error)
            return False
        else:
            if self.verbose:
                print("PASSED - All whitespace is properly preserved")
            return True
+
+ def validate_deletions(self):
+ errors = []
+
+ for xml_file in self.xml_files:
+ if xml_file.name != "document.xml":
+ continue
+
+ try:
+ root = lxml.etree.parse(str(xml_file)).getroot()
+ namespaces = {"w": self.WORD_2006_NAMESPACE}
+
+ for t_elem in root.xpath(".//w:del//w:t", namespaces=namespaces):
+ if t_elem.text:
+ text_preview = (
+ repr(t_elem.text)[:50] + "..."
+ if len(repr(t_elem.text)) > 50
+ else repr(t_elem.text)
+ )
+ errors.append(
+ f" {xml_file.relative_to(self.unpacked_dir)}: "
+ f"Line {t_elem.sourceline}: found within