From 8ff9d31b3b968099be8e41324cab9c5ba4aac7c4 Mon Sep 17 00:00:00 2001 From: Akash-nath29 Date: Sun, 1 Feb 2026 04:28:22 +0530 Subject: [PATCH 1/3] Create Coderrr-skills --- coderrr-skills/.gitignore | 75 ++++ coderrr-skills/CONTRIBUTING.md | 204 +++++++++++ coderrr-skills/LICENSE | 21 ++ coderrr-skills/README.md | 149 ++++++++ coderrr-skills/registry.json | 326 ++++++++++++++++++ coderrr-skills/skills/api-client/Skills.md | 201 +++++++++++ .../skills/api-client/requirements.txt | 1 + .../skills/api-client/tools/http_get.py | 129 +++++++ .../skills/api-client/tools/http_post.py | 148 ++++++++ .../skills/api-client/tools/parse_response.py | 232 +++++++++++++ .../skills/artifacts-builder/Skills.md | 189 ++++++++++ .../artifacts-builder/tools/add_component.py | 186 ++++++++++ .../artifacts-builder/tools/build_artifact.py | 80 +++++ .../tools/preview_artifact.py | 82 +++++ .../tools/scaffold_artifact.py | 245 +++++++++++++ .../skills/brand-guidelines/Skills.md | 173 ++++++++++ .../brand-guidelines/tools/export_tokens.py | 131 +++++++ .../tools/generate_palette.py | 113 ++++++ .../brand-guidelines/tools/set_brand.py | 84 +++++ coderrr-skills/skills/code-analyzer/Skills.md | 175 ++++++++++ .../skills/code-analyzer/requirements.txt | 2 + .../skills/code-analyzer/tools/count_lines.py | 198 +++++++++++ .../skills/code-analyzer/tools/find_todos.py | 152 ++++++++ .../skills/code-analyzer/tools/lint_python.py | 186 ++++++++++ coderrr-skills/skills/docx/Skills.md | 160 +++++++++ coderrr-skills/skills/docx/requirements.txt | 1 + .../skills/docx/tools/analyze_docx.py | 95 +++++ .../skills/docx/tools/create_docx.py | 117 +++++++ coderrr-skills/skills/docx/tools/edit_docx.py | 108 ++++++ coderrr-skills/skills/docx/tools/read_docx.py | 148 ++++++++ coderrr-skills/skills/file-search/Skills.md | 138 ++++++++ .../skills/file-search/tools/file_stats.py | 153 ++++++++ .../skills/file-search/tools/find_files.py | 111 ++++++ .../file-search/tools/search_content.py | 162 +++++++++ 
.../skills/internal-comms/Skills.md | 210 +++++++++++ .../internal-comms/tools/announcement.py | 102 ++++++ .../internal-comms/tools/meeting_summary.py | 95 +++++ .../skills/internal-comms/tools/newsletter.py | 122 +++++++ .../internal-comms/tools/status_report.py | 123 +++++++ coderrr-skills/skills/json-tools/Skills.md | 168 +++++++++ .../skills/json-tools/tools/format_json.py | 106 ++++++ .../skills/json-tools/tools/query_json.py | 194 +++++++++++ .../skills/json-tools/tools/validate_json.py | 97 ++++++ coderrr-skills/skills/mcp-builder/Skills.md | 203 +++++++++++ .../skills/mcp-builder/tools/add_mcp_tool.py | 92 +++++ .../skills/mcp-builder/tools/init_mcp.py | 181 ++++++++++ .../skills/mcp-builder/tools/validate_mcp.py | 96 ++++++ coderrr-skills/skills/pdf/Skills.md | 186 ++++++++++ coderrr-skills/skills/pdf/requirements.txt | 3 + coderrr-skills/skills/pdf/tools/create_pdf.py | 114 ++++++ .../skills/pdf/tools/extract_pdf.py | 109 ++++++ coderrr-skills/skills/pdf/tools/merge_pdf.py | 59 ++++ coderrr-skills/skills/pdf/tools/pdf_info.py | 77 +++++ coderrr-skills/skills/pdf/tools/split_pdf.py | 89 +++++ coderrr-skills/skills/pptx/Skills.md | 146 ++++++++ coderrr-skills/skills/pptx/requirements.txt | 1 + .../skills/pptx/tools/analyze_pptx.py | 76 ++++ .../skills/pptx/tools/create_pptx.py | 131 +++++++ coderrr-skills/skills/pptx/tools/edit_pptx.py | 103 ++++++ coderrr-skills/skills/pptx/tools/read_pptx.py | 112 ++++++ coderrr-skills/skills/skill-creator/Skills.md | 158 +++++++++ .../skills/skill-creator/tools/add_tool.py | 158 +++++++++ .../skill-creator/tools/finalize_skill.py | 112 ++++++ .../skills/skill-creator/tools/init_skill.py | 98 ++++++ .../skill-creator/tools/list_templates.py | 113 ++++++ coderrr-skills/skills/web-scraper/Skills.md | 103 ++++++ .../skills/web-scraper/requirements.txt | 2 + .../skills/web-scraper/tools/extract_text.py | 127 +++++++ .../skills/web-scraper/tools/fetch_page.py | 95 +++++ .../skills/webapp-testing/Skills.md | 211 ++++++++++++ 
.../skills/webapp-testing/requirements.txt | 1 + .../skills/webapp-testing/tools/interact.py | 66 ++++ .../skills/webapp-testing/tools/navigate.py | 62 ++++ .../skills/webapp-testing/tools/screenshot.py | 69 ++++ .../webapp-testing/tools/start_browser.py | 78 +++++ .../skills/webapp-testing/tools/verify.py | 68 ++++ coderrr-skills/skills/xlsx/Skills.md | 169 +++++++++ coderrr-skills/skills/xlsx/requirements.txt | 1 + .../skills/xlsx/tools/analyze_xlsx.py | 90 +++++ .../skills/xlsx/tools/create_xlsx.py | 88 +++++ coderrr-skills/skills/xlsx/tools/edit_xlsx.py | 107 ++++++ coderrr-skills/skills/xlsx/tools/read_xlsx.py | 99 ++++++ 82 files changed, 9745 insertions(+) create mode 100644 coderrr-skills/.gitignore create mode 100644 coderrr-skills/CONTRIBUTING.md create mode 100644 coderrr-skills/LICENSE create mode 100644 coderrr-skills/README.md create mode 100644 coderrr-skills/registry.json create mode 100644 coderrr-skills/skills/api-client/Skills.md create mode 100644 coderrr-skills/skills/api-client/requirements.txt create mode 100644 coderrr-skills/skills/api-client/tools/http_get.py create mode 100644 coderrr-skills/skills/api-client/tools/http_post.py create mode 100644 coderrr-skills/skills/api-client/tools/parse_response.py create mode 100644 coderrr-skills/skills/artifacts-builder/Skills.md create mode 100644 coderrr-skills/skills/artifacts-builder/tools/add_component.py create mode 100644 coderrr-skills/skills/artifacts-builder/tools/build_artifact.py create mode 100644 coderrr-skills/skills/artifacts-builder/tools/preview_artifact.py create mode 100644 coderrr-skills/skills/artifacts-builder/tools/scaffold_artifact.py create mode 100644 coderrr-skills/skills/brand-guidelines/Skills.md create mode 100644 coderrr-skills/skills/brand-guidelines/tools/export_tokens.py create mode 100644 coderrr-skills/skills/brand-guidelines/tools/generate_palette.py create mode 100644 coderrr-skills/skills/brand-guidelines/tools/set_brand.py create mode 100644 
coderrr-skills/skills/code-analyzer/Skills.md create mode 100644 coderrr-skills/skills/code-analyzer/requirements.txt create mode 100644 coderrr-skills/skills/code-analyzer/tools/count_lines.py create mode 100644 coderrr-skills/skills/code-analyzer/tools/find_todos.py create mode 100644 coderrr-skills/skills/code-analyzer/tools/lint_python.py create mode 100644 coderrr-skills/skills/docx/Skills.md create mode 100644 coderrr-skills/skills/docx/requirements.txt create mode 100644 coderrr-skills/skills/docx/tools/analyze_docx.py create mode 100644 coderrr-skills/skills/docx/tools/create_docx.py create mode 100644 coderrr-skills/skills/docx/tools/edit_docx.py create mode 100644 coderrr-skills/skills/docx/tools/read_docx.py create mode 100644 coderrr-skills/skills/file-search/Skills.md create mode 100644 coderrr-skills/skills/file-search/tools/file_stats.py create mode 100644 coderrr-skills/skills/file-search/tools/find_files.py create mode 100644 coderrr-skills/skills/file-search/tools/search_content.py create mode 100644 coderrr-skills/skills/internal-comms/Skills.md create mode 100644 coderrr-skills/skills/internal-comms/tools/announcement.py create mode 100644 coderrr-skills/skills/internal-comms/tools/meeting_summary.py create mode 100644 coderrr-skills/skills/internal-comms/tools/newsletter.py create mode 100644 coderrr-skills/skills/internal-comms/tools/status_report.py create mode 100644 coderrr-skills/skills/json-tools/Skills.md create mode 100644 coderrr-skills/skills/json-tools/tools/format_json.py create mode 100644 coderrr-skills/skills/json-tools/tools/query_json.py create mode 100644 coderrr-skills/skills/json-tools/tools/validate_json.py create mode 100644 coderrr-skills/skills/mcp-builder/Skills.md create mode 100644 coderrr-skills/skills/mcp-builder/tools/add_mcp_tool.py create mode 100644 coderrr-skills/skills/mcp-builder/tools/init_mcp.py create mode 100644 coderrr-skills/skills/mcp-builder/tools/validate_mcp.py create mode 100644 
coderrr-skills/skills/pdf/Skills.md create mode 100644 coderrr-skills/skills/pdf/requirements.txt create mode 100644 coderrr-skills/skills/pdf/tools/create_pdf.py create mode 100644 coderrr-skills/skills/pdf/tools/extract_pdf.py create mode 100644 coderrr-skills/skills/pdf/tools/merge_pdf.py create mode 100644 coderrr-skills/skills/pdf/tools/pdf_info.py create mode 100644 coderrr-skills/skills/pdf/tools/split_pdf.py create mode 100644 coderrr-skills/skills/pptx/Skills.md create mode 100644 coderrr-skills/skills/pptx/requirements.txt create mode 100644 coderrr-skills/skills/pptx/tools/analyze_pptx.py create mode 100644 coderrr-skills/skills/pptx/tools/create_pptx.py create mode 100644 coderrr-skills/skills/pptx/tools/edit_pptx.py create mode 100644 coderrr-skills/skills/pptx/tools/read_pptx.py create mode 100644 coderrr-skills/skills/skill-creator/Skills.md create mode 100644 coderrr-skills/skills/skill-creator/tools/add_tool.py create mode 100644 coderrr-skills/skills/skill-creator/tools/finalize_skill.py create mode 100644 coderrr-skills/skills/skill-creator/tools/init_skill.py create mode 100644 coderrr-skills/skills/skill-creator/tools/list_templates.py create mode 100644 coderrr-skills/skills/web-scraper/Skills.md create mode 100644 coderrr-skills/skills/web-scraper/requirements.txt create mode 100644 coderrr-skills/skills/web-scraper/tools/extract_text.py create mode 100644 coderrr-skills/skills/web-scraper/tools/fetch_page.py create mode 100644 coderrr-skills/skills/webapp-testing/Skills.md create mode 100644 coderrr-skills/skills/webapp-testing/requirements.txt create mode 100644 coderrr-skills/skills/webapp-testing/tools/interact.py create mode 100644 coderrr-skills/skills/webapp-testing/tools/navigate.py create mode 100644 coderrr-skills/skills/webapp-testing/tools/screenshot.py create mode 100644 coderrr-skills/skills/webapp-testing/tools/start_browser.py create mode 100644 coderrr-skills/skills/webapp-testing/tools/verify.py create mode 100644 
coderrr-skills/skills/xlsx/Skills.md create mode 100644 coderrr-skills/skills/xlsx/requirements.txt create mode 100644 coderrr-skills/skills/xlsx/tools/analyze_xlsx.py create mode 100644 coderrr-skills/skills/xlsx/tools/create_xlsx.py create mode 100644 coderrr-skills/skills/xlsx/tools/edit_xlsx.py create mode 100644 coderrr-skills/skills/xlsx/tools/read_xlsx.py diff --git a/coderrr-skills/.gitignore b/coderrr-skills/.gitignore new file mode 100644 index 0000000..a98b31a --- /dev/null +++ b/coderrr-skills/.gitignore @@ -0,0 +1,75 @@ +# Python +__pycache__/ +*.py[cod] +*$py.class +*.so +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# Virtual environments +venv/ +ENV/ +env/ +.venv/ + +# IDE +.idea/ +.vscode/ +*.swp +*.swo +*~ +.project +.pydevproject +.settings/ + +# OS +.DS_Store +.DS_Store? +._* +.Spotlight-V100 +.Trashes +ehthumbs.db +Thumbs.db +Desktop.ini + +# Testing +.pytest_cache/ +.coverage +htmlcov/ +.tox/ +.nox/ + +# Logs +*.log +logs/ + +# Local development +.env +.env.local +*.local + +# Temporary files +tmp/ +temp/ +*.tmp +*.bak + +# Node (if any tooling uses it) +node_modules/ + +# Compiled Python +*.pyc diff --git a/coderrr-skills/CONTRIBUTING.md b/coderrr-skills/CONTRIBUTING.md new file mode 100644 index 0000000..e8833ad --- /dev/null +++ b/coderrr-skills/CONTRIBUTING.md @@ -0,0 +1,204 @@ +# Contributing to Coderrr Skills + +Thank you for your interest in contributing to the Coderrr skills marketplace! This guide will help you create and submit your own skills. + +## šŸ“‹ Table of Contents + +- [Getting Started](#getting-started) +- [Skill Requirements](#skill-requirements) +- [Creating a New Skill](#creating-a-new-skill) +- [Testing Your Skill](#testing-your-skill) +- [Submitting a Pull Request](#submitting-a-pull-request) +- [Code of Conduct](#code-of-conduct) + +## Getting Started + +1. **Fork** this repository +2. **Clone** your fork locally +3. 
Create a new **branch** for your skill: `git checkout -b skill/your-skill-name` + +## Skill Requirements + +Every skill must meet these requirements: + +### Required Files + +``` +skills/your-skill-name/ +ā”œā”€ā”€ Skills.md # Required: Skill documentation +ā”œā”€ā”€ requirements.txt # Optional: Python dependencies +└── tools/ + └── your_tool.py # Required: At least one tool +``` + +### Skills.md Format + +Your `Skills.md` must include: + +```markdown +--- +name: your-skill-name +displayName: Your Skill Name +description: Brief description of what your skill does +version: 1.0.0 +author: Your Name +tags: + - tag1 + - tag2 +--- + +# Your Skill Name + +Detailed description of your skill. + +## Tools + +### tool_name + +Description of what this tool does. + +**Arguments:** +- `--arg1` (required): Description +- `--arg2` (optional): Description + +**Example:** +\`\`\`bash +python tools/tool_name.py --arg1 value +\`\`\` + +**Output:** +Description of output format +``` + +### Tool Requirements + +Each Python tool must: + +1. **Use argparse** for command-line arguments +2. **Include docstrings** explaining functionality +3. **Handle errors gracefully** with informative messages +4. **Output to stdout** for easy piping +5. **Return exit code 0** on success, non-zero on failure + +### Example Tool Structure + +```python +#!/usr/bin/env python3 +""" +Brief description of what this tool does. 
+""" + +import argparse +import sys +import json + + +def main(): + parser = argparse.ArgumentParser( + description='What this tool does' + ) + parser.add_argument('--input', required=True, help='Input description') + parser.add_argument('--format', default='json', help='Output format') + + args = parser.parse_args() + + try: + # Your tool logic here + result = process(args.input) + print(json.dumps(result, indent=2)) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() +``` + +## Testing Your Skill + +Before submitting, test your skill locally: + +1. **Install Coderrr CLI** (if not already installed): + ```bash + npm install -g coderrr-cli + ``` + +2. **Install your skill locally**: + ```bash + coderrr install ./skills/your-skill-name + ``` + +3. **Test each tool**: + ```bash + python ~/.coderrr/skills/your-skill-name/tools/your_tool.py --help + ``` + +4. **Verify with the agent**: + ```bash + coderrr + > Use the your_tool to do something + ``` + +## Submitting a Pull Request + +1. **Update registry.json** with your skill metadata: + ```json + { + "your-skill-name": { + "name": "your-skill-name", + "displayName": "Your Skill Name", + "description": "What your skill does", + "version": "1.0.0", + "author": "Your Name", + "repository": "https://github.com/your-username/your-repo", + "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/your-skill-name", + "tools": ["tool1", "tool2"], + "tags": ["tag1", "tag2"] + } + } + ``` + +2. **Commit your changes**: + ```bash + git add . + git commit -m "Add your-skill-name skill" + ``` + +3. **Push to your fork**: + ```bash + git push origin skill/your-skill-name + ``` + +4. 
**Open a Pull Request** with: + - Clear title: `Add [skill-name] skill` + - Description of what your skill does + - List of tools included + - Any external dependencies + +### PR Checklist + +- [ ] Skills.md is complete with all required sections +- [ ] All tools have proper docstrings +- [ ] All tools handle errors gracefully +- [ ] requirements.txt lists all dependencies (if any) +- [ ] registry.json is updated with correct metadata +- [ ] Tools tested locally and working + +## Code of Conduct + +- **Be respectful** in all interactions +- **Write clean, readable code** with comments +- **Document thoroughly** for other users +- **Test before submitting** to avoid broken skills +- **No malicious code** - skills that harm users will be removed + +## Questions? + +If you have questions, feel free to: + +- Open an [issue](https://github.com/Akash-nath29/coderrr-skills/issues) +- Check existing skills for examples +- Read the [Coderrr CLI documentation](https://github.com/Akash-nath29/Coderrr) + +Thank you for contributing! šŸš€ diff --git a/coderrr-skills/LICENSE b/coderrr-skills/LICENSE new file mode 100644 index 0000000..baba5bc --- /dev/null +++ b/coderrr-skills/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2026 Akash Nath + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/coderrr-skills/README.md b/coderrr-skills/README.md new file mode 100644 index 0000000..b7fa630 --- /dev/null +++ b/coderrr-skills/README.md @@ -0,0 +1,149 @@ +# Coderrr Skills Marketplace + +[![Skills](https://img.shields.io/badge/skills-15-blue)](https://github.com/Akash-nath29/coderrr-skills) +[![License](https://img.shields.io/badge/license-MIT-green)](LICENSE) +[![Python](https://img.shields.io/badge/python-3.8%2B-blue)](https://python.org) + +A marketplace of installable skills for [Coderrr CLI](https://github.com/Akash-nath29/coderrr) - the AI-powered coding assistant. 
+ +## šŸš€ Quick Start + +Install skills directly from Coderrr CLI: + +```bash +# Install a skill +coderrr install web-scraper + +# List installed skills +coderrr skills + +# Search for skills +coderrr search pdf +``` + +## šŸ“¦ Available Skills + +| Skill | Description | Tools | +|-------|-------------|-------| +| **web-scraper** | Fetch, parse, and extract content from web pages | `fetch_page`, `extract_text` | +| **file-search** | Find files and search content within your filesystem | `find_files`, `search_content`, `file_stats` | +| **code-analyzer** | Analyze code quality, structure, and maintainability | `lint_python`, `count_lines`, `find_todos` | +| **json-tools** | Format, query, and validate JSON data | `format_json`, `query_json`, `validate_json` | +| **api-client** | Make HTTP requests and work with API responses | `http_get`, `http_post`, `parse_response` | +| **docx** | Create, edit, and analyze Word documents | `create_docx`, `read_docx`, `edit_docx`, `analyze_docx` | +| **pdf** | Comprehensive PDF toolkit for document manipulation | `extract_pdf`, `create_pdf`, `merge_pdf`, `split_pdf`, `pdf_info` | +| **pptx** | Create, edit, and analyze PowerPoint presentations | `create_pptx`, `read_pptx`, `edit_pptx`, `analyze_pptx` | +| **xlsx** | Create and manipulate Excel spreadsheets with formulas | `create_xlsx`, `read_xlsx`, `edit_xlsx`, `analyze_xlsx` | +| **skill-creator** | Interactive tool for building new custom skills | `init_skill`, `add_tool`, `finalize_skill`, `list_templates` | +| **artifacts-builder** | Build complex HTML artifacts using React and Tailwind | `scaffold_artifact`, `add_component`, `build_artifact`, `preview_artifact` | +| **mcp-builder** | Guide for creating high-quality MCP servers | `init_mcp`, `add_mcp_tool`, `validate_mcp` | +| **webapp-testing** | Test web applications using Playwright automation | `start_browser`, `navigate`, `interact`, `verify`, `screenshot` | +| **brand-guidelines** | Apply brand colors, typography, and 
design tokens | `set_brand`, `generate_palette`, `export_tokens` | +| **internal-comms** | Write status reports, newsletters, and announcements | `status_report`, `newsletter`, `announcement`, `meeting_summary` | + +## šŸŽÆ Skills by Category + +### šŸ“„ Document Processing +- **docx** - Word document handling +- **pdf** - PDF manipulation +- **pptx** - PowerPoint presentations +- **xlsx** - Excel spreadsheets + +### 🌐 Web & API +- **web-scraper** - Web page scraping +- **api-client** - HTTP requests +- **webapp-testing** - Browser automation + +### šŸ’» Development +- **code-analyzer** - Code quality analysis +- **json-tools** - JSON manipulation +- **file-search** - File system operations +- **skill-creator** - Skill development +- **mcp-builder** - MCP server creation + +### šŸŽØ Design & Communication +- **artifacts-builder** - HTML/React components +- **brand-guidelines** - Design tokens +- **internal-comms** - Team communications + +## šŸ“ Repository Structure + +``` +coderrr-skills/ +ā”œā”€ā”€ registry.json # Central skill registry +ā”œā”€ā”€ README.md # This file +ā”œā”€ā”€ CONTRIBUTING.md # Contribution guidelines +ā”œā”€ā”€ LICENSE # MIT License +└── skills/ + ā”œā”€ā”€ web-scraper/ + │ ā”œā”€ā”€ Skills.md # Skill documentation + │ ā”œā”€ā”€ requirements.txt + │ └── tools/ + │ ā”œā”€ā”€ fetch_page.py + │ └── extract_text.py + ā”œā”€ā”€ docx/ + │ ā”œā”€ā”€ Skills.md + │ ā”œā”€ā”€ requirements.txt + │ └── tools/ + │ ā”œā”€ā”€ create_docx.py + │ ā”œā”€ā”€ read_docx.py + │ ā”œā”€ā”€ edit_docx.py + │ └── analyze_docx.py + └── ... (other skills) +``` + +## šŸ› ļø Creating New Skills + +1. Fork this repository +2. Create a new skill directory under `skills/` +3. Add required files: + - `Skills.md` - Documentation with YAML frontmatter + - `tools/` - Python tool scripts + - `requirements.txt` - Dependencies (if any) +4. Update `registry.json` +5. Submit a pull request + +See [CONTRIBUTING.md](CONTRIBUTING.md) for detailed guidelines. 
+ +### Skill Structure + +```markdown +--- +name: my-skill +description: What this skill does and when to use it +--- + +Detailed documentation for the AI agent... +``` + +## šŸ“– Skills.md Format + +Each skill's `Skills.md` follows this structure: + +1. **YAML Frontmatter** - `name` and `description` +2. **Approach** - When to use which tool +3. **Tools** - Detailed documentation for each tool +4. **Common Patterns** - Usage examples +5. **Best Practices** - Guidelines for effective use +6. **Dependencies** - Required packages + +## šŸ¤ Contributing + +Contributions are welcome! Please read our [Contributing Guidelines](CONTRIBUTING.md) before submitting. + +### Ideas for New Skills +- Database connectors +- Cloud service integrations +- Image manipulation +- Markdown processing +- Git automation + +## šŸ“„ License + +MIT License - see [LICENSE](LICENSE) for details. + +## šŸ”— Links + +- [Coderrr CLI](https://github.com/Akash-nath29/coderrr) +- [Documentation](https://github.com/Akash-nath29/coderrr#readme) +- [Issue Tracker](https://github.com/Akash-nath29/coderrr-skills/issues) diff --git a/coderrr-skills/registry.json b/coderrr-skills/registry.json new file mode 100644 index 0000000..01d548d --- /dev/null +++ b/coderrr-skills/registry.json @@ -0,0 +1,326 @@ +{ + "version": "1.0", + "registry_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/registry.json", + "skills": { + "web-scraper": { + "name": "web-scraper", + "displayName": "Web Scraper", + "description": "Fetch, parse, and extract content from web pages. 
Use this skill when the user asks to scrape websites, extract text from URLs, parse HTML content, download web pages, or analyze website content.", + "version": "1.0.0", + "author": "Akash Nath", + "repository": "https://github.com/Akash-nath29/coderrr-skills", + "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/web-scraper", + "tools": [ + "fetch_page", + "extract_text" + ], + "tags": [ + "web", + "scraping", + "http", + "html", + "parsing" + ] + }, + "file-search": { + "name": "file-search", + "displayName": "File Search", + "description": "Find files and search content within your filesystem. Use this skill when the user asks to find files by name or pattern, search for text within files, get directory statistics, count files, or analyze folder structure.", + "version": "1.0.0", + "author": "Akash Nath", + "repository": "https://github.com/Akash-nath29/coderrr-skills", + "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/file-search", + "tools": [ + "find_files", + "search_content", + "file_stats" + ], + "tags": [ + "files", + "search", + "filesystem", + "grep", + "find" + ] + }, + "code-analyzer": { + "name": "code-analyzer", + "displayName": "Code Analyzer", + "description": "Analyze code quality, structure, and maintainability. Use this skill when the user asks to lint code, count lines of code, find TODO/FIXME comments, analyze code structure, check for issues, or audit a codebase.", + "version": "1.0.0", + "author": "Akash Nath", + "repository": "https://github.com/Akash-nath29/coderrr-skills", + "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/code-analyzer", + "tools": [ + "lint_python", + "count_lines", + "find_todos" + ], + "tags": [ + "code", + "analysis", + "linting", + "quality", + "python" + ] + }, + "json-tools": { + "name": "json-tools", + "displayName": "JSON Tools", + "description": "Format, query, and validate JSON data. 
Use this skill when the user asks to pretty-print JSON, extract values from JSON, validate JSON syntax, minify JSON, or work with nested JSON structures.", + "version": "1.0.0", + "author": "Akash Nath", + "repository": "https://github.com/Akash-nath29/coderrr-skills", + "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/json-tools", + "tools": [ + "format_json", + "query_json", + "validate_json" + ], + "tags": [ + "json", + "data", + "formatting", + "validation", + "query" + ] + }, + "api-client": { + "name": "api-client", + "displayName": "API Client", + "description": "Make HTTP requests and work with API responses. Use this skill when the user asks to call APIs, make HTTP GET/POST requests, test endpoints, fetch data from REST APIs, or parse and format API responses.", + "version": "1.0.0", + "author": "Akash Nath", + "repository": "https://github.com/Akash-nath29/coderrr-skills", + "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/api-client", + "tools": [ + "http_get", + "http_post", + "parse_response" + ], + "tags": [ + "api", + "http", + "rest", + "requests", + "web" + ] + }, + "docx": { + "name": "docx", + "displayName": "Word Documents", + "description": "Create, edit, and analyze Word documents with professional formatting. 
Use this skill when the user asks to create Word documents, add content to DOCX files, extract text from Word files, work with tables, headers, footers, or analyze document structure.", + "version": "1.0.0", + "author": "Akash Nath", + "repository": "https://github.com/Akash-nath29/coderrr-skills", + "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/docx", + "tools": [ + "create_docx", + "read_docx", + "edit_docx", + "analyze_docx" + ], + "tags": [ + "document", + "word", + "docx", + "office", + "writing" + ] + }, + "pdf": { + "name": "pdf", + "displayName": "PDF Toolkit", + "description": "Comprehensive PDF toolkit for document manipulation. Use this skill when the user asks to extract text from PDFs, create PDF documents, merge or split PDFs, extract tables from PDFs, work with PDF forms, or analyze PDF structure.", + "version": "1.0.0", + "author": "Akash Nath", + "repository": "https://github.com/Akash-nath29/coderrr-skills", + "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/pdf", + "tools": [ + "extract_pdf", + "create_pdf", + "merge_pdf", + "split_pdf", + "pdf_info" + ], + "tags": [ + "pdf", + "document", + "extract", + "merge", + "split" + ] + }, + "pptx": { + "name": "pptx", + "displayName": "PowerPoint", + "description": "Create, edit, and analyze PowerPoint presentations. 
Use this skill when the user asks to create slides, modify presentations, extract content from PPTX files, add speaker notes, or analyze presentation structure.", + "version": "1.0.0", + "author": "Akash Nath", + "repository": "https://github.com/Akash-nath29/coderrr-skills", + "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/pptx", + "tools": [ + "create_pptx", + "read_pptx", + "edit_pptx", + "analyze_pptx" + ], + "tags": [ + "presentation", + "powerpoint", + "slides", + "office" + ] + }, + "xlsx": { + "name": "xlsx", + "displayName": "Excel Spreadsheets", + "description": "Create and manipulate Excel spreadsheets with formulas and formatting. Use this skill when the user asks to create Excel files, read spreadsheet data, update cells, add formulas, format worksheets, or analyze Excel structure.", + "version": "1.0.0", + "author": "Akash Nath", + "repository": "https://github.com/Akash-nath29/coderrr-skills", + "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/xlsx", + "tools": [ + "create_xlsx", + "read_xlsx", + "edit_xlsx", + "analyze_xlsx" + ], + "tags": [ + "excel", + "spreadsheet", + "data", + "formulas", + "office" + ] + }, + "skill-creator": { + "name": "skill-creator", + "displayName": "Skill Creator", + "description": "Interactive tool for building new custom skills for Coderrr. 
Use this skill when the user wants to create a new skill, scaffold a skill structure, generate tool templates, or set up skill documentation.", + "version": "1.0.0", + "author": "Akash Nath", + "repository": "https://github.com/Akash-nath29/coderrr-skills", + "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/skill-creator", + "tools": [ + "init_skill", + "add_tool", + "finalize_skill", + "list_templates" + ], + "tags": [ + "meta", + "development", + "scaffolding", + "tools" + ] + }, + "artifacts-builder": { + "name": "artifacts-builder", + "displayName": "Artifacts Builder", + "description": "Build complex HTML artifacts using React, Tailwind CSS, and shadcn/ui components. Use this skill when the user wants to create polished UI components, interactive web widgets, dashboards, landing pages, or sophisticated HTML artifacts.", + "version": "1.0.0", + "author": "Akash Nath", + "repository": "https://github.com/Akash-nath29/coderrr-skills", + "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/artifacts-builder", + "tools": [ + "scaffold_artifact", + "add_component", + "build_artifact", + "preview_artifact" + ], + "tags": [ + "frontend", + "react", + "tailwind", + "ui", + "html" + ] + }, + "mcp-builder": { + "name": "mcp-builder", + "displayName": "MCP Builder", + "description": "Guide for creating high-quality MCP (Model Context Protocol) servers. 
Use this skill when the user wants to build an MCP server, create MCP tools, implement MCP resources, or integrate with MCP-compatible clients.", + "version": "1.0.0", + "author": "Akash Nath", + "repository": "https://github.com/Akash-nath29/coderrr-skills", + "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/mcp-builder", + "tools": [ + "init_mcp", + "add_mcp_tool", + "validate_mcp" + ], + "tags": [ + "mcp", + "protocol", + "server", + "integration" + ] + }, + "webapp-testing": { + "name": "webapp-testing", + "displayName": "Web App Testing", + "description": "Test local web applications using Playwright browser automation. Use this skill when the user wants to test web applications, automate browser interactions, take screenshots, verify UI elements, or run end-to-end tests.", + "version": "1.0.0", + "author": "Akash Nath", + "repository": "https://github.com/Akash-nath29/coderrr-skills", + "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/webapp-testing", + "tools": [ + "start_browser", + "navigate", + "interact", + "verify", + "screenshot" + ], + "tags": [ + "testing", + "browser", + "playwright", + "automation", + "e2e" + ] + }, + "brand-guidelines": { + "name": "brand-guidelines", + "displayName": "Brand Guidelines", + "description": "Apply official brand colors, typography, and design tokens to projects. 
Use this skill when the user wants to set up brand colors, configure typography, generate color palettes, create design tokens, or ensure brand consistency.", + "version": "1.0.0", + "author": "Akash Nath", + "repository": "https://github.com/Akash-nath29/coderrr-skills", + "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/brand-guidelines", + "tools": [ + "set_brand", + "generate_palette", + "export_tokens" + ], + "tags": [ + "design", + "branding", + "colors", + "typography", + "tokens" + ] + }, + "internal-comms": { + "name": "internal-comms", + "displayName": "Internal Communications", + "description": "Write internal communications like status reports, newsletters, announcements, and team updates. Use this skill when the user needs to draft status reports, write team newsletters, create announcements, or compose meeting summaries.", + "version": "1.0.0", + "author": "Akash Nath", + "repository": "https://github.com/Akash-nath29/coderrr-skills", + "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/internal-comms", + "tools": [ + "status_report", + "newsletter", + "announcement", + "meeting_summary" + ], + "tags": [ + "communication", + "reports", + "newsletters", + "documentation" + ] + } + } +} \ No newline at end of file diff --git a/coderrr-skills/skills/api-client/Skills.md b/coderrr-skills/skills/api-client/Skills.md new file mode 100644 index 0000000..0849b8f --- /dev/null +++ b/coderrr-skills/skills/api-client/Skills.md @@ -0,0 +1,201 @@ +--- +name: api-client +description: Make HTTP requests and work with API responses. Use this skill when the user asks to call APIs, make HTTP GET/POST requests, test endpoints, fetch data from REST APIs, or parse and format API responses. Supports custom headers, JSON payloads, and multiple output formats. +--- + +This skill provides HTTP client functionality for interacting with REST APIs. 
It handles authentication headers, JSON request bodies, error responses, and output formatting in JSON, table, or CSV formats. + +The user provides an API endpoint to call or response data to process. They may need to test APIs, fetch data, or parse responses into usable formats. + +## Approach + +Before invoking tools, understand the API interaction: +- **Fetch data**: Use `http_get` for reading resources +- **Send data**: Use `http_post` for creating/updating resources +- **Process results**: Use `parse_response` to extract or reformat data +- **Full workflow**: Chain all three for complete API interactions + +Consider authentication, headers, and expected response formats. + +## Tools + +### http_get + +Makes HTTP GET requests with optional custom headers. + +```bash +python tools/http_get.py --url [--headers ] [--timeout ] +``` + +**Arguments:** +- `--url` (required): Complete URL including protocol (https://...) +- `--headers` (optional): JSON string of headers (e.g., `'{"Authorization": "Bearer token"}'`) +- `--timeout` (optional): Request timeout in seconds (default: 30) + +**Output:** Response body. JSON responses are automatically pretty-printed. + +**Default headers included:** +- `User-Agent: Coderrr-API-Client/1.0` +- `Accept: application/json` + +**When to use:** +- Fetching resources from APIs +- Testing API endpoints +- Downloading JSON data +- Checking API availability + +--- + +### http_post + +Makes HTTP POST requests with JSON body. + +```bash +python tools/http_post.py --url --data [--headers ] [--timeout ] +``` + +**Arguments:** +- `--url` (required): Complete URL including protocol +- `--data` (required): JSON string of request body +- `--headers` (optional): JSON string of additional headers +- `--timeout` (optional): Request timeout in seconds (default: 30) + +**Output:** Response body. JSON responses are automatically pretty-printed. 
+ +**Default headers included:** +- `Content-Type: application/json` +- `Accept: application/json` + +**When to use:** +- Creating new resources +- Submitting form data +- Authenticating with APIs +- Triggering actions + +--- + +### parse_response + +Parses JSON responses and formats or extracts data. + +```bash +python tools/parse_response.py [--data ] [--extract ] [--format ] +``` + +**Arguments:** +- `--data` (optional): JSON string to parse. If omitted, reads from stdin +- `--extract` (optional): Path expression to extract (e.g., `data.users[0].name`) +- `--format` (optional): Output format - `json`, `table`, or `csv` (default: json) + +**Output:** Formatted data according to specified format. + +**When to use:** +- Extracting specific fields from responses +- Converting JSON to readable tables +- Exporting data to CSV +- Processing piped API output + +## Common Patterns + +### Simple GET Request +```bash +python tools/http_get.py --url https://api.github.com/users/octocat +``` + +### Authenticated Request +```bash +python tools/http_get.py --url https://api.example.com/me --headers '{"Authorization": "Bearer YOUR_TOKEN"}' +``` + +### POST with JSON Data +```bash +python tools/http_post.py --url https://api.example.com/users --data '{"name": "John", "email": "john@example.com"}' +``` + +### Chained API Call with Extraction +```bash +python tools/http_get.py --url https://api.example.com/users | python tools/parse_response.py --extract "data[*].email" +``` + +### Format as Table +```bash +python tools/http_get.py --url https://api.example.com/users | python tools/parse_response.py --format table +``` + +### Export to CSV +```bash +python tools/http_get.py --url https://api.example.com/users | python tools/parse_response.py --format csv > users.csv +``` + +## Authentication Patterns + +### Bearer Token +```bash +--headers '{"Authorization": "Bearer YOUR_ACCESS_TOKEN"}' +``` + +### API Key (Header) +```bash +--headers '{"X-API-Key": "YOUR_API_KEY"}' +``` + +### 
Basic Auth (Base64) +```bash +--headers '{"Authorization": "Basic dXNlcm5hbWU6cGFzc3dvcmQ="}' +``` + +### Multiple Headers +```bash +--headers '{"Authorization": "Bearer token", "X-Request-ID": "123", "Accept-Language": "en-US"}' +``` + +## Response Format Examples + +### JSON (default) +```json +{ + "id": 1, + "name": "John Doe", + "email": "john@example.com" +} +``` + +### Table +``` +| id | name | email | +|----|----------|------------------| +| 1 | John Doe | john@example.com | +| 2 | Jane Doe | jane@example.com | +``` + +### CSV +```csv +id,name,email +1,John Doe,john@example.com +2,Jane Doe,jane@example.com +``` + +## Best Practices + +1. **Always use HTTPS** - Never send credentials over HTTP +2. **Handle errors** - Check exit codes and stderr for failures +3. **Set appropriate timeouts** - Long for slow APIs, short for health checks +4. **Use extraction** - Don't process entire responses when you need one field +5. **Chain tools** - Pipe http_get to parse_response for clean workflows +6. **Escape JSON carefully** - Use single quotes around JSON strings in bash + +## Error Handling + +| Exit Code | Meaning | Recovery | +|-----------|---------|----------| +| 0 | Success | - | +| 1 | Invalid arguments or URL | Check URL format, header syntax | +| 2 | Network/connection error | Verify network, check timeout | +| 3 | HTTP error (4xx, 5xx) | Check authentication, request format | +| 4 | JSON parsing error | Verify response is valid JSON | + +**HTTP Error Details:** When HTTP 4xx/5xx occurs, the response body is still printed to stderr for debugging. + +## Dependencies + +Requires `requests>=2.28.0`. Automatically installed with the skill. 
def http_get(url: str, headers: dict = None, timeout: int = 30) -> str:
    """Perform an HTTP GET request and return the response body.

    Args:
        url: Target URL, including the scheme (http:// or https://).
        headers: Optional extra headers, merged over the defaults.
        timeout: Seconds to wait before giving up on the request.

    Returns:
        The raw response body text.

    Raises:
        requests.exceptions.HTTPError: on 4xx/5xx responses (via
            raise_for_status).
        requests.exceptions.RequestException: on network-level failures.
    """
    merged = {
        'User-Agent': 'Coderrr-API-Client/1.0',
        'Accept': 'application/json',
    }
    # Caller-supplied headers win over the defaults.
    merged.update(headers or {})

    resp = requests.get(url, headers=merged, timeout=timeout)
    resp.raise_for_status()
    return resp.text
def http_post(url: str, data: dict, headers: dict = None, timeout: int = 30) -> str:
    """Perform an HTTP POST request with a JSON body and return the response text.

    Args:
        url: Target URL, including the scheme.
        data: Payload dictionary, serialized as the JSON request body.
        headers: Optional extra headers, merged over the defaults.
        timeout: Seconds to wait before giving up on the request.

    Returns:
        The raw response body text.

    Raises:
        requests.exceptions.HTTPError: on 4xx/5xx responses (via
            raise_for_status).
        requests.exceptions.RequestException: on network-level failures.
    """
    merged = {
        'User-Agent': 'Coderrr-API-Client/1.0',
        'Content-Type': 'application/json',
        'Accept': 'application/json',
    }
    # Caller-supplied headers win over the defaults.
    merged.update(headers or {})

    resp = requests.post(url, json=data, headers=merged, timeout=timeout)
    resp.raise_for_status()
    return resp.text
JSON, print as-is + print(response) + + except requests.exceptions.MissingSchema: + print(f"Error: Invalid URL. Include http:// or https://", file=sys.stderr) + sys.exit(1) + except requests.exceptions.ConnectionError as e: + print(f"Error: Connection failed - {e}", file=sys.stderr) + sys.exit(2) + except requests.exceptions.Timeout: + print(f"Error: Request timed out after {args.timeout} seconds", file=sys.stderr) + sys.exit(2) + except requests.exceptions.HTTPError as e: + print(f"Error: HTTP {e.response.status_code} - {e.response.reason}", file=sys.stderr) + # Still output the response body if available + if e.response.text: + try: + resp_data = json.loads(e.response.text) + print(json.dumps(resp_data, indent=2), file=sys.stderr) + except json.JSONDecodeError: + print(e.response.text, file=sys.stderr) + sys.exit(3) + except requests.exceptions.RequestException as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(2) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/api-client/tools/parse_response.py b/coderrr-skills/skills/api-client/tools/parse_response.py new file mode 100644 index 0000000..a6db5b5 --- /dev/null +++ b/coderrr-skills/skills/api-client/tools/parse_response.py @@ -0,0 +1,232 @@ +#!/usr/bin/env python3 +""" +Parse and format API responses. + +This tool takes JSON response data and can extract specific fields +or format the output as JSON, table, or CSV. 
+ +Usage: + python parse_response.py --data '{"user": {"name": "John"}}' + echo '{"data": [...]}' | python parse_response.py --extract "data" + python parse_response.py --data '[...]' --format table + +Exit Codes: + 0 - Success + 1 - Invalid arguments + 4 - JSON parsing error +""" + +import argparse +import sys +import json +import re +from typing import Any, List + + +def parse_path(path: str) -> List[Any]: + """Parse a path expression into components.""" + components = [] + pattern = r'\.?([^\.\[\]]+)|\[(\d+|\*)\]' + + for match in re.finditer(pattern, path): + if match.group(1): + components.append(match.group(1)) + elif match.group(2): + idx = match.group(2) + if idx == '*': + components.append('*') + else: + components.append(int(idx)) + + return components + + +def extract_value(data: Any, path: str) -> Any: + """Extract a value from data using a path expression.""" + if not path: + return data + + components = parse_path(path) + current = data + + for component in components: + if component == '*': + if isinstance(current, list): + return current + raise TypeError("Can't use [*] on non-array") + elif isinstance(component, int): + if not isinstance(current, list): + raise TypeError(f"Can't use [{component}] on non-array") + current = current[component] + else: + if not isinstance(current, dict): + raise TypeError(f"Can't access '{component}' on non-object") + current = current.get(component) + if current is None: + raise KeyError(f"Key '{component}' not found") + + return current + + +def format_as_table(data: Any) -> str: + """Format data as an ASCII table.""" + if isinstance(data, dict): + data = [data] + + if not isinstance(data, list) or not data: + return json.dumps(data, indent=2) + + if not isinstance(data[0], dict): + return json.dumps(data, indent=2) + + # Get all keys + keys = [] + for item in data: + for key in item.keys(): + if key not in keys: + keys.append(key) + + # Calculate column widths + widths = {key: len(str(key)) for key in keys} + for 
def format_as_csv(data: Any) -> str:
    """Render a dict or list of dicts as CSV text (header row, no trailing newline).

    A single dict is treated as a one-row table. A list of non-dict values
    is emitted one value per line. Column order follows first appearance
    across all rows; missing cells become empty strings.
    """
    if isinstance(data, dict):
        data = [data]

    if not isinstance(data, list) or not data:
        return ''

    if not isinstance(data[0], dict):
        # Plain array of scalars: one value per line, no header.
        return '\n'.join(str(item) for item in data)

    # Union of keys across all rows, preserving first-seen order.
    keys: List[str] = []
    for item in data:
        for key in item.keys():
            if key not in keys:
                keys.append(key)

    # Bug fix: use the csv module for quoting. The previous hand-rolled
    # escaping left header keys unescaped and did not quote fields containing
    # '\r', both of which produce malformed CSV.
    buf = io.StringIO()
    writer = csv.writer(buf, lineterminator='\n')
    writer.writerow(keys)
    for item in data:
        writer.writerow([str(item.get(key, '')) for key in keys])

    # Drop the terminator after the last row to match the previous output.
    return buf.getvalue()[:-1]


def parse_response(data_str: str, extract: str = None, output_format: str = 'json') -> str:
    """Parse a JSON string, optionally extract a sub-path, and format it.

    Args:
        data_str: JSON text to parse.
        extract: Optional path expression (see extract_value).
        output_format: 'json' (default, pretty-printed), 'table', or 'csv'.

    Returns:
        The formatted output string.

    Raises:
        json.JSONDecodeError: if data_str is not valid JSON.
        KeyError/TypeError/IndexError: if the extract path does not match.
    """
    data = json.loads(data_str)

    if extract:
        data = extract_value(data, extract)

    if output_format == 'table':
        return format_as_table(data)
    if output_format == 'csv':
        return format_as_csv(data)
    return json.dumps(data, indent=2, ensure_ascii=False)
Use --data or pipe JSON to stdin.", file=sys.stderr) + sys.exit(1) + data_str = sys.stdin.read() + + if not data_str.strip(): + print("Error: Empty input", file=sys.stderr) + sys.exit(1) + + try: + result = parse_response(data_str, args.extract, args.format) + print(result) + except json.JSONDecodeError as e: + print(f"Error: Invalid JSON - {e}", file=sys.stderr) + sys.exit(4) + except (KeyError, TypeError, IndexError) as e: + print(f"Error extracting path: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/artifacts-builder/Skills.md b/coderrr-skills/skills/artifacts-builder/Skills.md new file mode 100644 index 0000000..a41c580 --- /dev/null +++ b/coderrr-skills/skills/artifacts-builder/Skills.md @@ -0,0 +1,189 @@ +--- +name: artifacts-builder +description: Build complex HTML artifacts using React, Tailwind CSS, and shadcn/ui components. Use this skill when the user wants to create polished UI components, interactive web widgets, dashboards, landing pages, or sophisticated HTML artifacts with modern styling and interactivity. +--- + +This skill guides creation of production-quality HTML artifacts using modern frontend technologies. It helps scaffold React components with Tailwind CSS styling and shadcn/ui components for professional, consistent design. + +The user wants to create an HTML artifact. They may specify the type (component, page, widget, dashboard), design requirements, and interactivity needs. + +## Approach + +When building artifacts: +1. **Analyze requirements**: Understand the UI/UX goals +2. **Choose architecture**: Single HTML vs multi-file component +3. **Select components**: Pick appropriate shadcn/ui components +4. **Build progressively**: Start with structure, add styling, then interactivity + +## Tools + +### scaffold_artifact + +Creates the initial artifact structure with appropriate boilerplate. 
+ +```bash +python tools/scaffold_artifact.py --name --type --output-dir [--features ] +``` + +**Arguments:** +- `--name` (required): Artifact name (used for file naming) +- `--type` (required): Artifact type - `component`, `page`, `widget`, `dashboard` +- `--output-dir` (required): Output directory +- `--features` (optional): Comma-separated features - `dark-mode`, `animations`, `responsive` + +**Types:** +- `component` - Reusable React component +- `page` - Full page layout +- `widget` - Self-contained interactive widget +- `dashboard` - Data dashboard with charts/tables + +**When to use:** +- Starting a new artifact +- Getting proper boilerplate +- Setting up the file structure + +--- + +### add_component + +Adds pre-built component templates to an artifact. + +```bash +python tools/add_component.py --artifact-dir --component [--variant ] +``` + +**Arguments:** +- `--artifact-dir` (required): Path to artifact directory +- `--component` (required): Component to add (see list below) +- `--variant` (optional): Component variant/style + +**Available Components:** +- `button` - Interactive buttons (variants: primary, secondary, outline, ghost) +- `card` - Content card with header/body/footer +- `dialog` - Modal dialog +- `dropdown` - Dropdown menu +- `form` - Form with validation +- `table` - Data table with sorting +- `tabs` - Tabbed interface +- `navbar` - Navigation bar +- `sidebar` - Side navigation +- `chart` - Data visualization (variants: line, bar, pie) + +**When to use:** +- Adding UI components +- Building layouts +- Including interactive elements + +--- + +### build_artifact + +Compiles the artifact into a single, deliverable HTML file. 
+ +```bash +python tools/build_artifact.py --artifact-dir --output [--minify] +``` + +**Arguments:** +- `--artifact-dir` (required): Path to artifact directory +- `--output` (required): Output HTML file path +- `--minify` (optional): Minify the output + +**When to use:** +- Creating final deliverable +- Bundling for distribution +- Generating standalone HTML + +--- + +### preview_artifact + +Generates a preview of the artifact. + +```bash +python tools/preview_artifact.py --artifact-dir [--port ] +``` + +**Arguments:** +- `--artifact-dir` (required): Path to artifact directory +- `--port` (optional): Preview server port (default: 3000) + +**When to use:** +- Testing the artifact +- Visual verification +- Development iteration + +## Design Guidelines + +### Visual Excellence +- Use rich color palettes, not generic defaults +- Implement smooth animations and transitions +- Apply generous whitespace for breathing room +- Choose distinctive typography + +### Modern Aesthetics +- Glassmorphism effects for depth +- Gradient backgrounds and accents +- Subtle shadows and blur effects +- Micro-interactions on hover/focus + +### Responsive Design +- Mobile-first approach +- Flexible grids and layouts +- Appropriate breakpoints +- Touch-friendly interactions + +## Common Patterns + +### Create Landing Page +```bash +python tools/scaffold_artifact.py --name landing --type page --output-dir ./artifacts --features responsive,animations +python tools/add_component.py --artifact-dir ./artifacts/landing --component navbar +python tools/add_component.py --artifact-dir ./artifacts/landing --component button --variant primary +python tools/build_artifact.py --artifact-dir ./artifacts/landing --output ./landing.html +``` + +### Create Dashboard Widget +```bash +python tools/scaffold_artifact.py --name metrics --type widget --output-dir ./artifacts +python tools/add_component.py --artifact-dir ./artifacts/metrics --component card +python tools/add_component.py --artifact-dir 
./artifacts/metrics --component chart --variant line +python tools/build_artifact.py --artifact-dir ./artifacts/metrics --output ./widget.html +``` + +## Best Practices + +1. **Start with scaffold** - Get proper boilerplate and structure +2. **Use components** - Don't build from scratch when components exist +3. **Test responsiveness** - Check at multiple breakpoints +4. **Preview before build** - Catch issues early +5. **Minify for production** - Smaller file size for delivery + +## Color Palette Suggestions + +Avoid generic colors. Use these curated palettes: + +**Professional Dark:** +- Background: `#0f172a` +- Surface: `#1e293b` +- Primary: `#3b82f6` +- Accent: `#f472b6` + +**Warm Light:** +- Background: `#fef7ee` +- Surface: `#ffffff` +- Primary: `#ea580c` +- Accent: `#0ea5e9` + +**Modern Neutral:** +- Background: `#18181b` +- Surface: `#27272a` +- Primary: `#a78bfa` +- Accent: `#34d399` + +## Dependencies + +Generates self-contained HTML with embedded: +- React 18 (via CDN) +- Tailwind CSS (via CDN) +- shadcn/ui component styles diff --git a/coderrr-skills/skills/artifacts-builder/tools/add_component.py b/coderrr-skills/skills/artifacts-builder/tools/add_component.py new file mode 100644 index 0000000..6083313 --- /dev/null +++ b/coderrr-skills/skills/artifacts-builder/tools/add_component.py @@ -0,0 +1,186 @@ +#!/usr/bin/env python3 +""" +Add component templates to an artifact. + +Usage: + python add_component.py --artifact-dir ./my-artifact --component button --variant primary +""" + +import argparse +import sys +import json +from pathlib import Path + + +COMPONENTS = { + "button": { + "primary": ''' +const Button = ({ children, onClick, disabled }) => ( + +); +''', + "secondary": ''' +const Button = ({ children, onClick, disabled }) => ( + +); +''', + "outline": ''' +const Button = ({ children, onClick, disabled }) => ( + +); +''' + }, + "card": { + "default": ''' +const Card = ({ title, children, footer }) => ( +
+ {title && ( +
+

{title}

+
+ )} +
{children}
+ {footer && ( +
+ {footer} +
+ )} +
+); +''' + }, + "navbar": { + "default": ''' +const Navbar = ({ logo, links }) => ( + +); +''' + }, + "table": { + "default": ''' +const Table = ({ headers, rows }) => ( +
+ + + + {headers?.map((header, i) => ( + + ))} + + + + {rows?.map((row, i) => ( + + {row.map((cell, j) => ( + + ))} + + ))} + +
+ {header} +
+ {cell} +
+
+); +''' + } +} + + +def add_component(artifact_dir: str, component: str, variant: str = None): + """Add a component to the artifact.""" + artifact_path = Path(artifact_dir) + + if not artifact_path.exists(): + raise ValueError(f"Artifact directory not found: {artifact_dir}") + + if component not in COMPONENTS: + return { + "error": f"Unknown component: {component}", + "available": list(COMPONENTS.keys()) + } + + variants = COMPONENTS[component] + variant = variant or "default" + if variant not in variants: + variant = list(variants.keys())[0] + + component_code = variants[variant] + + # Update components.json + config_file = artifact_path / 'components.json' + if config_file.exists(): + config = json.loads(config_file.read_text()) + else: + config = {"components": []} + + config["components"].append({ + "name": component, + "variant": variant + }) + config_file.write_text(json.dumps(config, indent=2)) + + # Save component code + components_dir = artifact_path / 'components' + components_dir.mkdir(exist_ok=True) + (components_dir / f'{component}.jsx').write_text(component_code) + + return { + "status": "success", + "component": component, + "variant": variant, + "file": str(components_dir / f'{component}.jsx') + } + + +def main(): + parser = argparse.ArgumentParser(description='Add component to artifact') + parser.add_argument('--artifact-dir', required=True, help='Artifact directory') + parser.add_argument('--component', required=True, help='Component name') + parser.add_argument('--variant', help='Component variant') + + args = parser.parse_args() + + try: + result = add_component(args.artifact_dir, args.component, args.variant) + print(json.dumps(result, indent=2)) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/artifacts-builder/tools/build_artifact.py b/coderrr-skills/skills/artifacts-builder/tools/build_artifact.py new file mode 100644 index 
0000000..4506738 --- /dev/null +++ b/coderrr-skills/skills/artifacts-builder/tools/build_artifact.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python3 +""" +Build artifact into single HTML file. + +Usage: + python build_artifact.py --artifact-dir ./my-artifact --output ./output.html +""" + +import argparse +import sys +import json +from pathlib import Path +import re + + +def build_artifact(artifact_dir: str, output: str, minify: bool = False): + """Build artifact into single HTML file.""" + artifact_path = Path(artifact_dir) + + if not artifact_path.exists(): + raise ValueError(f"Artifact directory not found: {artifact_dir}") + + index_file = artifact_path / 'index.html' + if not index_file.exists(): + raise ValueError("index.html not found in artifact directory") + + html = index_file.read_text() + + # Collect component code + components_dir = artifact_path / 'components' + if components_dir.exists(): + component_code = [] + for comp_file in components_dir.glob('*.jsx'): + component_code.append(comp_file.read_text()) + + if component_code: + # Inject components before main React code + combined = '\n'.join(component_code) + html = html.replace( + ' + + + + + + + +
+ + + + +''' + +REACT_TEMPLATES = { + "component": ''' +const App = () => { + return ( +
+
+

+ Component Title +

+

+ Your component content goes here. +

+
+
+ ); +}; + +ReactDOM.createRoot(document.getElementById('root')).render(); +''', + "page": ''' +const Navbar = () => ( + +); + +const Hero = () => ( +
+
+

+ Welcome to Your Page +

+

+ A beautiful, modern landing page built with React and Tailwind CSS. +

+ +
+
+); + +const App = () => ( + <> + + + +); + +ReactDOM.createRoot(document.getElementById('root')).render(); +''', + "widget": ''' +const Widget = () => { + const [count, setCount] = React.useState(0); + + return ( +
+
+
+
{count}
+

Counter Value

+
+ + +
+
+
+
+ ); +}; + +ReactDOM.createRoot(document.getElementById('root')).render(); +''', + "dashboard": ''' +const StatCard = ({ title, value, change }) => ( +
+

{title}

+

{value}

+

= 0 ? 'text-green-500' : 'text-red-500'}`}> + {change >= 0 ? '↑' : '↓'} {Math.abs(change)}% +

+
+); + +const Dashboard = () => { + const stats = [ + { title: 'Total Revenue', value: '$45,231', change: 12.5 }, + { title: 'Active Users', value: '2,345', change: 8.1 }, + { title: 'Conversion Rate', value: '3.2%', change: -2.4 }, + { title: 'Avg Session', value: '4m 32s', change: 15.3 }, + ]; + + return ( +
+

Dashboard

+
+ {stats.map((stat, i) => ( + + ))} +
+
+ ); +}; + +ReactDOM.createRoot(document.getElementById('root')).render(); +''' +} + + +def scaffold_artifact(name: str, artifact_type: str, output_dir: str, features: list = None): + """Create artifact scaffold.""" + features = features or [] + artifact_dir = Path(output_dir) / name + artifact_dir.mkdir(parents=True, exist_ok=True) + + # Generate HTML + dark_class = 'dark' if 'dark-mode' in features else '' + animations = 'transition-all duration-300' if 'animations' in features else '' + custom_styles = '' + + react_code = REACT_TEMPLATES.get(artifact_type, REACT_TEMPLATES['component']) + + html = HTML_TEMPLATE.format( + title=name.replace('-', ' ').title(), + dark_class=dark_class, + animations=animations, + custom_styles=custom_styles, + react_code=react_code + ) + + # Write files + (artifact_dir / 'index.html').write_text(html) + (artifact_dir / 'components.json').write_text(json.dumps({ + "name": name, + "type": artifact_type, + "features": features, + "components": [] + }, indent=2)) + + return { + "status": "success", + "artifact_dir": str(artifact_dir), + "files": [ + str(artifact_dir / 'index.html'), + str(artifact_dir / 'components.json') + ] + } + + +def main(): + parser = argparse.ArgumentParser(description='Scaffold an artifact') + parser.add_argument('--name', required=True, help='Artifact name') + parser.add_argument('--type', required=True, choices=['component', 'page', 'widget', 'dashboard']) + parser.add_argument('--output-dir', required=True, help='Output directory') + parser.add_argument('--features', help='Comma-separated features') + + args = parser.parse_args() + features = args.features.split(',') if args.features else [] + + try: + result = scaffold_artifact(args.name, args.type, args.output_dir, features) + print(json.dumps(result, indent=2)) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/brand-guidelines/Skills.md 
b/coderrr-skills/skills/brand-guidelines/Skills.md new file mode 100644 index 0000000..99e5fb4 --- /dev/null +++ b/coderrr-skills/skills/brand-guidelines/Skills.md @@ -0,0 +1,173 @@ +--- +name: brand-guidelines +description: Apply official brand colors, typography, and design tokens to projects. Use this skill when the user wants to set up brand colors, configure typography, generate color palettes, create design tokens, or ensure brand consistency across a project. +--- + +This skill helps maintain brand consistency by managing colors, typography, and design tokens. It generates CSS variables, config files, and documentation for brand implementation. + +The user wants to apply brand styling to a project. They may provide brand colors, font choices, or want to generate a cohesive palette. + +## Approach + +When setting up brand guidelines: +1. **Define**: Use `set_brand` to configure core brand values +2. **Generate**: Use `generate_palette` for extended colors +3. **Export**: Use `export_tokens` for CSS/SCSS/JSON/Tailwind +4. **Document**: Use `create_styleguide` for team reference + +## Tools + +### set_brand + +Configures core brand values. + +```bash +python tools/set_brand.py --name --primary --secondary [--accent ] [--fonts ] +``` + +**Arguments:** +- `--name` (required): Brand/project name +- `--primary` (required): Primary brand color (hex) +- `--secondary` (required): Secondary color (hex) +- `--accent` (optional): Accent color (hex) +- `--fonts` (optional): Font configuration JSON + +**Fonts JSON:** +```json +{ + "heading": "Outfit", + "body": "Inter", + "mono": "JetBrains Mono" +} +``` + +**When to use:** +- Starting a new project +- Updating brand colors +- Setting typography + +--- + +### generate_palette + +Generates extended color palette from brand colors. 
+ +```bash +python tools/generate_palette.py --brand [--include ] +``` + +**Arguments:** +- `--brand` (required): Brand name (from set_brand) +- `--include` (optional): What to generate (default: both) + +**Generates:** +- **Shades**: 50-950 scale for each color +- **Semantic**: success, warning, error, info colors + +**When to use:** +- Creating full color system +- Generating consistent shades +- Adding semantic colors + +--- + +### export_tokens + +Exports design tokens in various formats. + +```bash +python tools/export_tokens.py --brand --format --output +``` + +**Arguments:** +- `--brand` (required): Brand name +- `--format` (required): Output format - `css`, `scss`, `json`, `tailwind`, `figma` +- `--output` (required): Output file path + +**CSS Output Example:** +```css +:root { + --color-primary: #3b82f6; + --color-primary-50: #eff6ff; + --color-primary-500: #3b82f6; + --font-heading: 'Outfit', sans-serif; +} +``` + +**When to use:** +- Integrating with existing projects +- Setting up Tailwind config +- Sharing with design tools + +--- + +### create_styleguide + +Generates brand documentation. 
+ +```bash +python tools/create_styleguide.py --brand --output [--format ] +``` + +**Arguments:** +- `--brand` (required): Brand name +- `--output` (required): Output file/directory +- `--format` (optional): Guide format (default: html) + +**When to use:** +- Documenting brand for team +- Creating design reference +- Onboarding designers + +## Common Patterns + +### Complete Brand Setup +```bash +# Define brand +python tools/set_brand.py --name myproject --primary "#6366f1" --secondary "#64748b" --accent "#f43f5e" --fonts '{"heading": "Outfit", "body": "Inter"}' + +# Generate extended palette +python tools/generate_palette.py --brand myproject --include both + +# Export for Tailwind +python tools/export_tokens.py --brand myproject --format tailwind --output tailwind.config.js + +# Create documentation +python tools/create_styleguide.py --brand myproject --output ./docs/brand +``` + +### Quick CSS Variables +```bash +python tools/set_brand.py --name quick --primary "#0ea5e9" --secondary "#1e293b" +python tools/export_tokens.py --brand quick --format css --output variables.css +``` + +## Color Guidelines + +**Primary**: Main brand color, buttons, links, key UI elements +**Secondary**: Supporting color, backgrounds, borders +**Accent**: Call-to-action, highlights, notifications + +**Shade Scale:** +- 50: Lightest (backgrounds) +- 100-200: Light variants +- 300-400: Muted variants +- 500: Base color +- 600-700: Darker variants +- 800-900: Darkest (text on light) +- 950: Near-black variant + +## Typography Guidelines + +**Heading fonts**: Display, expressive +- Outfit, Space Grotesk, Clash Display, Satoshi + +**Body fonts**: Readable, neutral +- Inter, Source Sans, Nunito Sans, DM Sans + +**Mono fonts**: Code, technical +- JetBrains Mono, Fira Code, IBM Plex Mono + +## Dependencies + +Uses Python standard library with optional `colormath` for advanced color operations. 
#!/usr/bin/env python3
"""
Export design tokens for a saved brand.

Usage:
    python export_tokens.py --brand myproject --format css --output variables.css
"""

import argparse
import sys
import json
from pathlib import Path


# Brand configs are stored per-user by set_brand.py.
BRANDS_DIR = Path.home() / '.coderrr' / 'brands'


def _font_fallback(font_type: str) -> str:
    """Return the CSS generic family fallback for a font role.

    Mono fonts must fall back to ``monospace``; everything else (heading,
    body, ...) falls back to ``sans-serif``. Previously every role — including
    mono — incorrectly fell back to ``sans-serif``.
    """
    return 'monospace' if font_type == 'mono' else 'sans-serif'


def export_css(brand: dict) -> str:
    """Export brand tokens as CSS custom properties in a ``:root`` block.

    Palette entries may be flat ("primary": "#hex") or shade maps
    ("primary": {"50": "#hex", ...}); both forms are handled.
    """
    lines = [":root {"]

    palette = brand.get("palette", {}).get("colors", {})
    for color_name, shades in palette.items():
        if isinstance(shades, dict):
            for shade, value in shades.items():
                lines.append(f"  --color-{color_name}-{shade}: {value};")
        else:
            lines.append(f"  --color-{color_name}: {shades};")

    fonts = brand.get("fonts", {})
    for font_type, font_name in fonts.items():
        lines.append(f"  --font-{font_type}: '{font_name}', {_font_fallback(font_type)};")

    lines.append("}")
    return '\n'.join(lines)


def export_scss(brand: dict) -> str:
    """Export brand tokens as SCSS variables (one ``$name: value;`` per line)."""
    lines = []

    palette = brand.get("palette", {}).get("colors", {})
    for color_name, shades in palette.items():
        if isinstance(shades, dict):
            for shade, value in shades.items():
                lines.append(f"${color_name}-{shade}: {value};")
        else:
            lines.append(f"${color_name}: {shades};")

    fonts = brand.get("fonts", {})
    for font_type, font_name in fonts.items():
        lines.append(f"$font-{font_type}: '{font_name}', {_font_fallback(font_type)};")

    return '\n'.join(lines)


def export_tailwind(brand: dict) -> str:
    """Export brand tokens as a Tailwind ``module.exports`` config extension."""
    config = {
        "theme": {
            "extend": {
                "colors": {},
                "fontFamily": {}
            }
        }
    }

    palette = brand.get("palette", {}).get("colors", {})
    for color_name, shades in palette.items():
        config["theme"]["extend"]["colors"][color_name] = shades

    fonts = brand.get("fonts", {})
    for font_type, font_name in fonts.items():
        config["theme"]["extend"]["fontFamily"][font_type] = [font_name, _font_fallback(font_type)]

    return f"module.exports = {json.dumps(config, indent=2)}"


def export_tokens(brand_name: str, format_type: str, output: str):
    """Load a saved brand and write its tokens to *output* in *format_type*.

    Raises ValueError when the brand does not exist or the format is unknown.
    """
    brand_file = BRANDS_DIR / f"{brand_name}.json"

    if not brand_file.exists():
        raise ValueError(f"Brand not found: {brand_name}")

    brand = json.loads(brand_file.read_text())

    # NOTE(review): the skill doc also advertises a 'figma' format that is
    # not implemented here — confirm intent before documenting it.
    exporters = {
        'css': export_css,
        'scss': export_scss,
        'tailwind': export_tailwind,
        'json': lambda b: json.dumps(b, indent=2)
    }

    if format_type not in exporters:
        raise ValueError(f"Unknown format: {format_type}")

    content = exporters[format_type](brand)

    output_path = Path(output)
    output_path.parent.mkdir(parents=True, exist_ok=True)
    output_path.write_text(content)

    return {
        "status": "success",
        "brand": brand_name,
        "format": format_type,
        "output": str(output_path)
    }


def main():
    """CLI entry point: parse arguments, export tokens, print JSON result."""
    parser = argparse.ArgumentParser(description='Export design tokens')
    parser.add_argument('--brand', required=True, help='Brand name')
    parser.add_argument('--format', required=True, choices=['css', 'scss', 'json', 'tailwind'])
    parser.add_argument('--output', required=True, help='Output file path')

    args = parser.parse_args()

    try:
        result = export_tokens(args.brand, args.format, args.output)
        print(json.dumps(result, indent=2))
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
#!/usr/bin/env python3
"""
Generate an extended color palette (shade scales + semantic colors) for a
saved brand.

Usage:
    python generate_palette.py --brand myproject --include both
"""

import argparse
import sys
import json
from pathlib import Path


# Brand configs are stored per-user by set_brand.py.
BRANDS_DIR = Path.home() / '.coderrr' / 'brands'


def hex_to_rgb(hex_color: str) -> tuple:
    """Convert a hex color ('#rgb' or '#rrggbb') to an (r, g, b) int tuple.

    3-digit shorthand is expanded (e.g. '#f80' -> '#ff8800'). Raises
    ValueError for any other length instead of silently mis-parsing it.
    """
    value = hex_color.lstrip('#')
    if len(value) == 3:
        value = ''.join(ch * 2 for ch in value)
    if len(value) != 6:
        raise ValueError(f"Invalid hex color: {hex_color}")
    return tuple(int(value[i:i+2], 16) for i in (0, 2, 4))


def rgb_to_hex(rgb: tuple) -> str:
    """Convert an (r, g, b) tuple to '#rrggbb', clamping channels to 0-255."""
    return '#{:02x}{:02x}{:02x}'.format(*[max(0, min(255, int(c))) for c in rgb])


def lighten(hex_color: str, amount: float) -> str:
    """Blend a color toward white by *amount* (0.0 = unchanged, 1.0 = white)."""
    r, g, b = hex_to_rgb(hex_color)
    r = r + (255 - r) * amount
    g = g + (255 - g) * amount
    b = b + (255 - b) * amount
    return rgb_to_hex((r, g, b))


def darken(hex_color: str, amount: float) -> str:
    """Blend a color toward black by *amount* (0.0 = unchanged, 1.0 = black)."""
    r, g, b = hex_to_rgb(hex_color)
    r = r * (1 - amount)
    g = g * (1 - amount)
    b = b * (1 - amount)
    return rgb_to_hex((r, g, b))


def generate_shades(base_color: str) -> dict:
    """Generate a Tailwind-style 50-950 shade scale around *base_color* (500)."""
    return {
        "50": lighten(base_color, 0.95),
        "100": lighten(base_color, 0.9),
        "200": lighten(base_color, 0.75),
        "300": lighten(base_color, 0.6),
        "400": lighten(base_color, 0.3),
        "500": base_color,
        "600": darken(base_color, 0.1),
        "700": darken(base_color, 0.25),
        "800": darken(base_color, 0.4),
        "900": darken(base_color, 0.55),
        "950": darken(base_color, 0.7)
    }


def generate_palette(brand_name: str, include: str = 'both'):
    """Generate the extended palette and persist it into the brand file.

    *include* selects what to generate: 'shades' (scales for each brand
    color), 'semantic' (success/warning/error/info scales), or 'both'.
    Raises ValueError when the brand does not exist.
    """
    brand_file = BRANDS_DIR / f"{brand_name}.json"

    if not brand_file.exists():
        raise ValueError(f"Brand not found: {brand_name}")

    brand = json.loads(brand_file.read_text())
    palette = {"colors": {}}

    if include in ['shades', 'both']:
        # Generate shades for each brand color
        for name, color in brand.get("colors", {}).items():
            palette["colors"][name] = generate_shades(color)

    if include in ['semantic', 'both']:
        # Add semantic colors
        palette["colors"]["success"] = generate_shades("#22c55e")
        palette["colors"]["warning"] = generate_shades("#eab308")
        palette["colors"]["error"] = generate_shades("#ef4444")
        palette["colors"]["info"] = generate_shades("#3b82f6")

    # Update brand file with palette
    brand["palette"] = palette
    brand_file.write_text(json.dumps(brand, indent=2))

    return {
        "status": "success",
        "brand": brand_name,
        "generated": list(palette["colors"].keys())
    }


def main():
    """CLI entry point: parse arguments, generate the palette, print JSON."""
    parser = argparse.ArgumentParser(description='Generate color palette')
    parser.add_argument('--brand', required=True, help='Brand name')
    parser.add_argument('--include', default='both', choices=['shades', 'semantic', 'both'])

    args = parser.parse_args()

    try:
        result = generate_palette(args.brand, args.include)
        print(json.dumps(result, indent=2))
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
#!/usr/bin/env python3
"""
Configure core brand values.

Usage:
    python set_brand.py --name myproject --primary "#3b82f6" --secondary "#64748b"
"""

import argparse
import sys
import json
from pathlib import Path


# Brand configs are stored per-user; other brand tools read from here.
BRANDS_DIR = Path.home() / '.coderrr' / 'brands'

_HEX_DIGITS = set('0123456789abcdefABCDEF')


def hex_to_rgb(hex_color: str) -> tuple:
    """Convert a '#rrggbb' hex color to an (r, g, b) tuple of ints."""
    hex_color = hex_color.lstrip('#')
    return tuple(int(hex_color[i:i+2], 16) for i in (0, 2, 4))


def _validate_color(label: str, value: str) -> None:
    """Raise ValueError unless *value* is a 3- or 6-digit hex color.

    Previously colors were persisted unchecked, so garbage values were
    silently written into the brand file and only failed later (e.g. in
    generate_palette).
    """
    digits = value.lstrip('#')
    if len(digits) not in (3, 6) or any(c not in _HEX_DIGITS for c in digits):
        raise ValueError(f"Invalid {label} color: {value} (expected hex like '#3b82f6')")


def set_brand(name: str, primary: str, secondary: str, accent: str = None, fonts: dict = None):
    """Validate and persist a brand configuration to BRANDS_DIR/<name>.json.

    Returns a summary dict (status, brand, file, colors, fonts). Raises
    ValueError for malformed hex colors.
    """
    _validate_color("primary", primary)
    _validate_color("secondary", secondary)
    if accent:
        _validate_color("accent", accent)

    BRANDS_DIR.mkdir(parents=True, exist_ok=True)

    brand = {
        "name": name,
        "colors": {
            "primary": primary,
            "secondary": secondary
        },
        "fonts": fonts or {
            "heading": "Inter",
            "body": "Inter",
            "mono": "JetBrains Mono"
        }
    }

    if accent:
        brand["colors"]["accent"] = accent

    # Save brand config
    brand_file = BRANDS_DIR / f"{name}.json"
    brand_file.write_text(json.dumps(brand, indent=2))

    return {
        "status": "success",
        "brand": name,
        "file": str(brand_file),
        "colors": brand["colors"],
        "fonts": brand["fonts"]
    }


def main():
    """CLI entry point: parse arguments, store the brand, print JSON result."""
    parser = argparse.ArgumentParser(description='Set brand configuration')
    parser.add_argument('--name', required=True, help='Brand name')
    parser.add_argument('--primary', required=True, help='Primary color (hex)')
    parser.add_argument('--secondary', required=True, help='Secondary color (hex)')
    parser.add_argument('--accent', help='Accent color (hex)')
    parser.add_argument('--fonts', help='Font configuration JSON')

    args = parser.parse_args()

    fonts = None
    if args.fonts:
        try:
            fonts = json.loads(args.fonts)
        except json.JSONDecodeError as e:
            print(f"Error: Invalid fonts JSON - {e}", file=sys.stderr)
            sys.exit(1)

    try:
        result = set_brand(args.name, args.primary, args.secondary, args.accent, fonts)
        print(json.dumps(result, indent=2))
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
--git a/coderrr-skills/skills/code-analyzer/Skills.md b/coderrr-skills/skills/code-analyzer/Skills.md new file mode 100644 index 0000000..ae5e7a5 --- /dev/null +++ b/coderrr-skills/skills/code-analyzer/Skills.md @@ -0,0 +1,175 @@ +--- +name: code-analyzer +description: Analyze code quality, structure, and maintainability. Use this skill when the user asks to lint code, count lines of code, find TODO/FIXME comments, analyze code structure, check for issues, or audit a codebase. Provides static analysis for Python and line counting for multiple languages. +--- + +This skill provides code quality analysis without external dependencies. It uses Python's AST module for static analysis and pattern matching for comment detection, supporting multiple programming languages for line counting. + +The user provides code files or directories to analyze. They may want quality checks, metrics, or to find action items like TODOs scattered through the codebase. + +## Approach + +Before invoking tools, understand the analysis goal: +- **Quality check**: Use `lint_python` for syntax and import issues +- **Size metrics**: Use `count_lines` for comprehensive line statistics +- **Action items**: Use `find_todos` to locate TODO, FIXME, HACK markers +- **Full audit**: Run all three tools sequentially for complete picture + +## Tools + +### lint_python + +Performs static analysis on Python files using the AST module. Detects syntax errors, unused imports, and provides code structure metrics. + +```bash +python tools/lint_python.py --file +``` + +**Arguments:** +- `--file` (required): Path to Python file to analyze + +**Output:** JSON with errors, warnings, and info (function/class/import counts). 
+ +**What it detects:** +- Syntax errors (with line numbers) +- Unused imports +- Function and class counts +- Import analysis + +**When to use:** +- Quick quality check before committing +- Finding unused imports to clean up +- Getting code structure overview +- Validating Python syntax + +**Limitations:** Uses only stdlib AST, so it won't detect runtime errors, type issues, or complex linting rules that tools like flake8/pylint catch. + +--- + +### count_lines + +Counts lines of code with detailed breakdown by type (code, comments, blank) and language. + +```bash +python tools/count_lines.py --path +``` + +**Arguments:** +- `--path` (required): File or directory to analyze + +**Output:** JSON with summary totals and per-language breakdown. + +**Supported languages:** +- Python (`.py`) +- JavaScript/TypeScript (`.js`, `.ts`, `.jsx`, `.tsx`) +- Java (`.java`) +- C/C++ (`.c`, `.cpp`, `.h`, `.hpp`) +- Go (`.go`) +- Rust (`.rs`) +- Ruby (`.rb`) +- PHP, Swift, Kotlin, Scala, C# + +**When to use:** +- Estimating project size +- Comparing code vs comment ratios +- Understanding language distribution +- Tracking codebase growth + +--- + +### find_todos + +Finds TODO, FIXME, HACK, XXX, BUG, and NOTE comments throughout codebase. + +```bash +python tools/find_todos.py --path [--types ] +``` + +**Arguments:** +- `--path` (required): File or directory to search +- `--types` (optional): Comma-separated marker types (default: `TODO,FIXME,HACK,XXX,BUG,NOTE`) + +**Output:** JSON with count, breakdown by type, and list of all items with file/line/text. 
+ +**When to use:** +- Reviewing technical debt +- Finding incomplete implementations +- Tracking known issues in code +- Generating action item lists + +## Common Patterns + +### Quick Python File Check +```bash +python tools/lint_python.py --file ./main.py +``` + +### Full Directory Analysis +```bash +python tools/count_lines.py --path ./src +``` + +### Find Only Critical Items +```bash +python tools/find_todos.py --path ./src --types FIXME,BUG +``` + +### Complete Code Audit +```bash +# Run all three for comprehensive analysis +python tools/lint_python.py --file ./main.py +python tools/count_lines.py --path ./src +python tools/find_todos.py --path ./src +``` + +## Best Practices + +1. **Run lint before commits** - Catch syntax errors and unused imports early +2. **Track line counts over time** - Monitor codebase growth +3. **Review TODOs regularly** - Don't let technical debt accumulate +4. **Focus on high-priority markers** - FIXME and BUG are usually more urgent than TODO +5. **Combine with file-search** - Find specific files first, then analyze them + +## Interpreting Results + +### lint_python Output +```json +{ + "file": "./main.py", + "errors": [], // Syntax errors - must fix + "warnings": [ // Quality issues - should fix + {"line": 1, "type": "unused_import", "message": "Unused import: os"} + ], + "info": { + "functions": 5, // Code structure overview + "classes": 2, + "imports": 8 + } +} +``` + +### count_lines Output +```json +{ + "summary": { + "total_lines": 1500, + "code_lines": 1100, // Executable code + "comment_lines": 200, // Documentation + "blank_lines": 200 // Formatting + } +} +``` + +A healthy ratio is roughly 70-80% code, 10-20% comments, 10-15% blank lines. 
+ +## Error Handling + +| Exit Code | Meaning | Recovery | +|-----------|---------|----------| +| 0 | Success | - | +| 1 | Invalid file path | Verify file exists | +| 2 | File parsing error | Check file encoding, syntax | + +## Dependencies + +None - uses Python's standard library only (ast, os, re, json). diff --git a/coderrr-skills/skills/code-analyzer/requirements.txt b/coderrr-skills/skills/code-analyzer/requirements.txt new file mode 100644 index 0000000..0f0cc73 --- /dev/null +++ b/coderrr-skills/skills/code-analyzer/requirements.txt @@ -0,0 +1,2 @@ +# No external dependencies required +# This skill uses Python's standard library only diff --git a/coderrr-skills/skills/code-analyzer/tools/count_lines.py b/coderrr-skills/skills/code-analyzer/tools/count_lines.py new file mode 100644 index 0000000..0cc8cf9 --- /dev/null +++ b/coderrr-skills/skills/code-analyzer/tools/count_lines.py @@ -0,0 +1,198 @@ +#!/usr/bin/env python3 +""" +Count lines of code with detailed breakdown. + +This tool counts total lines, code lines, comment lines, and blank lines +for various programming languages. 
#!/usr/bin/env python3
"""
Count lines of code with a detailed breakdown.

Reports total, code, comment, and blank line counts for a file or a whole
directory tree, grouped per language.

Usage:
    python count_lines.py --path ./main.py
    python count_lines.py --path ./src

Exit Codes:
    0 - Success
    1 - Invalid path
"""

import argparse
import sys
import json
import os
from pathlib import Path
from typing import Dict, Any
from collections import defaultdict


# Language definitions: extension -> (name, single_comment, multi_start, multi_end)
LANGUAGES = {
    '.py': ('python', '#', '"""', '"""'),
    '.pyw': ('python', '#', '"""', '"""'),
    '.js': ('javascript', '//', '/*', '*/'),
    '.jsx': ('javascript', '//', '/*', '*/'),
    '.ts': ('typescript', '//', '/*', '*/'),
    '.tsx': ('typescript', '//', '/*', '*/'),
    '.java': ('java', '//', '/*', '*/'),
    '.c': ('c', '//', '/*', '*/'),
    '.h': ('c', '//', '/*', '*/'),
    '.cpp': ('cpp', '//', '/*', '*/'),
    '.hpp': ('cpp', '//', '/*', '*/'),
    '.cc': ('cpp', '//', '/*', '*/'),
    '.go': ('go', '//', '/*', '*/'),
    '.rs': ('rust', '//', '/*', '*/'),
    '.rb': ('ruby', '#', '=begin', '=end'),
    '.php': ('php', '//', '/*', '*/'),
    '.swift': ('swift', '//', '/*', '*/'),
    '.kt': ('kotlin', '//', '/*', '*/'),
    '.scala': ('scala', '//', '/*', '*/'),
    '.cs': ('csharp', '//', '/*', '*/'),
}


def count_file_lines(file_path: Path) -> Dict[str, int]:
    """Tally total/code/comment/blank line counts for a single source file.

    Files with an unrecognized extension (and unreadable files) yield all
    zeros. Classification is heuristic: a line containing a block-comment
    opener counts as a comment even if code precedes it on the same line.
    """
    tally = {
        'total_lines': 0,
        'code_lines': 0,
        'comment_lines': 0,
        'blank_lines': 0
    }

    spec = LANGUAGES.get(file_path.suffix.lower())
    if spec is None:
        return tally
    _, line_marker, block_open, block_close = spec

    try:
        with open(file_path, 'r', encoding='utf-8', errors='ignore') as handle:
            inside_block = False

            for raw in handle:
                tally['total_lines'] += 1
                text = raw.strip()

                if not text:
                    tally['blank_lines'] += 1
                elif inside_block:
                    # Still inside a multiline comment; watch for the closer.
                    tally['comment_lines'] += 1
                    if block_close in text:
                        inside_block = False
                elif block_open in text:
                    # Multiline comment starts here; it may also end here.
                    tally['comment_lines'] += 1
                    remainder = text[text.index(block_open) + len(block_open):]
                    if block_close not in remainder:
                        inside_block = True
                elif text.startswith(line_marker):
                    tally['comment_lines'] += 1
                else:
                    tally['code_lines'] += 1

    except (IOError, OSError):
        # Unreadable files are silently skipped (reported as zeros).
        pass

    return tally


def count_lines(path_str: str) -> Dict[str, Any]:
    """Count lines of code in a file or directory tree.

    Args:
        path_str: Path to a file or directory.

    Returns:
        Dict with 'path', overall 'summary' counts, and a per-language
        'by_language' breakdown (files + line counts).

    Raises:
        ValueError: If the path does not exist.
    """
    target = Path(path_str)

    if not target.exists():
        raise ValueError(f"Path does not exist: {path_str}")

    summary = {
        'total_lines': 0,
        'code_lines': 0,
        'comment_lines': 0,
        'blank_lines': 0
    }

    by_language: Dict[str, Dict[str, int]] = defaultdict(
        lambda: {'files': 0, 'total_lines': 0, 'code_lines': 0, 'comment_lines': 0, 'blank_lines': 0}
    )

    if target.is_file():
        candidates = [target]
    else:
        candidates = [
            Path(root) / filename
            for root, _, filenames in os.walk(target)
            for filename in filenames
            if (Path(root) / filename).suffix.lower() in LANGUAGES
        ]

    for source in candidates:
        counts = count_file_lines(source)
        if counts['total_lines'] == 0:
            # Unknown extension or empty/unreadable file — nothing to record.
            continue

        language = LANGUAGES[source.suffix.lower()][0]
        bucket = by_language[language]
        bucket['files'] += 1
        for key in summary:
            summary[key] += counts[key]
            bucket[key] += counts[key]

    return {
        'path': str(target),
        'summary': summary,
        'by_language': dict(by_language)
    }


def main():
    """CLI entry point: parse arguments, count lines, print JSON result."""
    parser = argparse.ArgumentParser(
        description='Count lines of code',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='''
Examples:
    python count_lines.py --path ./main.py
    python count_lines.py --path ./src
        '''
    )
    parser.add_argument(
        '--path',
        required=True,
        help='File or directory to analyze'
    )

    args = parser.parse_args()

    try:
        result = count_lines(args.path)
        print(json.dumps(result, indent=2))
    except ValueError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
#!/usr/bin/env python3
"""
Find TODO, FIXME, HACK, and XXX comments in code.

This tool searches for common task markers in code comments
and outputs their locations.

Usage:
    python find_todos.py --path ./src
    python find_todos.py --path ./src --types FIXME,TODO

Exit Codes:
    0 - Success
    1 - Invalid path
"""

import argparse
import sys
import json
import os
import re
from pathlib import Path
from typing import Dict, Any, List


# Default markers to search for
DEFAULT_MARKERS = ['TODO', 'FIXME', 'HACK', 'XXX', 'BUG', 'NOTE']

# File extensions to search
SEARCHABLE_EXTENSIONS = {
    '.py', '.pyw', '.js', '.jsx', '.ts', '.tsx', '.java', '.c', '.cpp', '.h', '.hpp',
    '.cc', '.cs', '.go', '.rs', '.rb', '.php', '.swift', '.kt', '.scala', '.r',
    '.sql', '.sh', '.bash', '.zsh', '.ps1', '.bat', '.cmd',
    '.html', '.css', '.scss', '.sass', '.less', '.vue', '.svelte',
    '.md', '.txt', '.rst', '.yaml', '.yml', '.toml', '.xml'
}


def find_todos_in_file(file_path: Path, markers: List[str]) -> List[Dict[str, Any]]:
    """Find TODO-like comments in a single file.

    Returns one dict per hit with file, 1-based line, marker type, and the
    trailing text (or '(no description)').
    """
    todos = []

    # Both \b anchors are required: without the trailing \b, markers matched
    # inside ordinary words ("Notebook" -> NOTE, "TODOs" -> TODO), producing
    # false positives. Assumes markers are word characters.
    pattern = r'\b(' + '|'.join(re.escape(m) for m in markers) + r')\b[\s:]*(.*)$'
    regex = re.compile(pattern, re.IGNORECASE)

    try:
        with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
            for line_num, line in enumerate(f, 1):
                match = regex.search(line)
                if match:
                    marker_type = match.group(1).upper()
                    text = match.group(2).strip()

                    # Clean up the text (remove trailing comment markers)
                    text = re.sub(r'[\*/]+\s*$', '', text).strip()

                    todos.append({
                        'file': str(file_path),
                        'line': line_num,
                        'type': marker_type,
                        'text': text if text else '(no description)'
                    })
    except (IOError, OSError):
        # Unreadable files are skipped silently.
        pass

    return todos


def find_todos(path_str: str, marker_types: List[str] = None) -> Dict[str, Any]:
    """
    Find TODO-like comments in files.

    Args:
        path_str: Path to file or directory
        marker_types: List of marker types to search for (defaults to
            DEFAULT_MARKERS)

    Returns:
        Dictionary with 'count', a per-type 'by_type' summary, and the
        sorted list of 'items'

    Raises:
        ValueError: If path doesn't exist
    """
    path = Path(path_str)

    if not path.exists():
        raise ValueError(f"Path does not exist: {path_str}")

    markers = marker_types if marker_types else DEFAULT_MARKERS
    all_todos = []

    if path.is_file():
        all_todos.extend(find_todos_in_file(path, markers))
    else:
        for root, _, files in os.walk(path):
            for filename in files:
                file_path = Path(root) / filename
                if file_path.suffix.lower() in SEARCHABLE_EXTENSIONS:
                    all_todos.extend(find_todos_in_file(file_path, markers))

    # Sort by file and line number
    all_todos.sort(key=lambda x: (x['file'], x['line']))

    # Group by type for summary
    by_type: Dict[str, int] = {}
    for todo in all_todos:
        by_type[todo['type']] = by_type.get(todo['type'], 0) + 1

    return {
        'count': len(all_todos),
        'by_type': by_type,
        'items': all_todos
    }


def main():
    """CLI entry point: parse arguments, search for markers, print JSON."""
    parser = argparse.ArgumentParser(
        description='Find TODO, FIXME, HACK, and XXX comments',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='''
Examples:
    python find_todos.py --path ./src
    python find_todos.py --path ./src --types FIXME,TODO
    python find_todos.py --path ./main.py
        '''
    )
    parser.add_argument(
        '--path',
        required=True,
        help='File or directory to search'
    )
    parser.add_argument(
        '--types',
        help='Comma-separated list of marker types (default: TODO,FIXME,HACK,XXX,BUG,NOTE)'
    )

    args = parser.parse_args()

    marker_types = None
    if args.types:
        marker_types = [t.strip().upper() for t in args.types.split(',')]

    try:
        result = find_todos(args.path, marker_types)
        print(json.dumps(result, indent=2))
    except ValueError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
b/coderrr-skills/skills/code-analyzer/tools/lint_python.py new file mode 100644 index 0000000..01c9341 --- /dev/null +++ b/coderrr-skills/skills/code-analyzer/tools/lint_python.py @@ -0,0 +1,186 @@ +#!/usr/bin/env python3 +""" +Basic Python linting using the AST module. + +This tool performs static analysis on Python files to detect common issues +like syntax errors, unused imports, and provides code statistics. + +Usage: + python lint_python.py --file ./main.py + +Exit Codes: + 0 - Success (even if issues found) + 1 - Invalid file path + 2 - Unable to parse file +""" + +import argparse +import sys +import json +import ast +from pathlib import Path +from typing import Dict, Any, List, Set + + +class ImportVisitor(ast.NodeVisitor): + """AST visitor to collect import information.""" + + def __init__(self): + self.imports: Set[str] = set() + self.from_imports: Dict[str, List[str]] = {} + self.all_names: Set[str] = set() + + def visit_Import(self, node): + for alias in node.names: + name = alias.asname if alias.asname else alias.name + self.imports.add(name) + self.generic_visit(node) + + def visit_ImportFrom(self, node): + module = node.module or '' + for alias in node.names: + name = alias.asname if alias.asname else alias.name + if name == '*': + continue + self.imports.add(name) + if module not in self.from_imports: + self.from_imports[module] = [] + self.from_imports[module].append(name) + self.generic_visit(node) + + +class NameVisitor(ast.NodeVisitor): + """AST visitor to collect all name usages.""" + + def __init__(self): + self.used_names: Set[str] = set() + self.defined_names: Set[str] = set() + self.function_count = 0 + self.class_count = 0 + + def visit_Name(self, node): + if isinstance(node.ctx, ast.Load): + self.used_names.add(node.id) + elif isinstance(node.ctx, ast.Store): + self.defined_names.add(node.id) + self.generic_visit(node) + + def visit_FunctionDef(self, node): + self.function_count += 1 + self.defined_names.add(node.name) + 
def lint_python(file_path: str) -> Dict[str, Any]:
    """
    Run a lightweight static analysis over a single Python source file.

    Args:
        file_path: Path to the Python file.

    Returns:
        Dict with 'file', 'errors', 'warnings' and summary 'info' counts.

    Raises:
        ValueError: If the file is missing or does not end in .py.
    """
    target = Path(file_path)

    if not target.exists():
        raise ValueError(f"File does not exist: {file_path}")
    if target.suffix != '.py':
        raise ValueError(f"Not a Python file: {file_path}")

    source = target.read_text(encoding='utf-8')

    report: Dict[str, Any] = {
        'file': str(target),
        'errors': [],
        'warnings': [],
        'info': {'functions': 0, 'classes': 0, 'imports': 0},
    }

    try:
        tree = ast.parse(source, filename=str(target))
    except SyntaxError as err:
        # A file that does not parse gets a single syntax_error entry
        # and no further analysis.
        report['errors'].append({
            'line': err.lineno,
            'type': 'syntax_error',
            'message': str(err.msg),
        })
        return report

    imports = ImportVisitor()
    imports.visit(tree)
    names = NameVisitor()
    names.visit(tree)

    # Anything imported but never loaded by name is reported as unused.
    for candidate in imports.imports:
        if candidate not in names.used_names:
            report['warnings'].append({
                'line': 1,  # AST doesn't easily give us the line for this
                'type': 'unused_import',
                'message': f"Unused import: {candidate}",
            })

    report['info'] = {
        'functions': names.function_count,
        'classes': names.class_count,
        'imports': len(imports.imports),
    }

    return report
formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=''' +Examples: + python lint_python.py --file ./main.py + python lint_python.py --file ./src/utils.py + ''' + ) + parser.add_argument( + '--file', + required=True, + help='Python file to lint' + ) + + args = parser.parse_args() + + try: + result = lint_python(args.file) + print(json.dumps(result, indent=2)) + except ValueError as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + except Exception as e: + print(f"Error parsing file: {e}", file=sys.stderr) + sys.exit(2) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/docx/Skills.md b/coderrr-skills/skills/docx/Skills.md new file mode 100644 index 0000000..a06b659 --- /dev/null +++ b/coderrr-skills/skills/docx/Skills.md @@ -0,0 +1,160 @@ +--- +name: docx +description: Create, edit, and analyze Word documents with professional formatting. Use this skill when the user asks to create Word documents, add content to DOCX files, extract text from Word files, work with tables, headers, footers, or analyze document structure. Supports tracked changes, comments, and advanced formatting. +--- + +This skill provides comprehensive Word document manipulation using python-docx. It handles document creation, content extraction, formatting, and structural analysis. + +The user provides document requirements or existing files to process. They may want to create new documents, modify existing ones, or extract information from Word files. + +## Approach + +Before invoking tools, understand the document operation: +- **Create new**: Use `create_docx` with content structure +- **Extract content**: Use `read_docx` to get text, tables, or metadata +- **Modify existing**: Use `edit_docx` to add or update content +- **Analyze structure**: Use `analyze_docx` for document breakdown + +## Tools + +### create_docx + +Creates a new Word document with specified content and formatting. 
+ +```bash +python tools/create_docx.py --output --title [--content <json>] [--template <path>] +``` + +**Arguments:** +- `--output` (required): Output file path (.docx) +- `--title` (required): Document title +- `--content` (optional): JSON structure defining document content +- `--template` (optional): Path to template document + +**Content JSON Structure:** +```json +{ + "sections": [ + {"type": "heading", "level": 1, "text": "Main Title"}, + {"type": "paragraph", "text": "Body text here..."}, + {"type": "heading", "level": 2, "text": "Subsection"}, + {"type": "list", "items": ["Item 1", "Item 2", "Item 3"], "ordered": false}, + {"type": "table", "headers": ["Col1", "Col2"], "rows": [["A", "B"], ["C", "D"]]} + ] +} +``` + +**When to use:** +- Generating reports +- Creating structured documents +- Building documents from templates +- Automating document workflows + +--- + +### read_docx + +Extracts content from existing Word documents. + +```bash +python tools/read_docx.py --file <path> [--format <text|json|markdown>] [--include-tables] +``` + +**Arguments:** +- `--file` (required): Path to Word document +- `--format` (optional): Output format - `text`, `json`, or `markdown` (default: text) +- `--include-tables` (optional): Include table data in output + +**Output:** Document content in specified format. + +**When to use:** +- Extracting text for analysis +- Converting Word to other formats +- Reading document structure +- Processing uploaded documents + +--- + +### edit_docx + +Modifies an existing Word document. 
+ +```bash +python tools/edit_docx.py --file <path> --output <path> --operations <json> +``` + +**Arguments:** +- `--file` (required): Input Word document +- `--output` (required): Output file path +- `--operations` (required): JSON array of edit operations + +**Operations JSON:** +```json +[ + {"action": "append_paragraph", "text": "New paragraph"}, + {"action": "replace_text", "find": "old text", "replace": "new text"}, + {"action": "add_heading", "text": "New Section", "level": 2}, + {"action": "insert_table", "headers": ["A", "B"], "rows": [["1", "2"]]} +] +``` + +**When to use:** +- Adding content to existing documents +- Find and replace operations +- Appending sections +- Batch document updates + +--- + +### analyze_docx + +Analyzes document structure and provides detailed metadata. + +```bash +python tools/analyze_docx.py --file <path> +``` + +**Arguments:** +- `--file` (required): Path to Word document + +**Output:** JSON with word count, paragraph count, heading structure, table count, styles used, and more. + +**When to use:** +- Auditing document structure +- Checking document properties +- Understanding document composition +- Quality assurance checks + +## Common Patterns + +### Create a Simple Report +```bash +python tools/create_docx.py --output report.docx --title "Monthly Report" --content '{"sections": [{"type": "heading", "level": 1, "text": "Summary"}, {"type": "paragraph", "text": "This month we achieved..."}]}' +``` + +### Extract All Text +```bash +python tools/read_docx.py --file document.docx --format text +``` + +### Add Section to Existing Document +```bash +python tools/edit_docx.py --file original.docx --output updated.docx --operations '[{"action": "append_paragraph", "text": "Additional content here"}]' +``` + +### Get Document Statistics +```bash +python tools/analyze_docx.py --file document.docx +``` + +## Best Practices + +1. **Use templates** - Start from well-formatted templates for consistent styling +2. 
**Structure content as JSON** - Makes complex documents reproducible +3. **Preserve originals** - Always output to new file when editing +4. **Check analysis first** - Understand document structure before modifying +5. **Use markdown format** - Great for further processing or display + +## Dependencies + +Requires `python-docx>=0.8.11`. Automatically installed with the skill. diff --git a/coderrr-skills/skills/docx/requirements.txt b/coderrr-skills/skills/docx/requirements.txt new file mode 100644 index 0000000..339aa01 --- /dev/null +++ b/coderrr-skills/skills/docx/requirements.txt @@ -0,0 +1 @@ +python-docx>=0.8.11 diff --git a/coderrr-skills/skills/docx/tools/analyze_docx.py b/coderrr-skills/skills/docx/tools/analyze_docx.py new file mode 100644 index 0000000..c65a866 --- /dev/null +++ b/coderrr-skills/skills/docx/tools/analyze_docx.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python3 +""" +Analyze Word document structure and metadata. + +Usage: + python analyze_docx.py --file document.docx +""" + +import argparse +import sys +import json +from pathlib import Path +from collections import Counter + +try: + from docx import Document +except ImportError: + print("Error: 'python-docx' package is required. 
def analyze_docx(file_path: str) -> dict:
    """Summarize a Word document: counts, heading outline, and core properties."""
    doc = Document(file_path)

    words = 0
    paragraph_total = 0
    heading_total = 0
    style_counter = Counter()
    outline = []

    for block in doc.paragraphs:
        # Blank paragraphs are ignored everywhere (counts, styles, outline).
        if not block.text.strip():
            continue

        paragraph_total += 1
        words += len(block.text.split())

        style = block.style.name if block.style else "Normal"
        style_counter[style] += 1

        if 'Heading' in style:
            heading_total += 1
            outline.append({
                "text": block.text[:100],  # Truncate long headings
                "style": style
            })

    props = doc.core_properties

    return {
        "file": str(file_path),
        "statistics": {
            "word_count": words,
            "paragraph_count": paragraph_total,
            "heading_count": heading_total,
            "table_count": len(doc.tables),
            "section_count": len(doc.sections)
        },
        "structure": {
            "headings": outline[:20],  # Limit to first 20
            "styles_used": dict(style_counter.most_common(10))
        },
        "properties": {
            "title": props.title or "",
            "author": props.author or "",
            "created": str(props.created) if props.created else "",
            "modified": str(props.modified) if props.modified else ""
        }
    }
def add_content_section(doc, section):
    """
    Append one content section to an open python-docx Document.

    Args:
        doc: python-docx Document to append to (mutated in place).
        section: Dict with a 'type' key of 'heading', 'paragraph', 'list',
            'table', or 'page_break'; defaults to 'paragraph' when missing.
            Unknown types are silently ignored.
    """
    section_type = section.get('type', 'paragraph')

    if section_type == 'heading':
        doc.add_heading(section.get('text', ''), level=section.get('level', 1))

    elif section_type == 'paragraph':
        para = doc.add_paragraph(section.get('text', ''))
        # Bold/italic flags apply to every run of the new paragraph.
        if section.get('bold'):
            for run in para.runs:
                run.bold = True
        if section.get('italic'):
            for run in para.runs:
                run.italic = True

    elif section_type == 'list':
        style = 'List Number' if section.get('ordered', False) else 'List Bullet'
        for item in section.get('items', []):
            doc.add_paragraph(item, style=style)

    elif section_type == 'table':
        headers = section.get('headers', [])
        rows = section.get('rows', [])

        if headers:
            table = doc.add_table(rows=1, cols=len(headers))
            table.style = 'Table Grid'

            # First row carries the headers.
            header_cells = table.rows[0].cells
            for i, header in enumerate(headers):
                header_cells[i].text = str(header)

            for row_data in rows:
                row_cells = table.add_row().cells
                for i, cell_data in enumerate(row_data):
                    if i < len(row_cells):  # extra cells beyond width dropped
                        row_cells[i].text = str(cell_data)
        elif rows:
            # Bug fix: a table with rows but no headers was previously
            # dropped silently. Build a header-less table sized from the
            # widest row instead.
            width = max(len(row) for row in rows)
            table = doc.add_table(rows=0, cols=width)
            table.style = 'Table Grid'
            for row_data in rows:
                row_cells = table.add_row().cells
                for i, cell_data in enumerate(row_data):
                    if i < len(row_cells):
                        row_cells[i].text = str(cell_data)

    elif section_type == 'page_break':
        doc.add_page_break()
def create_docx(output_path: str, title: str, content: dict = None, template_path: str = None):
    """
    Build a .docx file from a title plus an optional content spec.

    Args:
        output_path: Destination path for the saved document.
        title: Text rendered with the Title style (heading level 0).
        content: Optional dict shaped like {'sections': [...]}; each entry
            is handed to add_content_section.
        template_path: Optional existing .docx used as the starting point.

    Returns:
        The output path, for convenience.
    """
    # Start from the template when one is supplied and actually exists,
    # otherwise from a blank document.
    if template_path and Path(template_path).exists():
        doc = Document(template_path)
    else:
        doc = Document()

    doc.add_heading(title, level=0)  # level 0 renders as the Title style

    for section in (content or {}).get('sections', []):
        add_content_section(doc, section)

    doc.save(output_path)
    return output_path
def apply_operation(doc, operation):
    """
    Apply one edit operation to an open python-docx Document.

    Args:
        doc: Document to mutate in place.
        operation: Dict with an 'action' key; unknown actions are ignored.

    NOTE(review): 'replace_text' only rewrites runs that contain the whole
    search string — text Word has split across runs is left unchanged.
    Confirm this limitation is acceptable for callers.
    """
    action = operation.get('action')

    if action == 'append_paragraph':
        doc.add_paragraph(operation.get('text', ''))

    elif action == 'add_heading':
        doc.add_heading(operation.get('text', ''), level=operation.get('level', 1))

    elif action == 'replace_text':
        needle = operation.get('find', '')
        replacement = operation.get('replace', '')
        for paragraph in doc.paragraphs:
            if needle not in paragraph.text:
                continue
            for run in paragraph.runs:
                if needle in run.text:
                    run.text = run.text.replace(needle, replacement)

    elif action == 'insert_table':
        headers = operation.get('headers', [])
        rows = operation.get('rows', [])

        if headers:
            table = doc.add_table(rows=1, cols=len(headers))
            table.style = 'Table Grid'

            for idx, header in enumerate(headers):
                table.rows[0].cells[idx].text = str(header)

            for row_data in rows:
                cells = table.add_row().cells
                for idx, value in enumerate(row_data):
                    if idx < len(cells):  # extra cells beyond width dropped
                        cells[idx].text = str(value)

    elif action == 'add_page_break':
        doc.add_page_break()

    elif action == 'add_list':
        style = 'List Number' if operation.get('ordered', False) else 'List Bullet'
        for item in operation.get('items', []):
            doc.add_paragraph(item, style=style)
parser.add_argument('--operations', required=True, help='JSON array of edit operations') + + args = parser.parse_args() + + if not Path(args.file).exists(): + print(f"Error: File not found: {args.file}", file=sys.stderr) + sys.exit(1) + + try: + operations = json.loads(args.operations) + except json.JSONDecodeError as e: + print(f"Error: Invalid operations JSON - {e}", file=sys.stderr) + sys.exit(1) + + try: + result = edit_docx(args.file, args.output, operations) + print(json.dumps({"status": "success", "file": result})) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/docx/tools/read_docx.py b/coderrr-skills/skills/docx/tools/read_docx.py new file mode 100644 index 0000000..4ef7a6a --- /dev/null +++ b/coderrr-skills/skills/docx/tools/read_docx.py @@ -0,0 +1,148 @@ +#!/usr/bin/env python3 +""" +Read and extract content from Word documents. + +Usage: + python read_docx.py --file document.docx --format text +""" + +import argparse +import sys +import json +from pathlib import Path + +try: + from docx import Document +except ImportError: + print("Error: 'python-docx' package is required. 
def read_docx_json(file_path: str, include_tables: bool = False) -> dict:
    """Return document content as a dict of paragraphs, headings and tables."""
    doc = Document(file_path)

    payload = {
        "paragraphs": [],
        "headings": [],
        "tables": []
    }

    for paragraph in doc.paragraphs:
        body = paragraph.text
        if not body.strip():
            continue

        style = paragraph.style.name if paragraph.style else "Normal"
        # Headings appear twice: once in the outline, once as a paragraph.
        if 'Heading' in style:
            payload["headings"].append({
                "text": body,
                "level": style
            })
        payload["paragraphs"].append({
            "text": body,
            "style": style
        })

    if include_tables:
        payload["tables"] = extract_tables(doc)

    return payload
{para.text}") + else: + md_lines.append(para.text) + + md_lines.append("") + + if include_tables: + tables = extract_tables(doc) + for table in tables: + if table: + # Header row + md_lines.append("| " + " | ".join(table[0]) + " |") + md_lines.append("| " + " | ".join(["---"] * len(table[0])) + " |") + # Data rows + for row in table[1:]: + md_lines.append("| " + " | ".join(row) + " |") + md_lines.append("") + + return '\n'.join(md_lines) + + +def main(): + parser = argparse.ArgumentParser(description='Read Word documents') + parser.add_argument('--file', required=True, help='Path to Word document') + parser.add_argument('--format', choices=['text', 'json', 'markdown'], default='text', + help='Output format (default: text)') + parser.add_argument('--include-tables', action='store_true', help='Include table data') + + args = parser.parse_args() + + if not Path(args.file).exists(): + print(f"Error: File not found: {args.file}", file=sys.stderr) + sys.exit(1) + + try: + if args.format == 'text': + result = read_docx_text(args.file, args.include_tables) + print(result) + elif args.format == 'json': + result = read_docx_json(args.file, args.include_tables) + print(json.dumps(result, indent=2)) + elif args.format == 'markdown': + result = read_docx_markdown(args.file, args.include_tables) + print(result) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/file-search/Skills.md b/coderrr-skills/skills/file-search/Skills.md new file mode 100644 index 0000000..7bdac35 --- /dev/null +++ b/coderrr-skills/skills/file-search/Skills.md @@ -0,0 +1,138 @@ +--- +name: file-search +description: Find files and search content within your filesystem. Use this skill when the user asks to find files by name or pattern, search for text within files (grep-like operations), get directory statistics, count files, or analyze folder structure. 
Handles glob patterns, regex search, and comprehensive file system analysis. +--- + +This skill provides powerful filesystem search and analysis capabilities using only Python's standard library. It handles file discovery, content searching, and statistical analysis of directories. + +The user provides a search query, file pattern, or directory path. They may want to find specific files, search for text patterns, or understand the structure of a codebase. + +## Approach + +Before invoking tools, understand the search intent: +- **Find files by name/pattern**: Use `find_files` with glob patterns +- **Search within file contents**: Use `search_content` with text or regex queries +- **Analyze directory structure**: Use `file_stats` for size, counts, and composition +- **Combined operations**: Chain tools for complex queries (e.g., find Python files, then search within them) + +## Tools + +### find_files + +Recursively finds files and directories matching glob patterns. + +```bash +python tools/find_files.py --pattern <glob> --path <directory> [--type <file|dir|all>] +``` + +**Arguments:** +- `--pattern` (required): Glob pattern to match (e.g., `*.py`, `**/*.json`, `test_*`) +- `--path` (required): Directory to search in +- `--type` (optional): Filter by type - `file`, `dir`, or `all` (default: all) + +**Output:** JSON array of matching paths. + +**When to use:** +- Finding all files of a certain type +- Locating configuration files +- Discovering test files or specific modules +- Listing directories matching a pattern + +**Glob Pattern Guide:** +- `*` matches any characters in a single path segment +- `**` matches any characters across path segments (recursive) +- `?` matches a single character +- `[abc]` matches any character in brackets + +--- + +### search_content + +Searches for text patterns within files. Similar to grep but outputs structured JSON. 
+ +```bash +python tools/search_content.py --query <text> --path <file_or_dir> [--regex] +``` + +**Arguments:** +- `--query` (required): Text or regex pattern to search for +- `--path` (required): File or directory to search in +- `--regex` (optional): Treat query as a regular expression + +**Output:** JSON array of matches with file, line number, and content. + +**When to use:** +- Finding where a function or variable is used +- Locating TODO comments or specific strings +- Searching for import statements +- Finding configuration values + +**Supported file types:** Python, JavaScript, TypeScript, Java, C/C++, Go, Rust, Ruby, PHP, HTML, CSS, JSON, YAML, Markdown, and more. + +--- + +### file_stats + +Analyzes files and directories, providing comprehensive statistics. + +```bash +python tools/file_stats.py --path <file_or_dir> +``` + +**Arguments:** +- `--path` (required): File or directory to analyze + +**Output:** JSON with file counts, sizes, type breakdown, and largest files. + +**When to use:** +- Understanding codebase composition +- Finding the largest files in a project +- Counting files by type +- Auditing directory structure + +## Common Patterns + +### Find All Python Files in Project +```bash +python tools/find_files.py --pattern "**/*.py" --path ./src --type file +``` + +### Search for Function Usage +```bash +python tools/search_content.py --query "def process_data" --path ./src +``` + +### Find Imports with Regex +```bash +python tools/search_content.py --query "^import\s+\w+" --path ./src --regex +``` + +### Get Project Statistics +```bash +python tools/file_stats.py --path ./my-project +``` + +### Find Only Directories +```bash +python tools/find_files.py --pattern "*test*" --path . --type dir +``` + +## Best Practices + +1. **Use specific paths** - Narrow the search scope for faster results +2. **Leverage glob patterns** - `**/*.py` is more efficient than searching everything +3. 
**Use regex for complex patterns** - When simple text matching isn't enough +4. **Check file_stats first** - Understand the codebase before deep searching +5. **Combine tools** - Find files first, then search within specific ones + +## Error Handling + +| Exit Code | Meaning | Recovery | +|-----------|---------|----------| +| 0 | Success | - | +| 1 | Invalid path or pattern | Verify path exists and pattern syntax | +| 2 | Permission denied | Check file permissions | + +## Dependencies + +None - uses Python's standard library only (pathlib, os, re, json). diff --git a/coderrr-skills/skills/file-search/tools/file_stats.py b/coderrr-skills/skills/file-search/tools/file_stats.py new file mode 100644 index 0000000..2fdfe60 --- /dev/null +++ b/coderrr-skills/skills/file-search/tools/file_stats.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python3 +""" +Get statistics about files and directories. + +This tool analyzes a file or directory and provides statistics including +file count, total size, file type breakdown, and largest files. 
def format_size(size_bytes: int) -> str:
    """Render a byte count as a human-readable string with two decimals."""
    value = float(size_bytes)
    for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
        if value < 1024.0:
            return f"{value:.2f} {unit}"
        value /= 1024.0
    # Anything past terabytes is reported in petabytes.
    return f"{value:.2f} PB"
def file_stats(path_str: str) -> Dict[str, Any]:
    """
    Collect statistics for a file or directory.

    Args:
        path_str: Path to analyze.

    Returns:
        Per-file stats for a file, aggregate walk stats for a directory.

    Raises:
        ValueError: If the path does not exist.
    """
    target = Path(path_str)

    if not target.exists():
        raise ValueError(f"Path does not exist: {path_str}")

    # Dispatch on the path kind; directories get the recursive aggregate.
    handler = get_file_stats if target.is_file() else get_directory_stats
    return handler(target)
def find_files(pattern: str, search_path: str, file_type: str = 'all') -> List[str]:
    """
    Find files and directories matching a glob pattern.

    Args:
        pattern: Glob pattern to match (e.g. "*.py", "**/*.json").
        search_path: Directory to search in.
        file_type: Filter by kind - 'file', 'dir', or 'all'.

    Returns:
        Sorted list of matching paths as strings.

    Raises:
        ValueError: If the path doesn't exist, isn't a directory, or the
            pattern is invalid.
    """
    base = Path(search_path)

    if not base.exists():
        raise ValueError(f"Path does not exist: {search_path}")
    if not base.is_dir():
        raise ValueError(f"Path is not a directory: {search_path}")

    results: List[str] = []

    try:
        for hit in base.glob(pattern):
            # Apply the optional kind filter.
            if file_type == 'file' and not hit.is_file():
                continue
            if file_type == 'dir' and not hit.is_dir():
                continue

            # Report paths rooted at the caller-supplied search directory.
            try:
                results.append(str(Path(search_path) / hit.relative_to(base)))
            except ValueError:
                results.append(str(hit))
    except Exception as exc:
        raise ValueError(f"Invalid pattern: {exc}")

    return sorted(results)
--type file + python find_files.py --pattern "*" --path ./project --type dir + ''' + ) + parser.add_argument( + '--pattern', + required=True, + help='Glob pattern to match (e.g., "*.py", "**/*.json")' + ) + parser.add_argument( + '--path', + required=True, + help='Directory to search in' + ) + parser.add_argument( + '--type', + choices=['file', 'dir', 'all'], + default='all', + help='Filter by type (default: all)' + ) + + args = parser.parse_args() + + try: + matches = find_files(args.pattern, args.path, args.type) + print(json.dumps(matches, indent=2)) + except ValueError as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + except PermissionError as e: + print(f"Error: Permission denied - {e}", file=sys.stderr) + sys.exit(2) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/file-search/tools/search_content.py b/coderrr-skills/skills/file-search/tools/search_content.py new file mode 100644 index 0000000..dc0d3af --- /dev/null +++ b/coderrr-skills/skills/file-search/tools/search_content.py @@ -0,0 +1,162 @@ +#!/usr/bin/env python3 +""" +Search for text within files (grep-like functionality). + +This tool searches for text patterns within files and returns matches +with line numbers. 
import argparse
import sys
import json
import os
import re
from pathlib import Path
from typing import List, Dict, Any


# File extensions (and exact file names, e.g. 'Dockerfile') considered text
# and therefore safe to scan line by line.
SEARCHABLE_EXTENSIONS = {
    '.py', '.js', '.ts', '.jsx', '.tsx', '.java', '.c', '.cpp', '.h', '.hpp',
    '.cs', '.go', '.rs', '.rb', '.php', '.swift', '.kt', '.scala', '.r',
    '.sql', '.sh', '.bash', '.zsh', '.ps1', '.bat', '.cmd',
    '.html', '.css', '.scss', '.sass', '.less', '.xml', '.json', '.yaml', '.yml',
    '.md', '.txt', '.rst', '.ini', '.cfg', '.conf', '.env', '.toml',
    '.gitignore', '.dockerignore', 'Dockerfile', 'Makefile', '.editorconfig'
}


def is_searchable(file_path: Path) -> bool:
    """Return True when *file_path* looks like a text file worth scanning."""
    # Match either the lower-cased suffix or the bare file name; the set
    # holds exact names such as 'Dockerfile' alongside extensions.
    return (file_path.suffix.lower() in SEARCHABLE_EXTENSIONS
            or file_path.name in SEARCHABLE_EXTENSIONS)


def search_file(file_path: Path, query: str, is_regex: bool = False) -> List[Dict[str, Any]]:
    """
    Scan one file and collect every line matching *query*.

    Plain-text queries match case-insensitively; regex queries match
    case-sensitively via re.search. Unreadable files yield no hits.

    Returns:
        List of {'file', 'line', 'content'} dicts, one per matching line.
    """
    hits: List[Dict[str, Any]] = []
    needle = query if is_regex else query.lower()
    try:
        with open(file_path, 'r', encoding='utf-8', errors='ignore') as handle:
            for number, text in enumerate(handle, 1):
                matched = (re.search(query, text) is not None
                           if is_regex else needle in text.lower())
                if matched:
                    hits.append({
                        'file': str(file_path),
                        'line': number,
                        'content': text.rstrip(),
                    })
    except (IOError, OSError):
        # Best effort: files that cannot be read are silently skipped.
        pass
    return hits


def search_content(query: str, search_path: str, is_regex: bool = False) -> List[Dict[str, Any]]:
    """
    Search *query* across a single file or a whole directory tree.

    Args:
        query: Text or regex pattern to look for.
        search_path: File or directory to search in.
        is_regex: Treat *query* as a regular expression.

    Returns:
        List of match dicts (see search_file).

    Raises:
        ValueError: If the path is missing or the regex fails to compile.
    """
    target = Path(search_path)
    if not target.exists():
        raise ValueError(f"Path does not exist: {search_path}")

    if is_regex:
        # Validate the pattern once up front so a bad regex fails loudly
        # instead of silently matching nothing on every line.
        try:
            re.compile(query)
        except re.error as e:
            raise ValueError(f"Invalid regex pattern: {e}")

    if target.is_file():
        return search_file(target, query, is_regex)

    results: List[Dict[str, Any]] = []
    for folder, _, names in os.walk(target):
        for name in names:
            candidate = Path(folder) / name
            if is_searchable(candidate):
                results.extend(search_file(candidate, query, is_regex))
    return results
python search_content.py --query "import" --path ./main.py + ''' + ) + parser.add_argument( + '--query', + required=True, + help='Text or pattern to search for' + ) + parser.add_argument( + '--path', + required=True, + help='File or directory to search in' + ) + parser.add_argument( + '--regex', + action='store_true', + help='Treat query as a regular expression' + ) + + args = parser.parse_args() + + try: + matches = search_content(args.query, args.path, args.regex) + print(json.dumps(matches, indent=2)) + except ValueError as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/internal-comms/Skills.md b/coderrr-skills/skills/internal-comms/Skills.md new file mode 100644 index 0000000..c62d3d4 --- /dev/null +++ b/coderrr-skills/skills/internal-comms/Skills.md @@ -0,0 +1,210 @@ +--- +name: internal-comms +description: Write internal communications like status reports, newsletters, announcements, and team updates. Use this skill when the user needs to draft status reports, write team newsletters, create announcements, compose meeting summaries, or generate internal documentation. +--- + +This skill helps create professional internal communications. It generates well-structured content for various internal communication needs with appropriate tone and formatting. + +The user wants to write internal communications. They may provide context, key points, or ask for a specific type of document. + +## Approach + +When creating internal communications: +1. **Choose type**: Identify the communication type +2. **Gather context**: Collect key information and audience +3. **Generate**: Use appropriate tool for content type +4. **Format**: Export in desired format + +## Tools + +### status_report + +Generates project or team status reports. 
+ +```bash +python tools/status_report.py --project <name> --period <period> --data <json> [--format <markdown|html>] +``` + +**Arguments:** +- `--project` (required): Project/team name +- `--period` (required): Reporting period (e.g., "Week 5", "January 2024") +- `--data` (required): Report data JSON +- `--format` (optional): Output format (default: markdown) + +**Data JSON:** +```json +{ + "highlights": ["Completed feature X", "Launched beta"], + "progress": {"tasks_completed": 15, "tasks_remaining": 8}, + "blockers": ["Waiting on API access"], + "next_steps": ["Begin testing phase"], + "metrics": {"velocity": 42, "bugs_fixed": 7} +} +``` + +**When to use:** +- Weekly/monthly status updates +- Project progress reports +- Team performance summaries + +--- + +### newsletter + +Creates internal newsletters. + +```bash +python tools/newsletter.py --title <title> --sections <json> [--format <markdown|html>] +``` + +**Arguments:** +- `--title` (required): Newsletter title +- `--sections` (required): Section content JSON +- `--format` (optional): Output format + +**Sections JSON:** +```json +[ + {"type": "intro", "content": "Welcome message..."}, + {"type": "highlight", "title": "Big Win", "content": "We achieved..."}, + {"type": "updates", "items": ["Update 1", "Update 2"]}, + {"type": "spotlight", "name": "Jane Doe", "role": "Engineer", "content": "Achievements..."}, + {"type": "upcoming", "events": [{"date": "Feb 15", "title": "All-hands"}]} +] +``` + +**When to use:** +- Weekly team newsletters +- Monthly company updates +- Department communications + +--- + +### announcement + +Creates formal announcements. 
+ +```bash +python tools/announcement.py --type <type> --subject <subject> --content <json> [--urgency <level>] +``` + +**Arguments:** +- `--type` (required): Announcement type - `general`, `policy`, `event`, `change`, `launch` +- `--subject` (required): Announcement subject +- `--content` (required): Content details JSON +- `--urgency` (optional): Urgency level - `normal`, `important`, `urgent` + +**Content JSON:** +```json +{ + "summary": "Brief summary of announcement", + "details": "Full details and context...", + "action_items": ["Review by Friday", "Submit feedback"], + "contact": "jane@company.com", + "effective_date": "2024-02-01" +} +``` + +**When to use:** +- Policy updates +- Organizational changes +- Product launches +- Event announcements + +--- + +### meeting_summary + +Generates meeting summaries. + +```bash +python tools/meeting_summary.py --title <title> --date <date> --data <json> +``` + +**Arguments:** +- `--title` (required): Meeting title +- `--date` (required): Meeting date +- `--data` (required): Meeting data JSON + +**Data JSON:** +```json +{ + "attendees": ["Alice", "Bob", "Charlie"], + "agenda": ["Q1 planning", "Budget review"], + "discussion": [ + {"topic": "Q1 Goals", "summary": "Agreed on 3 key objectives..."}, + {"topic": "Budget", "summary": "Approved $50k allocation..."} + ], + "decisions": ["Launch in March", "Hire 2 engineers"], + "action_items": [ + {"owner": "Alice", "task": "Draft proposal", "due": "Feb 10"} + ], + "next_meeting": "Feb 15, 2024" +} +``` + +**When to use:** +- Team meeting notes +- Stakeholder meeting summaries +- Decision documentation + +--- + +### template + +Generates reusable communication templates. 
+ +```bash +python tools/template.py --type <type> [--customize <json>] +``` + +**Arguments:** +- `--type` (required): Template type - `status`, `newsletter`, `announcement`, `meeting`, `email` +- `--customize` (optional): Customization options + +**When to use:** +- Setting up recurring communications +- Standardizing team output +- Creating document templates + +## Common Patterns + +### Weekly Status Report +```bash +python tools/status_report.py --project "Backend Team" --period "Week 5" --data '{"highlights": ["Deployed v2.0", "Fixed 12 bugs"], "progress": {"tasks_completed": 18, "tasks_remaining": 5}, "blockers": [], "next_steps": ["Performance testing"]}' +``` + +### Team Newsletter +```bash +python tools/newsletter.py --title "Engineering Weekly #12" --sections '[{"type": "intro", "content": "Great week everyone!"}, {"type": "highlight", "title": "Launch Success", "content": "Product v2.0 is live!"}, {"type": "upcoming", "events": [{"date": "Feb 20", "title": "Hackathon"}]}]' --format html +``` + +### Policy Announcement +```bash +python tools/announcement.py --type policy --subject "Remote Work Update" --content '{"summary": "New hybrid policy starting March", "details": "We are updating our remote work policy...", "effective_date": "2024-03-01"}' --urgency important +``` + +## Writing Guidelines + +### Tone +- **Status reports**: Factual, concise, data-driven +- **Newsletters**: Engaging, positive, inclusive +- **Announcements**: Clear, direct, professional +- **Meeting notes**: Structured, action-oriented + +### Structure +- Lead with key information +- Use bullet points for lists +- Include clear action items +- Provide contact for questions + +### Best Practices +1. Keep it scannable +2. Highlight important dates +3. Use consistent formatting +4. Include relevant links +5. Proofread before sending + +## Dependencies + +Uses Python standard library only. 
#!/usr/bin/env python3
"""
Generate announcements.

Usage:
    python announcement.py --type general --subject "Title" --content '{...}'
"""

import argparse
import sys
import json
from datetime import datetime


# Icon shown in the heading for each urgency level.
URGENCY_ICONS = {
    "normal": "šŸ“¢",
    "important": "āš ļø",
    "urgent": "🚨"
}

# Human-readable heading per announcement type.
TYPE_TITLES = {
    "general": "Announcement",
    "policy": "Policy Update",
    "event": "Event Announcement",
    "change": "Change Notice",
    "launch": "Launch Announcement"
}


def generate_announcement(ann_type: str, subject: str, content: dict, urgency: str = 'normal') -> str:
    """
    Render a markdown announcement.

    Args:
        ann_type: One of TYPE_TITLES' keys; unknown values fall back to the
            generic "Announcement" heading.
        subject: Subject line shown in the title.
        content: Optional fields: summary, details, action_items, contact,
            effective_date. Missing fields omit their section.
        urgency: 'normal', 'important' or 'urgent'; non-normal levels add a
            Priority line.

    Returns:
        The announcement as a markdown string.
    """
    heading_icon = URGENCY_ICONS.get(urgency, "šŸ“¢")
    heading_text = TYPE_TITLES.get(ann_type, "Announcement")

    out = [
        f"# {heading_icon} {heading_text}: {subject}",
        "",
        f"**Date:** {datetime.now().strftime('%B %d, %Y')}",
    ]

    if content.get("effective_date"):
        out.append(f"**Effective:** {content['effective_date']}")
    if urgency != "normal":
        out.append(f"**Priority:** {urgency.upper()}")

    out.extend(["", "---", ""])

    # Optional lead-in summary.
    if content.get("summary"):
        out.extend([f"**TL;DR:** {content['summary']}", ""])

    # Optional long-form body.
    if content.get("details"):
        out.extend(["## Details", content["details"], ""])

    # Optional checklist of follow-ups.
    if content.get("action_items"):
        out.append("## Action Required")
        out.extend(f"- [ ] {item}" for item in content["action_items"])
        out.append("")

    # Optional footer with a contact point.
    if content.get("contact"):
        out.extend(["---", f"*Questions? Contact: {content['contact']}*"])

    return '\n'.join(out)


def main():
    """CLI entry point: parse arguments, decode the content JSON, print."""
    parser = argparse.ArgumentParser(description='Generate announcement')
    parser.add_argument('--type', required=True,
                        choices=['general', 'policy', 'event', 'change', 'launch'])
    parser.add_argument('--subject', required=True, help='Announcement subject')
    parser.add_argument('--content', required=True, help='Content JSON')
    parser.add_argument('--urgency', default='normal', choices=['normal', 'important', 'urgent'])

    args = parser.parse_args()

    try:
        content = json.loads(args.content)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid content JSON - {e}", file=sys.stderr)
        sys.exit(1)

    try:
        print(generate_announcement(args.type, args.subject, content, args.urgency))
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
{item}") + lines.append("") + + # Discussion + if data.get("discussion"): + lines.append("## šŸ’¬ Discussion") + for item in data["discussion"]: + lines.append(f"### {item.get('topic', 'Topic')}") + lines.append(item.get("summary", "")) + lines.append("") + + # Decisions + if data.get("decisions"): + lines.append("## āœ… Decisions Made") + for decision in data["decisions"]: + lines.append(f"- āœ“ {decision}") + lines.append("") + + # Action Items + if data.get("action_items"): + lines.append("## šŸ“ Action Items") + lines.append("") + lines.append("| Owner | Task | Due Date |") + lines.append("|-------|------|----------|") + for item in data["action_items"]: + owner = item.get("owner", "-") + task = item.get("task", "-") + due = item.get("due", "-") + lines.append(f"| {owner} | {task} | {due} |") + lines.append("") + + # Next Meeting + if data.get("next_meeting"): + lines.append("---") + lines.append(f"**Next Meeting:** {data['next_meeting']}") + + return '\n'.join(lines) + + +def main(): + parser = argparse.ArgumentParser(description='Generate meeting summary') + parser.add_argument('--title', required=True, help='Meeting title') + parser.add_argument('--date', required=True, help='Meeting date') + parser.add_argument('--data', required=True, help='Meeting data JSON') + + args = parser.parse_args() + + try: + data = json.loads(args.data) + except json.JSONDecodeError as e: + print(f"Error: Invalid data JSON - {e}", file=sys.stderr) + sys.exit(1) + + try: + result = generate_meeting_summary(args.title, args.date, data) + print(result) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/internal-comms/tools/newsletter.py b/coderrr-skills/skills/internal-comms/tools/newsletter.py new file mode 100644 index 0000000..7ea8df1 --- /dev/null +++ b/coderrr-skills/skills/internal-comms/tools/newsletter.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python3 +""" +Generate internal 
#!/usr/bin/env python3
"""
Generate internal newsletters.

Usage:
    python newsletter.py --title "Weekly Update" --sections '[...]'
"""

import argparse
import html
import sys
import json
from datetime import datetime


def generate_newsletter_md(title: str, sections: list) -> str:
    """
    Render a newsletter as markdown.

    Args:
        title: Newsletter title shown in the heading.
        sections: List of section dicts; each has a "type" key (intro,
            highlight, updates, spotlight, upcoming, content). Unknown
            types are silently ignored.

    Returns:
        The newsletter as a markdown string.
    """
    lines = [
        f"# šŸ“° {title}",
        f"*{datetime.now().strftime('%B %d, %Y')}*",
        "",
        "---",
        ""
    ]

    for section in sections:
        section_type = section.get("type", "content")

        if section_type == "intro":
            # Free-form welcome paragraph.
            lines.append(section.get("content", ""))
            lines.append("")

        elif section_type == "highlight":
            lines.append(f"## 🌟 {section.get('title', 'Highlight')}")
            lines.append(section.get("content", ""))
            lines.append("")

        elif section_type == "updates":
            lines.append("## šŸ“‹ Updates")
            for item in section.get("items", []):
                lines.append(f"- {item}")
            lines.append("")

        elif section_type == "spotlight":
            lines.append(f"## šŸ‘¤ Team Spotlight: {section.get('name', 'Team Member')}")
            if section.get("role"):
                lines.append(f"*{section['role']}*")
            lines.append("")
            lines.append(section.get("content", ""))
            lines.append("")

        elif section_type == "upcoming":
            lines.append("## šŸ“… Upcoming Events")
            for event in section.get("events", []):
                lines.append(f"- **{event.get('date', '')}**: {event.get('title', '')}")
            lines.append("")

        elif section_type == "content":
            # Generic section: optional title plus body text.
            if section.get("title"):
                lines.append(f"## {section['title']}")
            lines.append(section.get("content", ""))
            lines.append("")

    lines.append("---")
    lines.append("*Questions? Reply to this newsletter or reach out to the team.*")

    return '\n'.join(lines)


def generate_newsletter_html(title: str, sections: list) -> str:
    """
    Render the newsletter as a minimal standalone HTML page.

    The markdown body is HTML-escaped before interpolation so section
    content containing '<', '>' or '&' cannot break the page markup or
    inject tags (the previous version embedded it verbatim). No
    markdown-to-HTML conversion is attempted; the text is shown in a
    wrapped <pre> block.
    """
    body = html.escape(generate_newsletter_md(title, sections))
    safe_title = html.escape(title)

    return f"""<!DOCTYPE html>
<html>
<head>
    <meta charset="utf-8">
    <title>{safe_title}</title>
    <style>
        body {{ font-family: sans-serif; max-width: 720px; margin: 2em auto; }}
        pre {{ white-space: pre-wrap; }}
    </style>
</head>
<body>
<pre>
{body}
</pre>
</body>
</html>
"""


def newsletter(title: str, sections: list, format_type: str = 'markdown'):
    """Dispatch to the HTML or markdown renderer (markdown by default)."""
    if format_type == 'html':
        return generate_newsletter_html(title, sections)
    return generate_newsletter_md(title, sections)


def main():
    """CLI entry point: parse arguments, decode the sections JSON, print."""
    parser = argparse.ArgumentParser(description='Generate newsletter')
    parser.add_argument('--title', required=True, help='Newsletter title')
    parser.add_argument('--sections', required=True, help='Sections JSON')
    parser.add_argument('--format', default='markdown', choices=['markdown', 'html'])

    args = parser.parse_args()

    try:
        sections = json.loads(args.sections)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid sections JSON - {e}", file=sys.stderr)
        sys.exit(1)

    try:
        result = newsletter(args.title, sections, args.format)
        print(result)
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
#!/usr/bin/env python3
"""
Generate status reports.

Usage:
    python status_report.py --project "Team Name" --period "Week 5" --data '{...}'
"""

import argparse
import html
import sys
import json
from datetime import datetime


def generate_markdown(project: str, period: str, data: dict) -> str:
    """
    Render a status report as markdown.

    Args:
        project: Project or team name shown in the title.
        period: Reporting period label (e.g. "Week 5").
        data: Optional keys: highlights, progress (tasks_completed /
            tasks_remaining), metrics, blockers, next_steps. Missing keys
            simply omit the corresponding section.

    Returns:
        The report as a markdown string.
    """
    lines = [
        f"# Status Report: {project}",
        f"**Period:** {period}",
        f"**Generated:** {datetime.now().strftime('%Y-%m-%d')}",
        "",
        "---",
        ""
    ]

    # Highlights
    if data.get("highlights"):
        lines.append("## šŸŽÆ Highlights")
        for item in data["highlights"]:
            lines.append(f"- {item}")
        lines.append("")

    # Progress (only rendered when a tasks_completed count is supplied)
    if data.get("progress"):
        lines.append("## šŸ“Š Progress")
        progress = data["progress"]
        if "tasks_completed" in progress:
            total = progress.get("tasks_completed", 0) + progress.get("tasks_remaining", 0)
            # Guard against division by zero when both counts are 0.
            pct = (progress["tasks_completed"] / total * 100) if total > 0 else 0
            lines.append(f"- **Completed:** {progress['tasks_completed']} tasks")
            lines.append(f"- **Remaining:** {progress.get('tasks_remaining', 0)} tasks")
            lines.append(f"- **Progress:** {pct:.0f}%")
        lines.append("")

    # Metrics: snake_case keys are rendered as Title Case labels.
    if data.get("metrics"):
        lines.append("## šŸ“ˆ Metrics")
        for metric, value in data["metrics"].items():
            lines.append(f"- **{metric.replace('_', ' ').title()}:** {value}")
        lines.append("")

    # Blockers
    if data.get("blockers"):
        lines.append("## 🚧 Blockers")
        for item in data["blockers"]:
            lines.append(f"- āš ļø {item}")
        lines.append("")

    # Next Steps
    if data.get("next_steps"):
        lines.append("## āž”ļø Next Steps")
        for item in data["next_steps"]:
            lines.append(f"- {item}")
        lines.append("")

    return '\n'.join(lines)


def generate_html(project: str, period: str, data: dict) -> str:
    """
    Render the status report as a minimal standalone HTML page.

    The markdown text is HTML-escaped before interpolation so report data
    containing '<', '>' or '&' cannot break the page markup or inject tags
    (the previous version embedded the raw markdown directly). No
    markdown-to-HTML conversion is attempted — the earlier inline comment
    claiming a "markdown to HTML conversion" was inaccurate — so the report
    is presented in a wrapped <pre> block.
    """
    body = html.escape(generate_markdown(project, period, data))
    safe_title = html.escape(f"Status Report: {project}")

    return f"""<!DOCTYPE html>
<html>
<head>
    <meta charset="utf-8">
    <title>{safe_title}</title>
    <style>
        body {{ font-family: sans-serif; max-width: 720px; margin: 2em auto; }}
        pre {{ white-space: pre-wrap; }}
    </style>
</head>
<body>
<pre>
{body}
</pre>
</body>
</html>
"""


def status_report(project: str, period: str, data: dict, format_type: str = 'markdown'):
    """Dispatch to the HTML or markdown renderer (markdown by default)."""
    if format_type == 'html':
        return generate_html(project, period, data)
    return generate_markdown(project, period, data)


def main():
    """CLI entry point: parse arguments, decode the data JSON, print."""
    parser = argparse.ArgumentParser(description='Generate status report')
    parser.add_argument('--project', required=True, help='Project/team name')
    parser.add_argument('--period', required=True, help='Reporting period')
    parser.add_argument('--data', required=True, help='Report data JSON')
    parser.add_argument('--format', default='markdown', choices=['markdown', 'html'])

    args = parser.parse_args()

    try:
        data = json.loads(args.data)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid data JSON - {e}", file=sys.stderr)
        sys.exit(1)

    try:
        result = status_report(args.project, args.period, data, args.format)
        print(result)
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
+ +## Approach + +Before invoking tools, understand the JSON operation: +- **Readability**: Use `format_json` to pretty-print or minify +- **Data extraction**: Use `query_json` with path expressions +- **Syntax check**: Use `validate_json` to verify and locate errors +- **Pipeline**: Chain tools for complex operations (validate → query → format) + +## Tools + +### format_json + +Pretty-prints or minifies JSON data. Reads from file or stdin for easy piping. + +```bash +python tools/format_json.py [--file ] [--indent ] [--minify] +``` + +**Arguments:** +- `--file` (optional): Path to JSON file. If omitted, reads from stdin +- `--indent` (optional): Indentation spaces (default: 2) +- `--minify` (optional): Compress to single line + +**Output:** Formatted JSON to stdout. + +**When to use:** +- Making JSON human-readable +- Standardizing JSON formatting +- Minimizing JSON file size +- Piping output from API calls + +--- + +### query_json + +Extracts values from JSON using path expressions. Supports simple JSONPath-like syntax. + +```bash +python tools/query_json.py --file --path +``` + +**Arguments:** +- `--file` (required): Path to JSON file +- `--path` (required): Path expression (e.g., `user.name`, `items[0]`, `data[*].id`) + +**Path Syntax:** +- `key` or `.key` - Access object property +- `[0]` - Access array element by index +- `[*]` - Access ALL array elements (returns array of matched values) + +**Output:** The matched value as JSON. + +**When to use:** +- Extracting specific values from config files +- Getting nested data from API responses +- Selecting array elements +- Drilling into complex JSON structures + +**Examples:** +- `user.email` → Gets user's email +- `users[0]` → Gets first user +- `items[*].name` → Gets all item names as array +- `config.database.host` → Gets nested config value + +--- + +### validate_json + +Validates JSON syntax and reports precise error locations. 
+ +```bash +python tools/validate_json.py --file +``` + +**Arguments:** +- `--file` (required): Path to JSON file to validate + +**Output:** JSON with validation result. If invalid, includes error message, line, and column. + +**When to use:** +- Checking JSON before parsing in code +- Debugging JSON syntax errors +- Validating user-provided JSON +- CI/CD pipeline validation + +## Common Patterns + +### Pretty Print a File +```bash +python tools/format_json.py --file config.json --indent 4 +``` + +### Minify JSON +```bash +python tools/format_json.py --file data.json --minify +``` + +### Format Piped Input +```bash +echo '{"name":"John","age":30}' | python tools/format_json.py +``` + +### Extract Nested Value +```bash +python tools/query_json.py --file response.json --path "data.user.profile.email" +``` + +### Get All IDs from Array +```bash +python tools/query_json.py --file users.json --path "users[*].id" +``` + +### Validate Before Processing +```bash +python tools/validate_json.py --file input.json +``` + +## Best Practices + +1. **Validate first** - Check syntax before querying or processing +2. **Use precise paths** - `users[0].name` is clearer than complex filtering +3. **Pipe for workflows** - Combine with other tools via stdin/stdout +4. **Consistent formatting** - Use same indent (2 or 4) across project +5. 
**Minify for production** - Reduce file size for deployment + +## Path Expression Examples + +Given this JSON: +```json +{ + "users": [ + {"id": 1, "name": "Alice", "roles": ["admin", "user"]}, + {"id": 2, "name": "Bob", "roles": ["user"]} + ], + "meta": {"total": 2, "page": 1} +} +``` + +| Path | Result | +|------|--------| +| `users` | The entire users array | +| `users[0]` | `{"id": 1, "name": "Alice", ...}` | +| `users[0].name` | `"Alice"` | +| `users[*].name` | `["Alice", "Bob"]` | +| `users[0].roles[0]` | `"admin"` | +| `meta.total` | `2` | + +## Error Handling + +| Exit Code | Meaning | Recovery | +|-----------|---------|----------| +| 0 | Success | - | +| 1 | Invalid file path | Verify file exists | +| 2 | JSON parsing error | Check syntax with validate_json | +| 3 | Invalid path expression | Check path syntax | + +## Dependencies + +None - uses Python's standard library only (json, re, pathlib). diff --git a/coderrr-skills/skills/json-tools/tools/format_json.py b/coderrr-skills/skills/json-tools/tools/format_json.py new file mode 100644 index 0000000..3701b1d --- /dev/null +++ b/coderrr-skills/skills/json-tools/tools/format_json.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python3 +""" +Format and pretty print JSON data. + +This tool reads JSON from a file or stdin and outputs it formatted +with configurable indentation, or minified. + +Usage: + python format_json.py --file data.json + echo '{"a":1}' | python format_json.py + python format_json.py --file data.json --minify + +Exit Codes: + 0 - Success + 1 - Invalid file path + 2 - JSON parsing error +""" + +import argparse +import sys +import json +from pathlib import Path + + +def format_json(data: str, indent: int = 2, minify: bool = False) -> str: + """ + Format JSON data. 
def format_json(data: str, indent: int = 2, minify: bool = False) -> str:
    """
    Re-serialize a JSON document.

    Args:
        data: Raw JSON text.
        indent: Spaces per indentation level for pretty printing
            (ignored when *minify* is True).
        minify: When True, emit the most compact single-line form.

    Returns:
        The re-serialized JSON string (non-ASCII characters preserved).

    Raises:
        json.JSONDecodeError: If *data* is not valid JSON.
    """
    document = json.loads(data)
    if minify:
        # Tightest separators: no spaces after ',' or ':'.
        return json.dumps(document, separators=(',', ':'), ensure_ascii=False)
    return json.dumps(document, indent=indent, ensure_ascii=False)
def parse_path(path: str) -> List[Any]:
    """
    Tokenize a path expression into lookup components.

    Supported syntax:
        .key / key  -> object property (str component)
        [N]         -> array index (int component)
        [*]         -> every array element ('*' component)

    Args:
        path: Path expression such as "users[0].name".

    Returns:
        Components in evaluation order, e.g. ["users", 0, "name"].
    """
    tokens: List[Any] = []
    # Property names are runs of characters other than '.', '[' and ']';
    # indices and the wildcard sit inside brackets.
    for prop, index in re.findall(r'\.?([^\.\[\]]+)|\[(\d+|\*)\]', path):
        if prop:
            tokens.append(prop)
        elif index:
            tokens.append('*' if index == '*' else int(index))
    return tokens
def query_value(data: Any, components: List[Any]) -> Any:
    """
    Walk *data* along parsed path components and return the match.

    Components are property names (str), array indices (int), or '*'
    (fan out over every element of a list, collecting the sub-matches).

    Args:
        data: The JSON data to query.
        components: Parsed path components (see parse_path).

    Returns:
        The matched value, or a list of values for wildcard paths.

    Raises:
        KeyError: When a property is missing.
        IndexError: When an index is out of range.
        TypeError: When a component cannot apply to the current value.
    """
    node = data
    for position, step in enumerate(components):
        tail = components[position + 1:]

        if step == '*':
            if not isinstance(node, list):
                raise TypeError(f"Can't use [*] on non-array value")
            collected = []
            for element in node:
                # Elements that don't satisfy the rest of the path are
                # silently dropped instead of aborting the whole query.
                try:
                    sub = query_value(element, tail)
                except (KeyError, IndexError, TypeError):
                    continue
                # Flatten one level when the remaining path itself ends in
                # a wildcard, so nested [*] queries yield a single list.
                if isinstance(sub, list) and tail and tail[-1] == '*':
                    collected.extend(sub)
                else:
                    collected.append(sub)
            return collected

        if isinstance(step, int):
            if not isinstance(node, list):
                raise TypeError(f"Can't use [{step}] on non-array value")
            node = node[step]
        else:
            if not isinstance(node, dict):
                raise TypeError(f"Can't access '{step}' on non-object value")
            if step not in node:
                raise KeyError(f"Key '{step}' not found")
            node = node[step]

    return node
+ + Args: + file_path: Path to JSON file + path: Path expression + + Returns: + The matched value + """ + path_obj = Path(file_path) + + if not path_obj.exists(): + raise FileNotFoundError(f"File not found: {file_path}") + + with open(path_obj, 'r', encoding='utf-8') as f: + data = json.load(f) + + components = parse_path(path) + + if not components: + return data + + return query_value(data, components) + + +def main(): + parser = argparse.ArgumentParser( + description='Query JSON data using path expressions', + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=''' +Path Syntax: + .key or key - Access object property + [0] - Access array element by index + [*] - Access all array elements + +Examples: + python query_json.py --file data.json --path "user.name" + python query_json.py --file data.json --path "users[0]" + python query_json.py --file data.json --path "items[*].id" + python query_json.py --file config.json --path "database.host" + ''' + ) + parser.add_argument( + '--file', + required=True, + help='Path to JSON file' + ) + parser.add_argument( + '--path', + required=True, + help='Path expression (e.g., "users[0].name")' + ) + + args = parser.parse_args() + + try: + result = query_json(args.file, args.path) + print(json.dumps(result, indent=2, ensure_ascii=False)) + except FileNotFoundError as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + except json.JSONDecodeError as e: + print(f"Error: Invalid JSON - {e.msg} at line {e.lineno}", file=sys.stderr) + sys.exit(2) + except (KeyError, IndexError, TypeError) as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(3) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/json-tools/tools/validate_json.py b/coderrr-skills/skills/json-tools/tools/validate_json.py new file mode 100644 index 0000000..cdb6178 --- /dev/null +++ b/coderrr-skills/skills/json-tools/tools/validate_json.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python3 +""" +Validate JSON syntax. 
#!/usr/bin/env python3
"""
Validate JSON syntax.

This tool checks if a JSON file has valid syntax and reports
specific error locations if invalid.

Usage:
    python validate_json.py --file data.json

Exit Codes:
    0 - Success (valid JSON)
    1 - Invalid file path
    2 - Invalid JSON (with error details)
"""

import argparse
import sys
import json
from pathlib import Path


def validate_json(file_path: str) -> dict:
    """
    Validate JSON file syntax.

    Args:
        file_path: Path to JSON file

    Returns:
        Dictionary with validation result:
        {'valid': True, 'file': ...} on success, or
        {'valid': False, 'file': ..., 'error': {'message', 'line', 'column'}}.

    Raises:
        FileNotFoundError: If the file does not exist.
        IOError: If the file cannot be read.
    """
    path = Path(file_path)

    if not path.exists():
        raise FileNotFoundError(f"File not found: {file_path}")

    try:
        with open(path, 'r', encoding='utf-8') as f:
            content = f.read()
    except IOError as e:
        # Chain the original exception so the OS-level cause isn't lost.
        raise IOError(f"Could not read file: {e}") from e

    try:
        json.loads(content)
        return {
            'valid': True,
            'file': str(path)
        }
    except json.JSONDecodeError as e:
        return {
            'valid': False,
            'file': str(path),
            'error': {
                'message': e.msg,
                'line': e.lineno,
                'column': e.colno
            }
        }


def main():
    parser = argparse.ArgumentParser(
        description='Validate JSON syntax',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='''
Examples:
  python validate_json.py --file data.json
  python validate_json.py --file config.json
        '''
    )
    parser.add_argument(
        '--file',
        required=True,
        help='Path to JSON file to validate'
    )

    args = parser.parse_args()

    try:
        result = validate_json(args.file)
        print(json.dumps(result, indent=2))

        # Exit with code 2 if invalid (but still output the result)
        if not result['valid']:
            sys.exit(2)

    except FileNotFoundError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)
    except IOError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
b/coderrr-skills/skills/mcp-builder/Skills.md @@ -0,0 +1,203 @@ +--- +name: mcp-builder +description: Guide for creating high-quality MCP (Model Context Protocol) servers. Use this skill when the user wants to build an MCP server, create MCP tools, implement MCP resources, or integrate with MCP-compatible clients. Provides scaffolding, templates, and validation. +--- + +This skill helps build MCP (Model Context Protocol) servers for extending AI agent capabilities. It provides scaffolding, templates, and best practices for creating production-quality MCP implementations. + +The user wants to create an MCP server. They may specify the type of tools, resources, or prompts they want to expose. + +## Approach + +When building MCP servers: +1. **Initialize**: Use `init_mcp` to scaffold server structure +2. **Add tools**: Use `add_mcp_tool` for each tool +3. **Add resources**: Use `add_mcp_resource` for data sources +4. **Validate**: Use `validate_mcp` to check compliance +5. **Test**: Use `test_mcp` to verify functionality + +## Tools + +### init_mcp + +Scaffolds a new MCP server project. + +```bash +python tools/init_mcp.py --name --language --output-dir [--transport ] +``` + +**Arguments:** +- `--name` (required): Server name +- `--language` (required): Implementation language - `py` or `ts` +- `--output-dir` (required): Output directory +- `--transport` (optional): Transport type (default: stdio) + +**Creates:** +``` +my-mcp-server/ +ā”œā”€ā”€ src/ +│ └── server.py (or index.ts) +ā”œā”€ā”€ pyproject.toml (or package.json) +ā”œā”€ā”€ README.md +└── tests/ +``` + +**When to use:** +- Starting a new MCP server +- Getting proper project structure +- Setting up dependencies + +--- + +### add_mcp_tool + +Adds a tool definition to the MCP server. 
+ +```bash +python tools/add_mcp_tool.py --server-dir --name --description --parameters +``` + +**Arguments:** +- `--server-dir` (required): MCP server directory +- `--name` (required): Tool name +- `--description` (required): Tool description +- `--parameters` (required): JSON schema for parameters + +**Parameters JSON:** +```json +{ + "type": "object", + "properties": { + "query": {"type": "string", "description": "Search query"}, + "limit": {"type": "integer", "default": 10} + }, + "required": ["query"] +} +``` + +**When to use:** +- Adding server capabilities +- Exposing functions to AI agents +- Implementing tool handlers + +--- + +### add_mcp_resource + +Adds a resource definition to the MCP server. + +```bash +python tools/add_mcp_resource.py --server-dir --uri --name --description [--mime-type ] +``` + +**Arguments:** +- `--server-dir` (required): MCP server directory +- `--uri` (required): Resource URI pattern (e.g., `file:///{path}`) +- `--name` (required): Resource name +- `--description` (required): Resource description +- `--mime-type` (optional): Content type (default: text/plain) + +**When to use:** +- Exposing data sources +- Providing file access +- Sharing dynamic content + +--- + +### validate_mcp + +Validates MCP server implementation. + +```bash +python tools/validate_mcp.py --server-dir +``` + +**Output:** Validation report with compliance status and issues. + +**Checks:** +- Valid manifest structure +- Tool definitions follow schema +- Resource URIs are valid +- Handler implementations exist + +**When to use:** +- Before publishing +- CI/CD validation +- Debugging issues + +--- + +### test_mcp + +Tests MCP server functionality. 
+ +```bash +python tools/test_mcp.py --server-dir [--tool ] [--input ] +``` + +**Arguments:** +- `--server-dir` (required): MCP server directory +- `--tool` (optional): Specific tool to test +- `--input` (optional): Test input JSON + +**When to use:** +- Verifying tool behavior +- Testing handlers +- Debugging responses + +## MCP Concepts + +### Tools +Functions the AI can invoke: +```python +@server.tool("search") +async def search(query: str, limit: int = 10): + """Search for documents.""" + return {"results": [...]} +``` + +### Resources +Data the AI can read: +```python +@server.resource("file:///{path}") +async def read_file(path: str): + """Read file contents.""" + return {"content": ...} +``` + +### Prompts +Pre-defined prompt templates: +```python +@server.prompt("summarize") +def summarize_prompt(content: str): + """Create summary prompt.""" + return f"Summarize: {content}" +``` + +## Common Patterns + +### Create Python MCP Server +```bash +python tools/init_mcp.py --name my-server --language py --output-dir ./servers +python tools/add_mcp_tool.py --server-dir ./servers/my-server --name search --description "Search documents" --parameters '{"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]}' +python tools/validate_mcp.py --server-dir ./servers/my-server +``` + +### Add File Resource +```bash +python tools/add_mcp_resource.py --server-dir ./my-server --uri "file:///{path}" --name "files" --description "Access local files" --mime-type text/plain +``` + +## Best Practices + +1. **Clear descriptions** - Help AI understand when to use tools +2. **Typed parameters** - Use JSON Schema for validation +3. **Error handling** - Return meaningful error messages +4. **Async handlers** - Use async for I/O operations +5. 
#!/usr/bin/env python3
"""
Add a tool to MCP server.

Usage:
    python add_mcp_tool.py --server-dir ./my-server --name search --description "Search documents" --parameters '{...}'
"""

import argparse
import sys
import json
from pathlib import Path


def add_mcp_tool(server_dir: str, name: str, description: str, parameters: dict):
    """Register a tool in the server's mcp.json and append a handler stub.

    Args:
        server_dir: Path to an existing MCP server directory.
        name: Tool name; hyphens become underscores in the handler name.
        description: Human-readable tool description.
        parameters: JSON-schema dict describing the tool parameters.

    Returns:
        Dict with status, tool name, and the handler file path.

    Raises:
        ValueError: If the server directory is missing or the tool
            already exists.
    """
    server_path = Path(server_dir)

    if not server_path.exists():
        raise ValueError(f"Server directory not found: {server_dir}")

    # Load the server configuration, or start a fresh one.
    config_file = server_path / 'mcp.json'
    if config_file.exists():
        config = json.loads(config_file.read_text())
    else:
        config = {"tools": [], "resources": []}

    tool_def = {
        "name": name,
        "description": description,
        "parameters": parameters
    }

    # Check for duplicate
    if any(t["name"] == name for t in config.get("tools", [])):
        raise ValueError(f"Tool '{name}' already exists")

    # setdefault: an existing mcp.json may legitimately lack a "tools"
    # key; plain config["tools"] would raise KeyError in that case.
    config.setdefault("tools", []).append(tool_def)
    config_file.write_text(json.dumps(config, indent=2))

    # Generate handler stub.
    # NOTE(review): parameter names are taken verbatim from the JSON-schema
    # property keys and are assumed to be valid Python identifiers.
    handler_code = f'''
# Tool: {name}
# {description}
async def handle_{name.replace('-', '_')}({', '.join(parameters.get('properties', {}).keys())}):
    """
    {description}
    """
    # TODO: Implement tool logic
    return {{"result": "Not implemented"}}
'''

    # Ensure src/ exists: a bare server dir (not created by init_mcp)
    # would otherwise make write_text fail with FileNotFoundError.
    handlers_file = server_path / 'src' / 'handlers.py'
    handlers_file.parent.mkdir(parents=True, exist_ok=True)
    if handlers_file.exists():
        existing = handlers_file.read_text()
        handlers_file.write_text(existing + handler_code)
    else:
        handlers_file.write_text(f'# Tool handlers\n{handler_code}')

    return {
        "status": "success",
        "tool": name,
        "handler": str(handlers_file)
    }


def main():
    parser = argparse.ArgumentParser(description='Add MCP tool')
    parser.add_argument('--server-dir', required=True, help='Server directory')
    parser.add_argument('--name', required=True, help='Tool name')
    parser.add_argument('--description', required=True, help='Tool description')
    parser.add_argument('--parameters', required=True, help='Parameters JSON schema')

    args = parser.parse_args()

    try:
        parameters = json.loads(args.parameters)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid parameters JSON - {e}", file=sys.stderr)
        sys.exit(1)

    try:
        result = add_mcp_tool(args.server_dir, args.name, args.description, parameters)
        print(json.dumps(result, indent=2))
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
+""" + +import asyncio +from mcp.server import Server +from mcp.server.stdio import stdio_server +from mcp.types import Tool, TextContent + + +# Create server instance +server = Server("{name}") + + +# Tools registry +@server.list_tools() +async def list_tools(): + """List available tools.""" + return [ + # Add tools here + ] + + +@server.call_tool() +async def call_tool(name: str, arguments: dict): + """Handle tool calls.""" + # Implement tool handlers + raise ValueError(f"Unknown tool: {{name}}") + + +# Resources registry +@server.list_resources() +async def list_resources(): + """List available resources.""" + return [ + # Add resources here + ] + + +@server.read_resource() +async def read_resource(uri: str): + """Read resource content.""" + raise ValueError(f"Unknown resource: {{uri}}") + + +async def main(): + """Run the MCP server.""" + async with stdio_server() as (read_stream, write_stream): + await server.run(read_stream, write_stream) + + +if __name__ == "__main__": + asyncio.run(main()) +''' + + +PYPROJECT_TEMPLATE = '''[project] +name = "{name}" +version = "0.1.0" +description = "MCP server for {name}" +requires-python = ">=3.10" +dependencies = [ + "mcp>=1.0.0", +] + +[project.scripts] +{name} = "src.server:main" +''' + + +README_TEMPLATE = '''# {name} + +A Model Context Protocol (MCP) server. + +## Installation + +```bash +pip install -e . 
+``` + +## Usage + +Run the server: +```bash +python src/server.py +``` + +## Tools + +| Tool | Description | +|------|-------------| +| (Add tools) | (Description) | + +## Resources + +| URI Pattern | Description | +|-------------|-------------| +| (Add resources) | (Description) | + +## License + +MIT +''' + + +def init_mcp(name: str, language: str, output_dir: str, transport: str = 'stdio'): + """Initialize MCP server project.""" + server_dir = Path(output_dir) / name + + # Create directories + server_dir.mkdir(parents=True, exist_ok=True) + (server_dir / 'src').mkdir(exist_ok=True) + (server_dir / 'tests').mkdir(exist_ok=True) + + if language == 'py': + # Python server + (server_dir / 'src' / 'server.py').write_text( + PYTHON_SERVER_TEMPLATE.format(name=name) + ) + (server_dir / 'src' / '__init__.py').write_text('') + (server_dir / 'pyproject.toml').write_text( + PYPROJECT_TEMPLATE.format(name=name) + ) + + # Common files + (server_dir / 'README.md').write_text(README_TEMPLATE.format(name=name)) + + # Server config + config = { + "name": name, + "language": language, + "transport": transport, + "tools": [], + "resources": [] + } + (server_dir / 'mcp.json').write_text(json.dumps(config, indent=2)) + + return { + "status": "success", + "server_dir": str(server_dir), + "language": language, + "transport": transport + } + + +def main(): + parser = argparse.ArgumentParser(description='Initialize MCP server') + parser.add_argument('--name', required=True, help='Server name') + parser.add_argument('--language', required=True, choices=['py', 'ts']) + parser.add_argument('--output-dir', required=True, help='Output directory') + parser.add_argument('--transport', default='stdio', choices=['stdio', 'sse']) + + args = parser.parse_args() + + try: + result = init_mcp(args.name, args.language, args.output_dir, args.transport) + print(json.dumps(result, indent=2)) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + 
#!/usr/bin/env python3
"""
Validate MCP server implementation.

Usage:
    python validate_mcp.py --server-dir ./my-server
"""

import argparse
import sys
import json
from pathlib import Path
import ast


def validate_mcp(server_dir: str) -> dict:
    """Validate an MCP server's structure and configuration.

    Checks mcp.json presence and contents, tool definitions, the server
    implementation (src/server.py or src/index.ts), and the README.

    Args:
        server_dir: Path to the MCP server directory.

    Returns:
        Dict with 'valid', 'issues', 'warnings', and tool/resource counts.
    """
    server_path = Path(server_dir)
    issues = []
    warnings = []
    # Initialized up front: the summary below reads `config` even when
    # mcp.json is missing or unparseable (previously a NameError when the
    # file existed but contained invalid JSON).
    config = {}

    # Check directory exists
    if not server_path.exists():
        return {"valid": False, "issues": ["Server directory not found"]}

    # Check mcp.json
    config_file = server_path / 'mcp.json'
    if not config_file.exists():
        issues.append("mcp.json configuration file not found")
    else:
        try:
            config = json.loads(config_file.read_text())

            if not config.get("name"):
                warnings.append("Server name not specified in mcp.json")

            tools = config.get("tools", [])
            for tool in tools:
                if not tool.get("name"):
                    issues.append("Tool missing name")
                if not tool.get("description"):
                    warnings.append(f"Tool '{tool.get('name', 'unknown')}' missing description")
                if not tool.get("parameters"):
                    warnings.append(f"Tool '{tool.get('name', 'unknown')}' missing parameters schema")

        except json.JSONDecodeError:
            issues.append("mcp.json is not valid JSON")
            config = {}

    # Check server implementation
    server_file = server_path / 'src' / 'server.py'
    if not server_file.exists():
        server_file = server_path / 'src' / 'index.ts'

    if not server_file.exists():
        issues.append("Server implementation not found (src/server.py or src/index.ts)")
    else:
        # Validate Python syntax
        if server_file.suffix == '.py':
            try:
                ast.parse(server_file.read_text())
            except SyntaxError as e:
                issues.append(f"Syntax error in server.py: line {e.lineno}")

    # Check for README
    if not (server_path / 'README.md').exists():
        warnings.append("README.md not found")

    return {
        "valid": len(issues) == 0,
        "issues": issues,
        "warnings": warnings,
        # config defaults to {} when mcp.json is missing or invalid, so
        # these counts are 0 in those cases (same as intended originally).
        "tools_count": len(config.get("tools", [])),
        "resources_count": len(config.get("resources", []))
    }


def main():
    parser = argparse.ArgumentParser(description='Validate MCP server')
    parser.add_argument('--server-dir', required=True, help='Server directory')

    args = parser.parse_args()

    try:
        result = validate_mcp(args.server_dir)
        result["server_dir"] = args.server_dir
        print(json.dumps(result, indent=2))

        if not result["valid"]:
            sys.exit(1)
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
+ +## Approach + +Before invoking tools, understand the PDF operation: +- **Extract text**: Use `extract_pdf` for text and table extraction +- **Create new PDF**: Use `create_pdf` to generate from content +- **Merge files**: Use `merge_pdf` to combine multiple PDFs +- **Split file**: Use `split_pdf` to separate pages +- **Get info**: Use `pdf_info` for metadata and structure + +## Tools + +### extract_pdf + +Extracts text and optionally tables from PDF documents. + +```bash +python tools/extract_pdf.py --file [--pages ] [--tables] [--format ] +``` + +**Arguments:** +- `--file` (required): Path to PDF file +- `--pages` (optional): Page range (e.g., "1-5", "1,3,5", "all") +- `--tables` (optional): Extract tables as structured data +- `--format` (optional): Output format - `text` or `json` (default: text) + +**Output:** Extracted text or JSON with text and tables. + +**When to use:** +- Reading PDF content +- Extracting data from reports +- Processing scanned documents (with text layer) +- Getting tabular data from PDFs + +--- + +### create_pdf + +Creates PDF documents from content specification. + +```bash +python tools/create_pdf.py --output --content [--title ] +``` + +**Arguments:** +- `--output` (required): Output PDF file path +- `--content` (required): JSON content specification +- `--title` (optional): Document title + +**Content JSON Structure:** +```json +{ + "elements": [ + {"type": "heading", "text": "Title", "size": 24}, + {"type": "paragraph", "text": "Body text..."}, + {"type": "list", "items": ["Item 1", "Item 2"]}, + {"type": "table", "headers": ["A", "B"], "rows": [["1", "2"]]}, + {"type": "page_break"} + ] +} +``` + +**When to use:** +- Generating reports +- Creating invoices +- Building PDF documents programmatically +- Converting structured data to PDF + +--- + +### merge_pdf + +Combines multiple PDF files into one. + +```bash +python tools/merge_pdf.py --files <path1> <path2> ... 
--output <path> +``` + +**Arguments:** +- `--files` (required): List of PDF files to merge +- `--output` (required): Output merged PDF path + +**When to use:** +- Combining report sections +- Merging scanned documents +- Creating document packages +- Assembling multi-part documents + +--- + +### split_pdf + +Splits a PDF into separate files. + +```bash +python tools/split_pdf.py --file <path> --output-dir <dir> [--pages <spec>] +``` + +**Arguments:** +- `--file` (required): PDF file to split +- `--output-dir` (required): Directory for output files +- `--pages` (optional): Page specification (e.g., "1-3,4-6" or "each" for individual pages) + +**When to use:** +- Extracting specific pages +- Breaking up large documents +- Creating individual page files +- Separating document sections + +--- + +### pdf_info + +Gets PDF metadata and structure information. + +```bash +python tools/pdf_info.py --file <path> +``` + +**Arguments:** +- `--file` (required): PDF file to analyze + +**Output:** JSON with page count, metadata, file size, and structure info. 
+ +**When to use:** +- Checking PDF properties +- Getting page counts +- Verifying PDF integrity +- Understanding document structure + +## Common Patterns + +### Extract All Text +```bash +python tools/extract_pdf.py --file document.pdf --format text +``` + +### Extract Specific Pages +```bash +python tools/extract_pdf.py --file report.pdf --pages "1-5" +``` + +### Extract Tables as JSON +```bash +python tools/extract_pdf.py --file data.pdf --tables --format json +``` + +### Merge Multiple PDFs +```bash +python tools/merge_pdf.py --files part1.pdf part2.pdf part3.pdf --output combined.pdf +``` + +### Split Into Individual Pages +```bash +python tools/split_pdf.py --file document.pdf --output-dir ./pages --pages each +``` + +### Create Simple PDF +```bash +python tools/create_pdf.py --output report.pdf --title "Report" --content '{"elements": [{"type": "heading", "text": "Summary"}, {"type": "paragraph", "text": "Content here..."}]}' +``` + +## Best Practices + +1. **Check pdf_info first** - Understand document structure before processing +2. **Use page ranges** - Don't extract everything if you only need specific pages +3. **Handle scanned PDFs** - Some PDFs are images without text layers +4. **Preserve originals** - Merge/split create new files, don't modify originals +5. **Use tables flag** - Better structured output for tabular data + +## Dependencies + +Requires: +- `PyPDF2>=3.0.0` - PDF reading and manipulation +- `pdfplumber>=0.9.0` - Advanced text and table extraction +- `reportlab>=4.0.0` - PDF creation + +Automatically installed with the skill. 
#!/usr/bin/env python3
"""
Create PDF documents from content specification.

Usage:
    python create_pdf.py --output report.pdf --content '{"elements": [...]}'
"""

import argparse
import sys
import json
from pathlib import Path

try:
    from reportlab.lib.pagesizes import letter
    from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
    from reportlab.lib.units import inch
    from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle, PageBreak
    from reportlab.lib import colors
except ImportError:
    print("Error: 'reportlab' package is required. Install with: pip install reportlab", file=sys.stderr)
    sys.exit(1)


def create_pdf(output_path: str, content: dict, title: str = None):
    """Create a PDF document from content specification.

    Args:
        output_path: Destination PDF path.
        content: Dict with an 'elements' list; each element dict has a
            'type' key: heading, paragraph, list, table, page_break, spacer.
        title: Optional document title rendered at the top.

    Returns:
        The output path.
    """
    doc = SimpleDocTemplate(output_path, pagesize=letter,
                            rightMargin=72, leftMargin=72,
                            topMargin=72, bottomMargin=72)

    styles = getSampleStyleSheet()
    story = []

    # Add title if provided
    if title:
        story.append(Paragraph(title, styles['Title']))
        story.append(Spacer(1, 0.5 * inch))

    # Process elements
    for element in content.get('elements', []):
        elem_type = element.get('type', 'paragraph')

        if elem_type == 'heading':
            size = element.get('size', 18)
            style = ParagraphStyle('CustomHeading', parent=styles['Heading1'], fontSize=size)
            story.append(Paragraph(element.get('text', ''), style))
            story.append(Spacer(1, 0.2 * inch))

        elif elem_type == 'paragraph':
            story.append(Paragraph(element.get('text', ''), styles['Normal']))
            story.append(Spacer(1, 0.1 * inch))

        elif elem_type == 'list':
            items = element.get('items', [])
            ordered = bool(element.get('ordered'))
            # enumerate fixes numbering for duplicate items (list.index
            # always returned the first occurrence) and avoids O(n^2).
            for position, item in enumerate(items, start=1):
                bullet = f"{position}. " if ordered else "• "
                story.append(Paragraph(f"{bullet}{item}", styles['Normal']))
            story.append(Spacer(1, 0.1 * inch))

        elif elem_type == 'table':
            headers = element.get('headers', [])
            rows = element.get('rows', [])
            data = [headers] + rows if headers else rows

            if data:
                table = Table(data)
                table.setStyle(TableStyle([
                    ('BACKGROUND', (0, 0), (-1, 0), colors.grey),
                    ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke),
                    ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
                    ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'),
                    ('FONTSIZE', (0, 0), (-1, 0), 12),
                    ('BOTTOMPADDING', (0, 0), (-1, 0), 12),
                    ('BACKGROUND', (0, 1), (-1, -1), colors.beige),
                    ('GRID', (0, 0), (-1, -1), 1, colors.black)
                ]))
                story.append(table)
                story.append(Spacer(1, 0.2 * inch))

        elif elem_type == 'page_break':
            story.append(PageBreak())

        elif elem_type == 'spacer':
            height = element.get('height', 0.5)
            story.append(Spacer(1, height * inch))

    doc.build(story)
    return output_path


def main():
    parser = argparse.ArgumentParser(description='Create PDF documents')
    parser.add_argument('--output', required=True, help='Output PDF file path')
    parser.add_argument('--content', required=True, help='JSON content specification')
    parser.add_argument('--title', help='Document title')

    args = parser.parse_args()

    try:
        content = json.loads(args.content)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid content JSON - {e}", file=sys.stderr)
        sys.exit(1)

    try:
        result = create_pdf(args.output, content, args.title)
        print(json.dumps({"status": "success", "file": result}))
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
#!/usr/bin/env python3
"""
Extract text and tables from PDF documents.

Usage:
    python extract_pdf.py --file document.pdf --format text
    python extract_pdf.py --file data.pdf --tables --format json
"""

import argparse
import sys
import json
from pathlib import Path

try:
    import pdfplumber
except ImportError:
    print("Error: 'pdfplumber' package is required. Install with: pip install pdfplumber", file=sys.stderr)
    sys.exit(1)


def parse_page_range(page_spec: str, total_pages: int) -> list:
    """Parse page specification into list of page numbers (0-indexed).

    Accepts "all" (or empty) for every page, comma-separated 1-based
    page numbers, and dash ranges such as "2-5". Out-of-range entries
    are dropped.
    """
    if not page_spec or page_spec.lower() == 'all':
        return list(range(total_pages))

    selected = []
    for token in page_spec.split(','):
        token = token.strip()
        if '-' in token:
            lo, hi = token.split('-')
            first = int(lo) - 1
            last = min(int(hi), total_pages)
            selected.extend(range(first, last))
        else:
            selected.append(int(token) - 1)

    # Keep only indices that actually exist in the document.
    return [p for p in selected if 0 <= p < total_pages]


def extract_pdf(file_path: str, pages: str = None, extract_tables: bool = False, output_format: str = 'text'):
    """Extract content from PDF.

    Returns a JSON string (format='json') or a plain-text dump
    (format='text'), covering the pages selected by *pages*.
    """
    with pdfplumber.open(file_path) as pdf:
        total = len(pdf.pages)
        wanted = parse_page_range(pages, total)

        if output_format == 'json':
            report = {
                "file": str(file_path),
                "total_pages": total,
                "extracted_pages": len(wanted),
                "content": []
            }

            for idx in wanted:
                current = pdf.pages[idx]
                entry = {
                    "page": idx + 1,
                    "text": current.extract_text() or ""
                }
                if extract_tables:
                    found = current.extract_tables()
                    entry["tables"] = found if found else []
                report["content"].append(entry)

            return json.dumps(report, indent=2)

        # text format
        chunks = []
        for idx in wanted:
            current = pdf.pages[idx]
            body = current.extract_text()
            if body:
                chunks.append(f"--- Page {idx + 1} ---\n{body}")

            if extract_tables:
                for t, tbl in enumerate(current.extract_tables()):
                    chunks.append(f"\n[Table {t + 1}]")
                    for row in tbl:
                        chunks.append(" | ".join(str(cell) if cell else "" for cell in row))

        return '\n\n'.join(chunks)


def main():
    parser = argparse.ArgumentParser(description='Extract text and tables from PDFs')
    parser.add_argument('--file', required=True, help='Path to PDF file')
    parser.add_argument('--pages', help='Page range (e.g., "1-5", "1,3,5", "all")')
    parser.add_argument('--tables', action='store_true', help='Extract tables')
    parser.add_argument('--format', choices=['text', 'json'], default='text', help='Output format')

    args = parser.parse_args()

    if not Path(args.file).exists():
        print(f"Error: File not found: {args.file}", file=sys.stderr)
        sys.exit(1)

    try:
        print(extract_pdf(args.file, args.pages, args.tables, args.format))
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
Install with: pip install PyPDF2", file=sys.stderr) + sys.exit(1) + + +def merge_pdfs(file_paths: list, output_path: str): + """Merge multiple PDF files.""" + merger = PdfMerger() + + for file_path in file_paths: + if not Path(file_path).exists(): + raise FileNotFoundError(f"File not found: {file_path}") + merger.append(file_path) + + merger.write(output_path) + merger.close() + + return output_path + + +def main(): + parser = argparse.ArgumentParser(description='Merge PDF files') + parser.add_argument('--files', nargs='+', required=True, help='PDF files to merge') + parser.add_argument('--output', required=True, help='Output merged PDF path') + + args = parser.parse_args() + + try: + result = merge_pdfs(args.files, args.output) + print(json.dumps({ + "status": "success", + "file": result, + "merged_count": len(args.files) + })) + except FileNotFoundError as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/pdf/tools/pdf_info.py b/coderrr-skills/skills/pdf/tools/pdf_info.py new file mode 100644 index 0000000..76ffd44 --- /dev/null +++ b/coderrr-skills/skills/pdf/tools/pdf_info.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python3 +""" +Get PDF metadata and information. + +Usage: + python pdf_info.py --file document.pdf +""" + +import argparse +import sys +import json +from pathlib import Path + +try: + from PyPDF2 import PdfReader +except ImportError: + print("Error: 'PyPDF2' package is required. 
Install with: pip install PyPDF2", file=sys.stderr) + sys.exit(1) + + +def get_pdf_info(file_path: str) -> dict: + """Get PDF metadata and structure information.""" + path = Path(file_path) + reader = PdfReader(file_path) + + # Get file size + file_size = path.stat().st_size + + # Get metadata + metadata = reader.metadata + meta_dict = {} + if metadata: + for key in ['/Title', '/Author', '/Subject', '/Creator', '/Producer', '/CreationDate', '/ModDate']: + if key in metadata: + meta_dict[key.lstrip('/')] = str(metadata[key]) + + # Page info + pages_info = [] + for i, page in enumerate(reader.pages[:10]): # First 10 pages + mediabox = page.mediabox + pages_info.append({ + "page": i + 1, + "width": float(mediabox.width), + "height": float(mediabox.height) + }) + + return { + "file": str(path.absolute()), + "file_size": file_size, + "file_size_human": f"{file_size / 1024:.2f} KB" if file_size < 1024 * 1024 else f"{file_size / 1024 / 1024:.2f} MB", + "page_count": len(reader.pages), + "encrypted": reader.is_encrypted, + "metadata": meta_dict, + "pages": pages_info + } + + +def main(): + parser = argparse.ArgumentParser(description='Get PDF information') + parser.add_argument('--file', required=True, help='PDF file to analyze') + + args = parser.parse_args() + + if not Path(args.file).exists(): + print(f"Error: File not found: {args.file}", file=sys.stderr) + sys.exit(1) + + try: + result = get_pdf_info(args.file) + print(json.dumps(result, indent=2)) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/pdf/tools/split_pdf.py b/coderrr-skills/skills/pdf/tools/split_pdf.py new file mode 100644 index 0000000..5a2042d --- /dev/null +++ b/coderrr-skills/skills/pdf/tools/split_pdf.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python3 +""" +Split PDF into separate files. 
+ +Usage: + python split_pdf.py --file document.pdf --output-dir ./pages --pages each +""" + +import argparse +import sys +import json +from pathlib import Path + +try: + from PyPDF2 import PdfReader, PdfWriter +except ImportError: + print("Error: 'PyPDF2' package is required. Install with: pip install PyPDF2", file=sys.stderr) + sys.exit(1) + + +def split_pdf(file_path: str, output_dir: str, pages_spec: str = 'each'): + """Split PDF into separate files.""" + reader = PdfReader(file_path) + total_pages = len(reader.pages) + output_path = Path(output_dir) + output_path.mkdir(parents=True, exist_ok=True) + + base_name = Path(file_path).stem + output_files = [] + + if pages_spec == 'each': + # Split into individual pages + for i in range(total_pages): + writer = PdfWriter() + writer.add_page(reader.pages[i]) + + output_file = output_path / f"{base_name}_page_{i + 1}.pdf" + with open(output_file, 'wb') as f: + writer.write(f) + output_files.append(str(output_file)) + + else: + # Split by ranges (e.g., "1-3,4-6,7-10") + ranges = pages_spec.split(',') + for idx, range_spec in enumerate(ranges): + range_spec = range_spec.strip() + if '-' in range_spec: + start, end = map(int, range_spec.split('-')) + else: + start = end = int(range_spec) + + writer = PdfWriter() + for page_num in range(start - 1, min(end, total_pages)): + writer.add_page(reader.pages[page_num]) + + output_file = output_path / f"{base_name}_part_{idx + 1}.pdf" + with open(output_file, 'wb') as f: + writer.write(f) + output_files.append(str(output_file)) + + return output_files + + +def main(): + parser = argparse.ArgumentParser(description='Split PDF files') + parser.add_argument('--file', required=True, help='PDF file to split') + parser.add_argument('--output-dir', required=True, help='Output directory') + parser.add_argument('--pages', default='each', help='Page spec: "each" or ranges like "1-3,4-6"') + + args = parser.parse_args() + + if not Path(args.file).exists(): + print(f"Error: File not found: 
{args.file}", file=sys.stderr) + sys.exit(1) + + try: + result = split_pdf(args.file, args.output_dir, args.pages) + print(json.dumps({ + "status": "success", + "files": result, + "count": len(result) + }, indent=2)) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/pptx/Skills.md b/coderrr-skills/skills/pptx/Skills.md new file mode 100644 index 0000000..1db79fe --- /dev/null +++ b/coderrr-skills/skills/pptx/Skills.md @@ -0,0 +1,146 @@ +--- +name: pptx +description: Create, edit, and analyze PowerPoint presentations. Use this skill when the user asks to create slides, modify presentations, extract content from PPTX files, add speaker notes, or analyze presentation structure. Supports layouts, images, charts, and professional formatting. +--- + +This skill provides comprehensive PowerPoint manipulation using python-pptx. It handles presentation creation, slide editing, content extraction, and structural analysis. + +The user provides presentation requirements or existing files to process. They may want to create new presentations, modify existing ones, or extract content from slides. + +## Approach + +Before invoking tools, understand the presentation task: +- **Create new**: Use `create_pptx` with slide specifications +- **Extract content**: Use `read_pptx` to get text and structure +- **Modify slides**: Use `edit_pptx` to update content +- **Analyze structure**: Use `analyze_pptx` for presentation overview + +## Tools + +### create_pptx + +Creates PowerPoint presentations with specified slides and content. 
+ +```bash +python tools/create_pptx.py --output <path> --title <title> --slides <json> +``` + +**Arguments:** +- `--output` (required): Output file path (.pptx) +- `--title` (required): Presentation title (first slide) +- `--slides` (required): JSON array of slide specifications + +**Slides JSON Structure:** +```json +[ + {"layout": "title", "title": "Main Title", "subtitle": "Subtitle"}, + {"layout": "content", "title": "Slide Title", "content": ["Bullet 1", "Bullet 2"]}, + {"layout": "two_content", "title": "Comparison", "left": ["Left items"], "right": ["Right items"]}, + {"layout": "section", "title": "Section Header"}, + {"layout": "blank", "notes": "Speaker notes here"} +] +``` + +**Layouts:** +- `title` - Title slide with subtitle +- `content` - Title with bullet points +- `two_content` - Two column layout +- `section` - Section header +- `blank` - Blank slide + +**When to use:** +- Generating presentations from data +- Creating report slides +- Building pitch decks +- Automating slide generation + +--- + +### read_pptx + +Extracts content from existing PowerPoint files. + +```bash +python tools/read_pptx.py --file <path> [--format <text|json|markdown>] [--include-notes] +``` + +**Arguments:** +- `--file` (required): Path to PowerPoint file +- `--format` (optional): Output format (default: text) +- `--include-notes` (optional): Include speaker notes + +**When to use:** +- Extracting presentation content +- Converting slides to other formats +- Reading speaker notes +- Processing uploaded presentations + +--- + +### edit_pptx + +Modifies existing PowerPoint presentations. 
+ +```bash +python tools/edit_pptx.py --file <path> --output <path> --operations <json> +``` + +**Arguments:** +- `--file` (required): Input PowerPoint file +- `--output` (required): Output file path +- `--operations` (required): JSON array of operations + +**Operations:** +```json +[ + {"action": "add_slide", "layout": "content", "title": "New Slide", "content": ["Point 1"]}, + {"action": "update_slide", "index": 2, "title": "Updated Title"}, + {"action": "add_notes", "index": 1, "notes": "Speaker notes..."}, + {"action": "delete_slide", "index": 5} +] +``` + +**When to use:** +- Adding slides to existing presentations +- Updating slide content +- Adding speaker notes +- Modifying presentation structure + +--- + +### analyze_pptx + +Analyzes presentation structure and provides metadata. + +```bash +python tools/analyze_pptx.py --file <path> +``` + +**Output:** JSON with slide count, layouts used, content summary, and word count. + +**When to use:** +- Understanding presentation structure +- Auditing slide content +- Getting presentation statistics +- Quality assurance + +## Common Patterns + +### Create Simple Presentation +```bash +python tools/create_pptx.py --output deck.pptx --title "Q4 Report" --slides '[{"layout": "content", "title": "Summary", "content": ["Revenue up 15%", "New customers: 500"]}]' +``` + +### Extract All Content +```bash +python tools/read_pptx.py --file presentation.pptx --format text --include-notes +``` + +### Add Slide to Existing Deck +```bash +python tools/edit_pptx.py --file deck.pptx --output updated.pptx --operations '[{"action": "add_slide", "layout": "content", "title": "Conclusion", "content": ["Key takeaways"]}]' +``` + +## Dependencies + +Requires `python-pptx>=0.6.21`. Automatically installed with the skill. 
diff --git a/coderrr-skills/skills/pptx/requirements.txt b/coderrr-skills/skills/pptx/requirements.txt new file mode 100644 index 0000000..529f7b4 --- /dev/null +++ b/coderrr-skills/skills/pptx/requirements.txt @@ -0,0 +1 @@ +python-pptx>=0.6.21 diff --git a/coderrr-skills/skills/pptx/tools/analyze_pptx.py b/coderrr-skills/skills/pptx/tools/analyze_pptx.py new file mode 100644 index 0000000..697faa8 --- /dev/null +++ b/coderrr-skills/skills/pptx/tools/analyze_pptx.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 +""" +Analyze PowerPoint presentation structure. + +Usage: + python analyze_pptx.py --file presentation.pptx +""" + +import argparse +import sys +import json +from pathlib import Path +from collections import Counter + +try: + from pptx import Presentation +except ImportError: + print("Error: 'python-pptx' package is required. Install with: pip install python-pptx", file=sys.stderr) + sys.exit(1) + + +def analyze_pptx(file_path: str) -> dict: + """Analyze presentation structure.""" + prs = Presentation(file_path) + + word_count = 0 + layouts_used = Counter() + slides_with_notes = 0 + + for slide in prs.slides: + layouts_used[slide.slide_layout.name] += 1 + + for shape in slide.shapes: + if hasattr(shape, 'text'): + word_count += len(shape.text.split()) + + if slide.has_notes_slide: + notes = slide.notes_slide.notes_text_frame.text + if notes.strip(): + slides_with_notes += 1 + + return { + "file": str(file_path), + "statistics": { + "slide_count": len(prs.slides), + "word_count": word_count, + "slides_with_notes": slides_with_notes + }, + "layouts_used": dict(layouts_used.most_common()), + "dimensions": { + "width": prs.slide_width.inches, + "height": prs.slide_height.inches + } + } + + +def main(): + parser = argparse.ArgumentParser(description='Analyze PowerPoint presentations') + parser.add_argument('--file', required=True, help='Path to PowerPoint file') + + args = parser.parse_args() + + if not Path(args.file).exists(): + print(f"Error: File not found: 
{args.file}", file=sys.stderr) + sys.exit(1) + + try: + result = analyze_pptx(args.file) + print(json.dumps(result, indent=2)) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/pptx/tools/create_pptx.py b/coderrr-skills/skills/pptx/tools/create_pptx.py new file mode 100644 index 0000000..56ef84a --- /dev/null +++ b/coderrr-skills/skills/pptx/tools/create_pptx.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python3 +""" +Create PowerPoint presentations. + +Usage: + python create_pptx.py --output deck.pptx --title "Title" --slides '[...]' +""" + +import argparse +import sys +import json +from pathlib import Path + +try: + from pptx import Presentation + from pptx.util import Inches, Pt +except ImportError: + print("Error: 'python-pptx' package is required. Install with: pip install python-pptx", file=sys.stderr) + sys.exit(1) + + +def add_slide(prs, slide_spec): + """Add a slide based on specification.""" + layout_name = slide_spec.get('layout', 'content') + + # Map layout names to indices (standard template) + layout_map = { + 'title': 0, # Title Slide + 'content': 1, # Title and Content + 'section': 2, # Section Header + 'two_content': 3, # Two Content + 'comparison': 4, # Comparison + 'blank': 6 # Blank + } + + layout_idx = layout_map.get(layout_name, 1) + layout = prs.slide_layouts[layout_idx] + slide = prs.slides.add_slide(layout) + + # Add title + if hasattr(slide.shapes, 'title') and slide.shapes.title: + slide.shapes.title.text = slide_spec.get('title', '') + + # Handle different layouts + if layout_name == 'title': + # Title slide with subtitle + if len(slide.placeholders) > 1: + subtitle = slide.placeholders[1] + subtitle.text = slide_spec.get('subtitle', '') + + elif layout_name == 'content': + # Content slide with bullets + content = slide_spec.get('content', []) + if len(slide.placeholders) > 1: + body = slide.placeholders[1] + tf = body.text_frame + tf.text = 
content[0] if content else '' + for item in content[1:]: + p = tf.add_paragraph() + p.text = item + p.level = 0 + + elif layout_name == 'two_content': + # Two column layout + left_content = slide_spec.get('left', []) + right_content = slide_spec.get('right', []) + + placeholders = list(slide.placeholders) + if len(placeholders) > 1 and left_content: + tf = placeholders[1].text_frame + tf.text = left_content[0] + for item in left_content[1:]: + p = tf.add_paragraph() + p.text = item + + if len(placeholders) > 2 and right_content: + tf = placeholders[2].text_frame + tf.text = right_content[0] + for item in right_content[1:]: + p = tf.add_paragraph() + p.text = item + + # Add speaker notes + if 'notes' in slide_spec: + notes_slide = slide.notes_slide + notes_slide.notes_text_frame.text = slide_spec['notes'] + + return slide + + +def create_pptx(output_path: str, title: str, slides_spec: list): + """Create a PowerPoint presentation.""" + prs = Presentation() + + # Add title slide + title_slide_layout = prs.slide_layouts[0] + title_slide = prs.slides.add_slide(title_slide_layout) + title_slide.shapes.title.text = title + + # Add content slides + for slide_spec in slides_spec: + add_slide(prs, slide_spec) + + prs.save(output_path) + return output_path + + +def main(): + parser = argparse.ArgumentParser(description='Create PowerPoint presentations') + parser.add_argument('--output', required=True, help='Output file path (.pptx)') + parser.add_argument('--title', required=True, help='Presentation title') + parser.add_argument('--slides', required=True, help='JSON array of slide specifications') + + args = parser.parse_args() + + try: + slides = json.loads(args.slides) + except json.JSONDecodeError as e: + print(f"Error: Invalid slides JSON - {e}", file=sys.stderr) + sys.exit(1) + + try: + result = create_pptx(args.output, args.title, slides) + print(json.dumps({"status": "success", "file": result, "slides": len(slides) + 1})) + except Exception as e: + print(f"Error: {e}", 
file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/pptx/tools/edit_pptx.py b/coderrr-skills/skills/pptx/tools/edit_pptx.py new file mode 100644 index 0000000..4fa825c --- /dev/null +++ b/coderrr-skills/skills/pptx/tools/edit_pptx.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python3 +""" +Edit PowerPoint presentations. + +Usage: + python edit_pptx.py --file input.pptx --output output.pptx --operations '[...]' +""" + +import argparse +import sys +import json +from pathlib import Path + +try: + from pptx import Presentation +except ImportError: + print("Error: 'python-pptx' package is required. Install with: pip install python-pptx", file=sys.stderr) + sys.exit(1) + + +def apply_operation(prs, operation): + """Apply a single edit operation.""" + action = operation.get('action') + + if action == 'add_slide': + layout_map = {'title': 0, 'content': 1, 'section': 2, 'two_content': 3, 'blank': 6} + layout_idx = layout_map.get(operation.get('layout', 'content'), 1) + layout = prs.slide_layouts[layout_idx] + slide = prs.slides.add_slide(layout) + + if hasattr(slide.shapes, 'title') and slide.shapes.title: + slide.shapes.title.text = operation.get('title', '') + + content = operation.get('content', []) + if content and len(slide.placeholders) > 1: + body = slide.placeholders[1] + tf = body.text_frame + tf.text = content[0] + for item in content[1:]: + p = tf.add_paragraph() + p.text = item + + elif action == 'update_slide': + idx = operation.get('index', 1) - 1 + if 0 <= idx < len(prs.slides): + slide = prs.slides[idx] + if 'title' in operation and hasattr(slide.shapes, 'title'): + slide.shapes.title.text = operation['title'] + + elif action == 'add_notes': + idx = operation.get('index', 1) - 1 + if 0 <= idx < len(prs.slides): + slide = prs.slides[idx] + notes_slide = slide.notes_slide + notes_slide.notes_text_frame.text = operation.get('notes', '') + + elif action == 'delete_slide': + idx = operation.get('index', 1) - 1 + if 0 
<= idx < len(prs.slides): + rId = prs.slides._sldIdLst[idx].rId + prs.part.drop_rel(rId) + del prs.slides._sldIdLst[idx] + + +def edit_pptx(input_path: str, output_path: str, operations: list): + """Edit a PowerPoint presentation.""" + prs = Presentation(input_path) + + for operation in operations: + apply_operation(prs, operation) + + prs.save(output_path) + return output_path + + +def main(): + parser = argparse.ArgumentParser(description='Edit PowerPoint presentations') + parser.add_argument('--file', required=True, help='Input PowerPoint file') + parser.add_argument('--output', required=True, help='Output file path') + parser.add_argument('--operations', required=True, help='JSON array of operations') + + args = parser.parse_args() + + if not Path(args.file).exists(): + print(f"Error: File not found: {args.file}", file=sys.stderr) + sys.exit(1) + + try: + operations = json.loads(args.operations) + except json.JSONDecodeError as e: + print(f"Error: Invalid operations JSON - {e}", file=sys.stderr) + sys.exit(1) + + try: + result = edit_pptx(args.file, args.output, operations) + print(json.dumps({"status": "success", "file": result})) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/pptx/tools/read_pptx.py b/coderrr-skills/skills/pptx/tools/read_pptx.py new file mode 100644 index 0000000..18969a9 --- /dev/null +++ b/coderrr-skills/skills/pptx/tools/read_pptx.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python3 +""" +Read and extract content from PowerPoint files. + +Usage: + python read_pptx.py --file presentation.pptx --format text +""" + +import argparse +import sys +import json +from pathlib import Path + +try: + from pptx import Presentation +except ImportError: + print("Error: 'python-pptx' package is required. 
Install with: pip install python-pptx", file=sys.stderr) + sys.exit(1) + + +def extract_slide_text(slide): + """Extract all text from a slide.""" + texts = [] + for shape in slide.shapes: + if hasattr(shape, 'text') and shape.text: + texts.append(shape.text) + return texts + + +def read_pptx(file_path: str, output_format: str = 'text', include_notes: bool = False): + """Read PowerPoint content.""" + prs = Presentation(file_path) + + if output_format == 'json': + result = { + "file": str(file_path), + "slide_count": len(prs.slides), + "slides": [] + } + + for i, slide in enumerate(prs.slides): + slide_data = { + "number": i + 1, + "layout": slide.slide_layout.name, + "content": extract_slide_text(slide) + } + + if include_notes and slide.has_notes_slide: + notes = slide.notes_slide.notes_text_frame.text + slide_data["notes"] = notes + + result["slides"].append(slide_data) + + return json.dumps(result, indent=2) + + elif output_format == 'markdown': + lines = [] + for i, slide in enumerate(prs.slides): + lines.append(f"## Slide {i + 1}") + lines.append("") + for text in extract_slide_text(slide): + lines.append(f"- {text}") + + if include_notes and slide.has_notes_slide: + notes = slide.notes_slide.notes_text_frame.text + if notes.strip(): + lines.append("") + lines.append(f"*Notes: {notes}*") + + lines.append("") + + return '\n'.join(lines) + + else: # text + lines = [] + for i, slide in enumerate(prs.slides): + lines.append(f"=== Slide {i + 1} ===") + for text in extract_slide_text(slide): + lines.append(text) + + if include_notes and slide.has_notes_slide: + notes = slide.notes_slide.notes_text_frame.text + if notes.strip(): + lines.append(f"[Notes: {notes}]") + + lines.append("") + + return '\n'.join(lines) + + +def main(): + parser = argparse.ArgumentParser(description='Read PowerPoint files') + parser.add_argument('--file', required=True, help='Path to PowerPoint file') + parser.add_argument('--format', choices=['text', 'json', 'markdown'], default='text') + 
parser.add_argument('--include-notes', action='store_true', help='Include speaker notes') + + args = parser.parse_args() + + if not Path(args.file).exists(): + print(f"Error: File not found: {args.file}", file=sys.stderr) + sys.exit(1) + + try: + result = read_pptx(args.file, args.format, args.include_notes) + print(result) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/skill-creator/Skills.md b/coderrr-skills/skills/skill-creator/Skills.md new file mode 100644 index 0000000..0eb8c08 --- /dev/null +++ b/coderrr-skills/skills/skill-creator/Skills.md @@ -0,0 +1,158 @@ +--- +name: skill-creator +description: Interactive tool for building new custom skills for Coderrr. Use this skill when the user wants to create a new skill, scaffold a skill structure, generate tool templates, or set up skill documentation. Guides through the complete skill creation process. +--- + +This skill helps create new skills for the Coderrr marketplace. It scaffolds the required file structure, generates tool templates, and creates proper documentation. + +The user wants to create a new skill. They may provide a name, description, and list of tools they want to include. + +## Approach + +When creating a new skill: +1. **Initialize**: Use `init_skill` to scaffold the structure +2. **Add tools**: Use `add_tool` for each tool in the skill +3. **Finalize**: Use `finalize_skill` to validate and complete + +## Tools + +### init_skill + +Scaffolds a new skill directory structure. 
+ +```bash +python tools/init_skill.py --name <skill-name> --description <desc> --output-dir <path> [--author <name>] +``` + +**Arguments:** +- `--name` (required): Skill name (lowercase, hyphens for spaces) +- `--description` (required): One-line description of the skill +- `--output-dir` (required): Directory to create skill in +- `--author` (optional): Skill author name + +**Creates:** +``` +skill-name/ +ā”œā”€ā”€ Skills.md # Documentation template +ā”œā”€ā”€ requirements.txt # Empty dependencies file +└── tools/ # Empty tools directory +``` + +**When to use:** +- Starting a new skill project +- Setting up skill structure +- Creating skill scaffolding + +--- + +### add_tool + +Generates a tool template with proper structure. + +```bash +python tools/add_tool.py --skill-dir <path> --tool-name <name> --description <desc> [--args <json>] +``` + +**Arguments:** +- `--skill-dir` (required): Path to skill directory +- `--tool-name` (required): Tool name (lowercase, underscores) +- `--description` (required): What the tool does +- `--args` (optional): JSON array of argument definitions + +**Args JSON:** +```json +[ + {"name": "input", "type": "string", "required": true, "help": "Input file path"}, + {"name": "output", "type": "string", "required": false, "help": "Output file path"}, + {"name": "verbose", "type": "flag", "help": "Enable verbose output"} +] +``` + +**When to use:** +- Adding tools to a skill +- Generating tool boilerplate +- Setting up argument parsing + +--- + +### finalize_skill + +Validates and finalizes a skill for publishing. 
+ +```bash +python tools/finalize_skill.py --skill-dir <path> [--validate-only] +``` + +**Arguments:** +- `--skill-dir` (required): Path to skill directory +- `--validate-only` (optional): Only validate, don't modify + +**Validates:** +- Skills.md has required fields +- All tools have valid Python syntax +- requirements.txt is present +- Tools have docstrings and argparse + +**When to use:** +- Before publishing a skill +- Checking skill structure +- Validating tool implementations + +--- + +### list_templates + +Lists available tool templates for common patterns. + +```bash +python tools/list_templates.py [--category <category>] +``` + +**Categories:** +- `file` - File processing tools +- `web` - Web/HTTP tools +- `data` - Data manipulation tools +- `cli` - CLI interaction tools + +**When to use:** +- Finding template inspiration +- Exploring common patterns +- Starting with working examples + +## Skill Creation Workflow + +### Step 1: Initialize +```bash +python tools/init_skill.py --name my-skill --description "Description here" --output-dir ./skills +``` + +### Step 2: Add Tools +```bash +python tools/add_tool.py --skill-dir ./skills/my-skill --tool-name process_data --description "Process data files" --args '[{"name": "input", "type": "string", "required": true, "help": "Input file"}]' +``` + +### Step 3: Implement Tool Logic +Edit the generated tool file to add your implementation. + +### Step 4: Validate +```bash +python tools/finalize_skill.py --skill-dir ./skills/my-skill --validate-only +``` + +### Step 5: Finalize +```bash +python tools/finalize_skill.py --skill-dir ./skills/my-skill +``` + +## Best Practices + +1. **Use descriptive names** - Both skill and tool names should be clear +2. **Write detailed descriptions** - Help users understand when to use the skill +3. **Include examples** - Show real usage in Skills.md +4. **Handle errors gracefully** - Use proper exit codes and stderr +5. **Output JSON** - Structured output is easier to parse +6. 
**Document arguments** - Help text for every argument + +## Dependencies + +None - uses Python's standard library only. diff --git a/coderrr-skills/skills/skill-creator/tools/add_tool.py b/coderrr-skills/skills/skill-creator/tools/add_tool.py new file mode 100644 index 0000000..ea86986 --- /dev/null +++ b/coderrr-skills/skills/skill-creator/tools/add_tool.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python3 +""" +Add a tool template to a skill. + +Usage: + python add_tool.py --skill-dir ./my-skill --tool-name process_data --description "Process data" +""" + +import argparse +import sys +import json +from pathlib import Path + + +TOOL_TEMPLATE = '''#!/usr/bin/env python3 +""" +{description} + +Usage: + python {tool_name}.py {usage_args} +""" + +import argparse +import sys +import json + + +def {function_name}({function_args}): + """ + {description} + + Args: +{args_docstring} + + Returns: + dict: Result of the operation + """ + # TODO: Implement tool logic here + result = {{ + "status": "success" + }} + + return result + + +def main(): + parser = argparse.ArgumentParser(description='{description}') +{arg_parser_code} + + args = parser.parse_args() + + try: + result = {function_name}({function_call_args}) + print(json.dumps(result, indent=2)) + except Exception as e: + print(f"Error: {{e}}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() +''' + + +def generate_tool(skill_dir: str, tool_name: str, description: str, args_spec: list = None): + """Generate a tool template.""" + tools_dir = Path(skill_dir) / 'tools' + tools_dir.mkdir(exist_ok=True) + + args_spec = args_spec or [] + + # Generate code parts + function_name = tool_name.replace('-', '_') + function_args = ', '.join(arg['name'] for arg in args_spec) if args_spec else '' + + # Usage line args + usage_parts = [] + for arg in args_spec: + if arg.get('required'): + usage_parts.append(f"--{arg['name']} <{arg['name']}>") + else: + usage_parts.append(f"[--{arg['name']} <{arg['name']}>]") + usage_args 
= ' '.join(usage_parts) if usage_parts else '[options]' + + # Args docstring + if args_spec: + args_docstring = '\n'.join(f" {arg['name']}: {arg.get('help', 'No description')}" for arg in args_spec) + else: + args_docstring = ' None' + + # Argparser code + arg_parser_lines = [] + for arg in args_spec: + arg_type = arg.get('type', 'string') + required = arg.get('required', False) + help_text = arg.get('help', '') + + if arg_type == 'flag': + arg_parser_lines.append(f" parser.add_argument('--{arg['name']}', action='store_true', help='{help_text}')") + else: + req_str = ', required=True' if required else '' + arg_parser_lines.append(f" parser.add_argument('--{arg['name']}'{req_str}, help='{help_text}')") + + arg_parser_code = '\n'.join(arg_parser_lines) if arg_parser_lines else " # No arguments defined" + + # Function call args + function_call_args = ', '.join(f"args.{arg['name']}" for arg in args_spec) if args_spec else '' + + # Generate code + code = TOOL_TEMPLATE.format( + description=description, + tool_name=tool_name, + usage_args=usage_args, + function_name=function_name, + function_args=function_args, + args_docstring=args_docstring, + arg_parser_code=arg_parser_code, + function_call_args=function_call_args + ) + + # Write file + tool_file = tools_dir / f'{tool_name}.py' + tool_file.write_text(code) + + return { + "status": "success", + "file": str(tool_file), + "tool_name": tool_name + } + + +def main(): + parser = argparse.ArgumentParser(description='Add a tool to a skill') + parser.add_argument('--skill-dir', required=True, help='Skill directory') + parser.add_argument('--tool-name', required=True, help='Tool name') + parser.add_argument('--description', required=True, help='Tool description') + parser.add_argument('--args', help='JSON array of argument definitions') + + args = parser.parse_args() + + args_spec = [] + if args.args: + try: + args_spec = json.loads(args.args) + except json.JSONDecodeError as e: + print(f"Error: Invalid args JSON - {e}", 
def validate_skill(skill_dir: str) -> dict:
    """Validate a skill directory's structure and files.

    Checks that Skills.md exists with name/description YAML frontmatter,
    that a tools/ directory with parseable Python files exists, and warns
    about optional-but-recommended items (requirements.txt, docstrings).

    Args:
        skill_dir: Path to the skill directory to validate.

    Returns:
        dict with keys: valid (bool, True iff no issues), issues (list of
        blocking problems), warnings (list of non-blocking notes), and
        tool_count (number of *.py files under tools/).
    """
    skill_path = Path(skill_dir)
    issues = []
    warnings = []

    # --- Skills.md and its frontmatter ---
    skills_md = skill_path / 'Skills.md'
    if not skills_md.exists():
        issues.append("Skills.md not found")
    else:
        content = skills_md.read_text()
        if not content.startswith('---'):
            issues.append("Skills.md missing YAML frontmatter")
        else:
            match = re.match(r'^---\n(.*?)\n---', content, re.DOTALL)
            if match:
                frontmatter = match.group(1)
                if 'name:' not in frontmatter:
                    issues.append("Skills.md missing 'name' in frontmatter")
                if 'description:' not in frontmatter:
                    issues.append("Skills.md missing 'description' in frontmatter")
            else:
                # Bug fix: an opening '---' with no closing delimiter used to
                # slip through validation silently (match was None and nothing
                # was flagged).
                issues.append("Skills.md frontmatter not terminated with '---'")

    # --- requirements.txt (optional) ---
    if not (skill_path / 'requirements.txt').exists():
        warnings.append("requirements.txt not found (optional but recommended)")

    # --- tools/ directory and its Python files ---
    tool_files = []
    tools_dir = skill_path / 'tools'
    if not tools_dir.exists():
        issues.append("tools/ directory not found")
    else:
        tool_files = list(tools_dir.glob('*.py'))
        if not tool_files:
            warnings.append("No Python tool files found in tools/")

        for tool_file in tool_files:
            try:
                source = tool_file.read_text()
                ast.parse(source)  # raises SyntaxError on invalid Python

                if 'argparse' not in source:
                    warnings.append(f"{tool_file.name}: No argparse import found")
                if '"""' not in source and "'''" not in source:
                    warnings.append(f"{tool_file.name}: No docstring found")
            except SyntaxError as e:
                issues.append(f"{tool_file.name}: Syntax error at line {e.lineno}")

    return {
        "valid": len(issues) == 0,
        "issues": issues,
        "warnings": warnings,
        # Reuse the glob result gathered above instead of re-scanning the
        # directory a second time.
        "tool_count": len(tool_files),
    }


def main():
    """CLI entry point: validate a skill directory and print a JSON report.

    Exits 1 if the directory is missing, validation raises, or the skill
    has blocking issues; exits 0 for a valid skill.
    """
    parser = argparse.ArgumentParser(description='Validate and finalize a skill')
    parser.add_argument('--skill-dir', required=True, help='Skill directory')
    parser.add_argument('--validate-only', action='store_true', help='Only validate')

    args = parser.parse_args()

    if not Path(args.skill_dir).exists():
        print(f"Error: Directory not found: {args.skill_dir}", file=sys.stderr)
        sys.exit(1)

    try:
        result = validate_skill(args.skill_dir)
        result["skill_dir"] = args.skill_dir

        if result["valid"]:
            result["message"] = "Skill is valid and ready for publishing"
        else:
            result["message"] = "Skill has issues that must be fixed"

        print(json.dumps(result, indent=2))

        # SystemExit is not an Exception subclass, so this is not re-caught
        # by the handler below.
        if not result["valid"]:
            sys.exit(1)

    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
SKILLS_MD_TEMPLATE = '''---
name: {name}
description: {description}
---

This skill provides [detailed description of what the skill does].

The user provides [what input the user gives]. They may want to [what they want to accomplish].

## Approach

Before invoking tools, understand [how to decide which tool to use]:
- **Scenario 1**: Use `tool_name` for [use case]
- **Scenario 2**: Use `other_tool` for [other use case]

## Tools

[Add tool documentation here]

## Common Patterns

[Add usage examples here]

## Best Practices

1. [Best practice 1]
2. [Best practice 2]

## Dependencies

[List dependencies or "None - uses Python's standard library only."]
'''


def init_skill(name: str, description: str, output_dir: str, author: str = None):
    """Create the directory skeleton for a new skill.

    Creates <output_dir>/<name>/ with a templated Skills.md, a stub
    requirements.txt, and an empty tools/ directory. Existing directories
    are reused (exist_ok), so re-running overwrites Skills.md and
    requirements.txt.

    Args:
        name: Skill name (also the directory name).
        description: One-line skill description for the frontmatter.
        output_dir: Parent directory to create the skill under.
        author: Optional author name recorded in the result metadata.

    Returns:
        dict with status, skill_dir, files_created, and (when given) author.
    """
    skill_dir = Path(output_dir) / name

    # Create directories
    skill_dir.mkdir(parents=True, exist_ok=True)
    (skill_dir / 'tools').mkdir(exist_ok=True)

    # Create Skills.md from the template
    skills_md = SKILLS_MD_TEMPLATE.format(name=name, description=description)
    (skill_dir / 'Skills.md').write_text(skills_md)

    # Create stub requirements.txt
    (skill_dir / 'requirements.txt').write_text('# Add dependencies here, one per line\n')

    result = {
        "status": "success",
        "skill_dir": str(skill_dir),
        "files_created": [
            str(skill_dir / 'Skills.md'),
            str(skill_dir / 'requirements.txt'),
            str(skill_dir / 'tools')
        ]
    }
    # Bug fix: 'author' was accepted (and passed from the CLI) but silently
    # discarded. Surface it in the result; backward-compatible because the
    # key is only added when an author was provided.
    if author:
        result["author"] = author
    return result


def main():
    """CLI entry point: validate the name and initialize the skill."""
    parser = argparse.ArgumentParser(description='Initialize a new skill')
    parser.add_argument('--name', required=True, help='Skill name')
    parser.add_argument('--description', required=True, help='Skill description')
    parser.add_argument('--output-dir', required=True, help='Output directory')
    parser.add_argument('--author', help='Skill author')

    args = parser.parse_args()

    # Validate name: alphanumeric once hyphens/underscores are stripped
    # (also rejects the empty string, since ''.isalnum() is False).
    if not args.name.replace('-', '').replace('_', '').isalnum():
        print("Error: Skill name must be alphanumeric with hyphens/underscores", file=sys.stderr)
        sys.exit(1)

    try:
        result = init_skill(args.name, args.description, args.output_dir, args.author)
        print(json.dumps(result, indent=2))
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
# Catalog of starter templates, keyed by category.
TEMPLATES = {
    "file": [
        {"name": "file_reader", "description": "Read and parse various file formats", "args": ["--file", "--format"]},
        {"name": "file_writer", "description": "Write content to files with formatting", "args": ["--output", "--content", "--format"]},
        {"name": "file_converter", "description": "Convert between file formats", "args": ["--input", "--output", "--from-format", "--to-format"]},
    ],
    "web": [
        {"name": "http_client", "description": "Make HTTP requests with custom headers", "args": ["--url", "--method", "--headers", "--data"]},
        {"name": "html_parser", "description": "Parse and extract from HTML", "args": ["--html", "--selector", "--format"]},
        {"name": "url_validator", "description": "Validate and analyze URLs", "args": ["--url", "--check-accessibility"]},
    ],
    "data": [
        {"name": "json_processor", "description": "Process and transform JSON data", "args": ["--input", "--query", "--transform"]},
        {"name": "csv_handler", "description": "Read, write, and transform CSV", "args": ["--file", "--columns", "--filter"]},
        {"name": "data_validator", "description": "Validate data against schemas", "args": ["--data", "--schema", "--format"]},
    ],
    "cli": [
        {"name": "command_runner", "description": "Execute shell commands safely", "args": ["--command", "--timeout", "--capture"]},
        {"name": "interactive_prompt", "description": "Interactive user prompts", "args": ["--prompt", "--type", "--default"]},
    ],
}


def list_templates(category: str = None):
    """Return template metadata, optionally filtered to one category.

    Args:
        category: Category key to filter by; falsy means "all categories".

    Returns:
        For a known category: {"category", "templates"}.
        For an unknown category: {"error", "available"}.
        With no category: {"categories", "total_templates", "templates"}.
    """
    # Falsy category (None or '') -> full catalog, matching CLI behavior.
    if not category:
        return {
            "categories": list(TEMPLATES.keys()),
            "total_templates": sum(len(entries) for entries in TEMPLATES.values()),
            "templates": TEMPLATES,
        }
    if category not in TEMPLATES:
        return {
            "error": f"Unknown category: {category}",
            "available": list(TEMPLATES.keys()),
        }
    return {"category": category, "templates": TEMPLATES[category]}


def main():
    """CLI entry point: print template listings as JSON."""
    cli = argparse.ArgumentParser(description='List tool templates')
    cli.add_argument('--category', choices=['file', 'web', 'data', 'cli'],
                     help='Filter by category')
    opts = cli.parse_args()
    print(json.dumps(list_templates(opts.category), indent=2))


if __name__ == '__main__':
    main()
It handles the full pipeline from HTTP request to clean text output, with support for CSS selectors to target specific elements. + +The user provides a URL or HTML content to process. They may want the raw HTML, extracted text, or content from specific elements on the page. + +## Approach + +Before invoking tools, understand what the user needs: +- **Raw HTML**: Use `fetch_page` alone when they need the full page source +- **Clean Text**: Chain `fetch_page` with `extract_text` for readable content +- **Specific Elements**: Use `--selector` to target navigation, articles, headers, or any CSS-selectable content +- **Batch Processing**: For multiple URLs, invoke `fetch_page` sequentially and aggregate results + +## Tools + +### fetch_page + +Fetches raw HTML content from any URL. Includes proper User-Agent headers to avoid bot detection. + +```bash +python tools/fetch_page.py --url <url> [--timeout <seconds>] +``` + +**Arguments:** +- `--url` (required): The complete URL including http:// or https:// +- `--timeout` (optional): Request timeout in seconds (default: 30) + +**Output:** Raw HTML to stdout. Errors to stderr with appropriate exit codes. + +**When to use:** +- User wants to see the page source +- First step before text extraction +- Checking if a URL is accessible +- Downloading page content for later analysis + +--- + +### extract_text + +Parses HTML and extracts clean, readable text. Automatically removes scripts, styles, navigation, headers, and footers for cleaner output. + +```bash +python tools/extract_text.py [--html <html_string>] [--selector <css_selector>] +``` + +**Arguments:** +- `--html` (optional): HTML string to parse. If omitted, reads from stdin (for piping) +- `--selector` (optional): CSS selector to target specific elements (e.g., `.article`, `#main`, `h1, h2, h3`) + +**Output:** Clean text with normalized whitespace. 
+ +**When to use:** +- User wants readable text, not HTML +- Extracting article content from news sites +- Getting text from specific page sections +- Processing HTML that was previously fetched or provided + +## Common Patterns + +### Full Page Text Extraction +```bash +python tools/fetch_page.py --url https://example.com | python tools/extract_text.py +``` + +### Extract Only Main Content +```bash +python tools/fetch_page.py --url https://blog.example.com/post | python tools/extract_text.py --selector "article, .post-content, main" +``` + +### Extract Headlines +```bash +python tools/fetch_page.py --url https://news.site.com | python tools/extract_text.py --selector "h1, h2, h3" +``` + +### Check Page Accessibility +```bash +python tools/fetch_page.py --url https://example.com --timeout 10 +``` + +## Best Practices + +1. **Always handle errors gracefully** - Network requests can fail. Check exit codes and stderr. +2. **Use specific selectors when possible** - `.article-body` gives cleaner results than extracting everything. +3. **Respect rate limits** - Add delays between requests when processing multiple URLs. +4. **Verify URLs** - Ensure URLs include the protocol (http:// or https://). +5. **Consider timeouts** - Long timeouts for slow servers, short for quick checks. + +## Error Handling + +| Exit Code | Meaning | Recovery | +|-----------|---------|----------| +| 0 | Success | - | +| 1 | Network error, invalid URL, or HTTP error | Check URL format, verify site is accessible | +| 2 | HTML parsing error | Verify HTML is valid, check selector syntax | +| 3 | Invalid CSS selector | Fix selector syntax | + +## Dependencies + +Requires `requests>=2.28.0` and `beautifulsoup4>=4.11.0`. These are automatically installed with the skill. 
def extract_text(html: str, selector: str = None) -> str:
    """Extract clean, readable text from HTML.

    Script/style/noscript/header/footer/nav elements are removed before
    extraction. With a selector, each matched element's text becomes one
    paragraph, joined by blank lines.

    Args:
        html: The HTML content to parse.
        selector: Optional CSS selector to target specific elements.

    Returns:
        Clean text with horizontal whitespace collapsed and paragraph
        breaks preserved; "" if the selector matches nothing.

    Raises:
        ValueError: If the HTML cannot be parsed or the selector is invalid.
    """
    try:
        soup = BeautifulSoup(html, 'html.parser')
    except Exception as e:
        raise ValueError(f"Failed to parse HTML: {e}")

    # Strip non-content elements so they don't pollute the extracted text.
    for element in soup(['script', 'style', 'noscript', 'header', 'footer', 'nav']):
        element.decompose()

    if selector:
        try:
            elements = soup.select(selector)
            if not elements:
                return ""
            text_parts = [elem.get_text(separator=' ', strip=True) for elem in elements]
            text = '\n\n'.join(text_parts)
        except Exception as e:
            raise ValueError(f"Invalid CSS selector '{selector}': {e}")
    else:
        text = soup.get_text(separator=' ', strip=True)

    # Bug fix: the previous r'\s+' -> ' ' pass collapsed newlines as well,
    # which destroyed the '\n\n' separators between selector matches and made
    # the follow-up r'\n\s*\n' substitution dead code. Collapse only
    # horizontal whitespace, trim spaces around newlines, then squeeze runs
    # of blank lines down to a single blank line.
    text = re.sub(r'[ \t]+', ' ', text)
    text = re.sub(r' *\n *', '\n', text)
    text = re.sub(r'\n{3,}', '\n\n', text)
    return text.strip()


def main():
    """CLI entry point: read HTML from --html or stdin and print clean text.

    Exit codes: 0 success, 2 parsing/input error, 3 invalid CSS selector.
    """
    parser = argparse.ArgumentParser(
        description='Extract text content from HTML',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='''
Examples:
    python extract_text.py --html "<div>Hello World</div>"
    cat page.html | python extract_text.py
    python extract_text.py --selector "article" < page.html
    echo "<p>Test</p>" | python extract_text.py --selector "p"
    '''
    )
    parser.add_argument(
        '--html',
        help='HTML content to parse (if not provided, reads from stdin)'
    )
    parser.add_argument(
        '--selector',
        help='CSS selector to target specific elements (e.g., ".content", "article", "h1")'
    )

    args = parser.parse_args()

    # Get HTML from argument or stdin; refuse to block on an empty terminal.
    if args.html:
        html = args.html
    else:
        if sys.stdin.isatty():
            print("Error: No HTML provided. Use --html argument or pipe HTML to stdin.", file=sys.stderr)
            sys.exit(2)
        html = sys.stdin.read()

    if not html.strip():
        print("Error: Empty HTML content", file=sys.stderr)
        sys.exit(2)

    try:
        text = extract_text(html, args.selector)
        if text:
            print(text)
        else:
            if args.selector:
                print(f"No content found matching selector: {args.selector}", file=sys.stderr)
    except ValueError as e:
        # Distinguish selector problems (exit 3) from parse problems (exit 2).
        if "selector" in str(e).lower():
            print(f"Error: {e}", file=sys.stderr)
            sys.exit(3)
        else:
            print(f"Error: {e}", file=sys.stderr)
            sys.exit(2)


if __name__ == '__main__':
    main()
def fetch_page(url: str, timeout: int = 30) -> str:
    """Download a URL and return the response body as text.

    Sends a desktop-browser User-Agent header so that simple bot filters
    do not block the request.

    Args:
        url: Fully-qualified URL (including http:// or https://).
        timeout: Seconds to wait before giving up on the request.

    Returns:
        The response body decoded as text.

    Raises:
        requests.RequestException: On any network or HTTP failure
            (raise_for_status converts 4xx/5xx into HTTPError).
    """
    ua = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
          'AppleWebKit/537.36 (KHTML, like Gecko) '
          'Chrome/91.0.4472.124 Safari/537.36')
    resp = requests.get(url, headers={'User-Agent': ua}, timeout=timeout)
    resp.raise_for_status()
    return resp.text


def main():
    """CLI entry point: fetch a URL and print its HTML to stdout.

    Exit codes: 0 success, 1 network error or invalid URL.
    """
    cli = argparse.ArgumentParser(
        description='Fetch HTML content from a URL',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='''
Examples:
    python fetch_page.py --url https://example.com
    python fetch_page.py --url https://example.com --timeout 60
    '''
    )
    cli.add_argument('--url', required=True, help='The URL to fetch')
    cli.add_argument('--timeout', type=int, default=30,
                     help='Request timeout in seconds (default: 30)')
    opts = cli.parse_args()

    def fail(message):
        # All failure paths share the same shape: message to stderr, exit 1.
        print(message, file=sys.stderr)
        sys.exit(1)

    # Handler order matters: the specific exception classes are subclasses
    # of RequestException, so the catch-all must come last.
    try:
        print(fetch_page(opts.url, opts.timeout))
    except requests.exceptions.MissingSchema:
        fail("Error: Invalid URL format. Make sure to include http:// or https://")
    except requests.exceptions.ConnectionError:
        fail(f"Error: Failed to connect to {opts.url}")
    except requests.exceptions.Timeout:
        fail(f"Error: Request timed out after {opts.timeout} seconds")
    except requests.exceptions.HTTPError as e:
        fail(f"Error: HTTP {e.response.status_code} - {e.response.reason}")
    except requests.exceptions.RequestException as e:
        fail(f"Error: {e}")


if __name__ == '__main__':
    main()
+ +```bash +python tools/start_browser.py [--browser <chromium|firefox|webkit>] [--headless] [--viewport <WxH>] +``` + +**Arguments:** +- `--browser` (optional): Browser engine (default: chromium) +- `--headless` (optional): Run without visible window +- `--viewport` (optional): Viewport size (e.g., "1920x1080") + +**Output:** Session ID for subsequent commands. + +**When to use:** +- Starting a test session +- Configuring browser options +- Setting viewport for responsive testing + +--- + +### navigate + +Navigates to a URL. + +```bash +python tools/navigate.py --session <id> --url <url> [--wait-until <event>] +``` + +**Arguments:** +- `--session` (required): Session ID from start_browser +- `--url` (required): URL to navigate to +- `--wait-until` (optional): Wait condition - `load`, `domcontentloaded`, `networkidle` + +**When to use:** +- Opening test pages +- Navigating between routes +- Starting user flows + +--- + +### interact + +Interacts with page elements. + +```bash +python tools/interact.py --session <id> --action <action> --selector <selector> [--value <value>] +``` + +**Arguments:** +- `--session` (required): Session ID +- `--action` (required): Action - `click`, `type`, `fill`, `hover`, `scroll`, `select` +- `--selector` (required): CSS selector or text selector +- `--value` (optional): Value for type/fill/select actions + +**Selector formats:** +- CSS: `#id`, `.class`, `button[type="submit"]` +- Text: `text=Login`, `text="Sign Up"` +- Role: `role=button[name="Submit"]` + +**When to use:** +- Clicking buttons +- Filling forms +- Hovering for tooltips +- Scrolling pages + +--- + +### verify + +Verifies page state and elements. 
+ +```bash +python tools/verify.py --session <id> --check <type> [--selector <selector>] [--expected <value>] +``` + +**Arguments:** +- `--session` (required): Session ID +- `--check` (required): Check type - `visible`, `hidden`, `text`, `value`, `title`, `url` +- `--selector` (optional): Element selector (for element checks) +- `--expected` (optional): Expected value for comparison + +**When to use:** +- Verifying element visibility +- Checking text content +- Validating form values +- Confirming navigation + +--- + +### screenshot + +Captures page screenshot. + +```bash +python tools/screenshot.py --session <id> --output <path> [--selector <selector>] [--full-page] +``` + +**Arguments:** +- `--session` (required): Session ID +- `--output` (required): Output file path +- `--selector` (optional): Capture specific element only +- `--full-page` (optional): Capture entire scrollable page + +**When to use:** +- Visual regression testing +- Documenting test results +- Bug reporting +- Before/after comparisons + +--- + +### generate_report + +Generates test report from session. 
+ +```bash +python tools/generate_report.py --session <id> --output <path> [--format <html|json|markdown>] +``` + +**Arguments:** +- `--session` (required): Session ID +- `--output` (required): Report output path +- `--format` (optional): Report format (default: html) + +**When to use:** +- Summarizing test results +- Creating documentation +- Sharing results + +## Common Patterns + +### Test Login Flow +```bash +# Start browser +python tools/start_browser.py --headless +# Navigate to login page +python tools/navigate.py --session $SESSION --url http://localhost:3000/login +# Fill credentials +python tools/interact.py --session $SESSION --action fill --selector "#email" --value "test@example.com" +python tools/interact.py --session $SESSION --action fill --selector "#password" --value "password123" +# Click login +python tools/interact.py --session $SESSION --action click --selector "button[type=submit]" +# Verify success +python tools/verify.py --session $SESSION --check url --expected "/dashboard" +``` + +### Responsive Testing +```bash +# Mobile viewport +python tools/start_browser.py --viewport 375x667 +python tools/navigate.py --session $SESSION --url http://localhost:3000 +python tools/screenshot.py --session $SESSION --output mobile.png + +# Desktop viewport +python tools/start_browser.py --viewport 1920x1080 +python tools/navigate.py --session $SESSION --url http://localhost:3000 +python tools/screenshot.py --session $SESSION --output desktop.png +``` + +### Visual Regression +```bash +python tools/start_browser.py --headless +python tools/navigate.py --session $SESSION --url http://localhost:3000 +python tools/screenshot.py --session $SESSION --output current.png --full-page +``` + +## Best Practices + +1. **Use headless for CI** - No display needed in pipelines +2. **Wait for network idle** - Ensure page fully loaded +3. **Prefer role selectors** - More resilient than CSS +4. **Take screenshots on failure** - Helps debugging +5. 
# Session records live in the user's home; shared by all webapp-testing tools.
SESSIONS_DIR = Path.home() / '.coderrr' / 'playwright_sessions'


def interact(session_id: str, action: str, selector: str, value: str = None):
    """Record an element interaction in the session's action log.

    Args:
        session_id: ID of an existing session (from start_browser).
        action: One of click/type/fill/hover/scroll/select.
        selector: CSS/text/role selector identifying the element.
        value: Payload for type/fill/select. May legitimately be "" (e.g.
            to clear a field).

    Returns:
        dict summarizing the recorded interaction.

    Raises:
        ValueError: If the session file does not exist.
    """
    session_file = SESSIONS_DIR / f"{session_id}.json"

    if not session_file.exists():
        raise ValueError(f"Session not found: {session_id}")

    session = json.loads(session_file.read_text())

    interaction = {
        "type": "interact",
        "action": action,
        "selector": selector
    }
    # Bug fix: the previous `if value:` check dropped empty-string values,
    # so a fill with --value "" was never recorded. Only omit when absent.
    if value is not None:
        interaction["value"] = value

    session["actions"].append(interaction)
    session_file.write_text(json.dumps(session, indent=2))

    return {
        "status": "success",
        "session_id": session_id,
        "action": action,
        "selector": selector,
        "value": value
    }


def main():
    """CLI entry point: record one interaction against a session."""
    parser = argparse.ArgumentParser(description='Interact with elements')
    parser.add_argument('--session', required=True, help='Session ID')
    parser.add_argument('--action', required=True,
                        choices=['click', 'type', 'fill', 'hover', 'scroll', 'select'])
    parser.add_argument('--selector', required=True, help='Element selector')
    parser.add_argument('--value', help='Value for type/fill/select')

    args = parser.parse_args()

    try:
        result = interact(args.session, args.action, args.selector, args.value)
        print(json.dumps(result, indent=2))
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
# Session records live in the user's home; shared by all webapp-testing tools.
SESSIONS_DIR = Path.home() / '.coderrr' / 'playwright_sessions'


def navigate(session_id: str, url: str, wait_until: str = 'load'):
    """Record a navigation step in the session log and return a summary.

    Args:
        session_id: ID of an existing session (from start_browser).
        url: Destination URL.
        wait_until: Playwright wait condition ('load', 'domcontentloaded',
            or 'networkidle').

    Returns:
        dict summarizing the recorded navigation.

    Raises:
        ValueError: If the session file does not exist.
    """
    session_file = SESSIONS_DIR / f"{session_id}.json"
    if not session_file.exists():
        raise ValueError(f"Session not found: {session_id}")

    state = json.loads(session_file.read_text())
    step = {"type": "navigate", "url": url, "wait_until": wait_until}
    state["actions"].append(step)
    state["current_url"] = url  # track where the session currently points
    session_file.write_text(json.dumps(state, indent=2))

    return {
        "status": "success",
        "session_id": session_id,
        "url": url,
        "wait_until": wait_until,
    }


def main():
    """CLI entry point: record a navigation against a session."""
    cli = argparse.ArgumentParser(description='Navigate to URL')
    cli.add_argument('--session', required=True, help='Session ID')
    cli.add_argument('--url', required=True, help='URL to navigate to')
    cli.add_argument('--wait-until', default='load',
                     choices=['load', 'domcontentloaded', 'networkidle'])
    opts = cli.parse_args()

    try:
        print(json.dumps(navigate(opts.session, opts.url, opts.wait_until), indent=2))
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
# Session records live in the user's home; shared by all webapp-testing tools.
SESSIONS_DIR = Path.home() / '.coderrr' / 'playwright_sessions'


def screenshot(session_id: str, output: str, selector: str = None, full_page: bool = False):
    """Log a screenshot request and write a placeholder output file.

    NOTE(review): this records the action and writes a text placeholder
    only; producing a real image requires driving Playwright against the
    live session.

    Args:
        session_id: ID of an existing session (from start_browser).
        output: Path for the screenshot file; parent dirs are created.
        selector: Optional selector to capture a single element.
        full_page: Whether to capture the entire scrollable page.

    Returns:
        dict summarizing the recorded screenshot (output is absolute).

    Raises:
        ValueError: If the session file does not exist.
    """
    session_file = SESSIONS_DIR / f"{session_id}.json"
    if not session_file.exists():
        raise ValueError(f"Session not found: {session_id}")

    state = json.loads(session_file.read_text())
    state["actions"].append({
        "type": "screenshot",
        "output": output,
        "selector": selector,
        "full_page": full_page,
    })
    session_file.write_text(json.dumps(state, indent=2))

    target = Path(output)
    target.parent.mkdir(parents=True, exist_ok=True)
    target.write_text("Screenshot placeholder - use actual Playwright for real capture")

    return {
        "status": "success",
        "session_id": session_id,
        "output": str(target.absolute()),
        "full_page": full_page,
        "selector": selector,
    }


def main():
    """CLI entry point: record a screenshot action for a session."""
    cli = argparse.ArgumentParser(description='Capture screenshot')
    cli.add_argument('--session', required=True, help='Session ID')
    cli.add_argument('--output', required=True, help='Output file path')
    cli.add_argument('--selector', help='Capture specific element')
    cli.add_argument('--full-page', action='store_true', help='Capture full page')
    opts = cli.parse_args()

    try:
        print(json.dumps(screenshot(opts.session, opts.output, opts.selector, opts.full_page), indent=2))
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
parser.add_argument('--session', required=True, help='Session ID') + parser.add_argument('--output', required=True, help='Output file path') + parser.add_argument('--selector', help='Capture specific element') + parser.add_argument('--full-page', action='store_true', help='Capture full page') + + args = parser.parse_args() + + try: + result = screenshot(args.session, args.output, args.selector, args.full_page) + print(json.dumps(result, indent=2)) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/webapp-testing/tools/start_browser.py b/coderrr-skills/skills/webapp-testing/tools/start_browser.py new file mode 100644 index 0000000..71c0f5d --- /dev/null +++ b/coderrr-skills/skills/webapp-testing/tools/start_browser.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python3 +""" +Start a browser testing session. + +Usage: + python start_browser.py --browser chromium --headless +""" + +import argparse +import sys +import json +import uuid +from pathlib import Path + +try: + from playwright.sync_api import sync_playwright +except ImportError: + print("Error: 'playwright' package is required. 
# Session storage (in real implementation, use proper session management)
SESSIONS_DIR = Path.home() / '.coderrr' / 'playwright_sessions'


def start_browser(browser: str = 'chromium', headless: bool = False, viewport: str = None):
    """Create a new browser-session record and return its metadata.

    Args:
        browser: Browser engine name (chromium/firefox/webkit).
        headless: Whether the session should run without a visible window.
        viewport: Optional "WIDTHxHEIGHT" string; defaults to 1280x720.

    Returns:
        dict with status, session_id, browser, headless, and viewport.

    Raises:
        ValueError: If viewport is provided but not of the form WIDTHxHEIGHT.
    """
    SESSIONS_DIR.mkdir(parents=True, exist_ok=True)

    session_id = str(uuid.uuid4())[:8]

    # Parse viewport, defaulting to 1280x720.
    width, height = 1280, 720
    if viewport:
        # Bug fix: malformed viewports were previously either silently
        # ignored (e.g. "big" fell back to the default with no message) or
        # crashed with an unhelpful int() ValueError (e.g. "axb"). Validate
        # explicitly and fail fast with a clear message instead.
        parts = viewport.lower().split('x')
        if len(parts) != 2 or not all(p.isdigit() for p in parts):
            raise ValueError(
                f"Invalid viewport '{viewport}' (expected WIDTHxHEIGHT, e.g. 1920x1080)"
            )
        width, height = int(parts[0]), int(parts[1])

    # Store session config (actual browser managed separately)
    session_config = {
        "id": session_id,
        "browser": browser,
        "headless": headless,
        "viewport": {"width": width, "height": height},
        "status": "ready",
        "actions": []
    }

    session_file = SESSIONS_DIR / f"{session_id}.json"
    session_file.write_text(json.dumps(session_config, indent=2))

    return {
        "status": "success",
        "session_id": session_id,
        "browser": browser,
        "headless": headless,
        "viewport": f"{width}x{height}"
    }


def main():
    """CLI entry point: create a session record and print its metadata."""
    parser = argparse.ArgumentParser(description='Start browser session')
    parser.add_argument('--browser', default='chromium', choices=['chromium', 'firefox', 'webkit'])
    parser.add_argument('--headless', action='store_true')
    parser.add_argument('--viewport', help='Viewport size (e.g., 1920x1080)')

    args = parser.parse_args()

    try:
        result = start_browser(args.browser, args.headless, args.viewport)
        print(json.dumps(result, indent=2))
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
+""" +Verify page state and elements. + +Usage: + python verify.py --session abc123 --check visible --selector "#success-msg" +""" + +import argparse +import sys +import json +from pathlib import Path + +SESSIONS_DIR = Path.home() / '.coderrr' / 'playwright_sessions' + + +def verify(session_id: str, check: str, selector: str = None, expected: str = None): + """Verify page state.""" + session_file = SESSIONS_DIR / f"{session_id}.json" + + if not session_file.exists(): + raise ValueError(f"Session not found: {session_id}") + + session = json.loads(session_file.read_text()) + + # Record verification + verification = { + "type": "verify", + "check": check, + "selector": selector, + "expected": expected + } + + session["actions"].append(verification) + session_file.write_text(json.dumps(session, indent=2)) + + # In real implementation, would actually perform verification + return { + "status": "success", + "session_id": session_id, + "check": check, + "selector": selector, + "expected": expected, + "passed": True, + "message": f"Verification '{check}' passed" + } + + +def main(): + parser = argparse.ArgumentParser(description='Verify page state') + parser.add_argument('--session', required=True, help='Session ID') + parser.add_argument('--check', required=True, + choices=['visible', 'hidden', 'text', 'value', 'title', 'url']) + parser.add_argument('--selector', help='Element selector') + parser.add_argument('--expected', help='Expected value') + + args = parser.parse_args() + + try: + result = verify(args.session, args.check, args.selector, args.expected) + print(json.dumps(result, indent=2)) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/xlsx/Skills.md b/coderrr-skills/skills/xlsx/Skills.md new file mode 100644 index 0000000..c89a197 --- /dev/null +++ b/coderrr-skills/skills/xlsx/Skills.md @@ -0,0 +1,169 @@ +--- +name: xlsx +description: Create and manipulate 
Excel spreadsheets with formulas and formatting. Use this skill when the user asks to create Excel files, read spreadsheet data, update cells, add formulas, format worksheets, or analyze Excel structure. Supports multiple sheets, cell formatting, and Excel formulas. +--- + +This skill provides comprehensive Excel manipulation using openpyxl. It handles spreadsheet creation, data reading/writing, formula insertion, and formatting. + +The user provides spreadsheet requirements or existing files to process. They may want to create reports, read data, update values, or apply formatting. + +## Approach + +Before invoking tools, understand the spreadsheet task: +- **Create new**: Use `create_xlsx` with data and structure +- **Read data**: Use `read_xlsx` to extract cell values +- **Modify cells**: Use `edit_xlsx` to update content +- **Analyze structure**: Use `analyze_xlsx` for workbook overview + +## Tools + +### create_xlsx + +Creates Excel workbooks with data, formulas, and formatting. + +```bash +python tools/create_xlsx.py --output <path> --sheets <json> +``` + +**Arguments:** +- `--output` (required): Output file path (.xlsx) +- `--sheets` (required): JSON specification of sheets and data + +**Sheets JSON Structure:** +```json +[ + { + "name": "Sheet1", + "headers": ["Name", "Value", "Total"], + "data": [ + ["Item A", 100, "=B2*1.1"], + ["Item B", 200, "=B3*1.1"] + ], + "column_widths": {"A": 20, "B": 15, "C": 15} + } +] +``` + +**Formula Support:** +- Start cell values with `=` for formulas +- Standard Excel formulas: `=SUM(A1:A10)`, `=AVERAGE(B:B)`, `=IF(A1>0,"Yes","No")` + +**When to use:** +- Generating data reports +- Creating templates +- Building formatted spreadsheets +- Automating Excel file creation + +--- + +### read_xlsx + +Reads data from Excel files. 
+ +```bash +python tools/read_xlsx.py --file <path> [--sheet <name>] [--range <A1:Z100>] [--format <json|csv|text>] +``` + +**Arguments:** +- `--file` (required): Path to Excel file +- `--sheet` (optional): Sheet name (default: active sheet) +- `--range` (optional): Cell range to read (e.g., "A1:D10") +- `--format` (optional): Output format (default: json) + +**When to use:** +- Extracting spreadsheet data +- Reading specific ranges +- Converting Excel to other formats +- Processing uploaded files + +--- + +### edit_xlsx + +Modifies existing Excel files. + +```bash +python tools/edit_xlsx.py --file <path> --output <path> --operations <json> +``` + +**Arguments:** +- `--file` (required): Input Excel file +- `--output` (required): Output file path +- `--operations` (required): JSON array of operations + +**Operations:** +```json +[ + {"action": "set_cell", "sheet": "Sheet1", "cell": "A1", "value": "Updated"}, + {"action": "set_range", "sheet": "Sheet1", "start": "A2", "data": [["Row1"], ["Row2"]]}, + {"action": "add_formula", "sheet": "Sheet1", "cell": "C10", "formula": "=SUM(C1:C9)"}, + {"action": "add_sheet", "name": "NewSheet"}, + {"action": "format_cell", "sheet": "Sheet1", "cell": "A1", "bold": true, "bg_color": "FFFF00"} +] +``` + +**When to use:** +- Updating cell values +- Adding formulas +- Applying formatting +- Modifying structure + +--- + +### analyze_xlsx + +Analyzes workbook structure and statistics. + +```bash +python tools/analyze_xlsx.py --file <path> +``` + +**Output:** JSON with sheet names, dimensions, cell counts, and formula locations. 
+ +**When to use:** +- Understanding workbook structure +- Getting sheet dimensions +- Finding formulas +- Auditing spreadsheets + +## Common Patterns + +### Create Simple Spreadsheet +```bash +python tools/create_xlsx.py --output data.xlsx --sheets '[{"name": "Data", "headers": ["ID", "Name", "Value"], "data": [[1, "Item A", 100], [2, "Item B", 200]]}]' +``` + +### Read Entire Sheet +```bash +python tools/read_xlsx.py --file data.xlsx --format json +``` + +### Read Specific Range +```bash +python tools/read_xlsx.py --file data.xlsx --sheet "Sheet1" --range "A1:C10" --format csv +``` + +### Update Cells +```bash +python tools/edit_xlsx.py --file data.xlsx --output updated.xlsx --operations '[{"action": "set_cell", "sheet": "Sheet1", "cell": "B2", "value": 150}]' +``` + +### Add Summary Formula +```bash +python tools/edit_xlsx.py --file data.xlsx --output updated.xlsx --operations '[{"action": "add_formula", "sheet": "Sheet1", "cell": "B10", "formula": "=SUM(B2:B9)"}]' +``` + +## Formula Examples + +| Formula | Description | +|---------|-------------| +| `=SUM(A1:A10)` | Sum of range | +| `=AVERAGE(B:B)` | Average of column | +| `=IF(A1>0,"Yes","No")` | Conditional | +| `=VLOOKUP(A1,Sheet2!A:B,2,FALSE)` | Lookup | +| `=CONCATENATE(A1," ",B1)` | Text join | +| `=TODAY()` | Current date | + +## Dependencies + +Requires `openpyxl>=3.1.0`. Automatically installed with the skill. diff --git a/coderrr-skills/skills/xlsx/requirements.txt b/coderrr-skills/skills/xlsx/requirements.txt new file mode 100644 index 0000000..9cc1e67 --- /dev/null +++ b/coderrr-skills/skills/xlsx/requirements.txt @@ -0,0 +1 @@ +openpyxl>=3.1.0 diff --git a/coderrr-skills/skills/xlsx/tools/analyze_xlsx.py b/coderrr-skills/skills/xlsx/tools/analyze_xlsx.py new file mode 100644 index 0000000..5810d76 --- /dev/null +++ b/coderrr-skills/skills/xlsx/tools/analyze_xlsx.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 +""" +Analyze Excel workbook structure. 
+ +Usage: + python analyze_xlsx.py --file data.xlsx +""" + +import argparse +import sys +import json +from pathlib import Path + +try: + from openpyxl import load_workbook + from openpyxl.utils import get_column_letter +except ImportError: + print("Error: 'openpyxl' package is required. Install with: pip install openpyxl", file=sys.stderr) + sys.exit(1) + + +def analyze_xlsx(file_path: str) -> dict: + """Analyze workbook structure.""" + wb = load_workbook(file_path) + + sheets_info = [] + total_cells = 0 + total_formulas = 0 + + for sheet_name in wb.sheetnames: + ws = wb[sheet_name] + + cell_count = 0 + formula_count = 0 + formulas = [] + + for row in ws.iter_rows(): + for cell in row: + if cell.value is not None: + cell_count += 1 + if isinstance(cell.value, str) and cell.value.startswith('='): + formula_count += 1 + if len(formulas) < 5: # Limit examples + formulas.append({ + "cell": cell.coordinate, + "formula": cell.value + }) + + sheets_info.append({ + "name": sheet_name, + "dimensions": f"A1:{get_column_letter(ws.max_column)}{ws.max_row}", + "rows": ws.max_row, + "columns": ws.max_column, + "cell_count": cell_count, + "formula_count": formula_count, + "sample_formulas": formulas + }) + + total_cells += cell_count + total_formulas += formula_count + + return { + "file": str(file_path), + "sheet_count": len(wb.sheetnames), + "total_cells": total_cells, + "total_formulas": total_formulas, + "sheets": sheets_info + } + + +def main(): + parser = argparse.ArgumentParser(description='Analyze Excel workbooks') + parser.add_argument('--file', required=True, help='Path to Excel file') + + args = parser.parse_args() + + if not Path(args.file).exists(): + print(f"Error: File not found: {args.file}", file=sys.stderr) + sys.exit(1) + + try: + result = analyze_xlsx(args.file) + print(json.dumps(result, indent=2)) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git 
a/coderrr-skills/skills/xlsx/tools/create_xlsx.py b/coderrr-skills/skills/xlsx/tools/create_xlsx.py new file mode 100644 index 0000000..0304f67 --- /dev/null +++ b/coderrr-skills/skills/xlsx/tools/create_xlsx.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python3 +""" +Create Excel workbooks. + +Usage: + python create_xlsx.py --output data.xlsx --sheets '[...]' +""" + +import argparse +import sys +import json +from pathlib import Path + +try: + from openpyxl import Workbook + from openpyxl.styles import Font, PatternFill, Alignment + from openpyxl.utils import get_column_letter +except ImportError: + print("Error: 'openpyxl' package is required. Install with: pip install openpyxl", file=sys.stderr) + sys.exit(1) + + +def create_xlsx(output_path: str, sheets_spec: list): + """Create an Excel workbook.""" + wb = Workbook() + + # Remove default sheet if we're creating new ones + if sheets_spec: + default_sheet = wb.active + wb.remove(default_sheet) + + for sheet_spec in sheets_spec: + sheet_name = sheet_spec.get('name', 'Sheet1') + ws = wb.create_sheet(title=sheet_name) + + row_num = 1 + + # Add headers + headers = sheet_spec.get('headers', []) + if headers: + for col, header in enumerate(headers, 1): + cell = ws.cell(row=row_num, column=col, value=header) + cell.font = Font(bold=True) + row_num += 1 + + # Add data + data = sheet_spec.get('data', []) + for row_data in data: + for col, value in enumerate(row_data, 1): + cell = ws.cell(row=row_num, column=col) + if isinstance(value, str) and value.startswith('='): + cell.value = value # Formula + else: + cell.value = value + row_num += 1 + + # Set column widths + col_widths = sheet_spec.get('column_widths', {}) + for col_letter, width in col_widths.items(): + ws.column_dimensions[col_letter].width = width + + wb.save(output_path) + return output_path + + +def main(): + parser = argparse.ArgumentParser(description='Create Excel workbooks') + parser.add_argument('--output', required=True, help='Output file path (.xlsx)') + 
parser.add_argument('--sheets', required=True, help='JSON specification of sheets') + + args = parser.parse_args() + + try: + sheets = json.loads(args.sheets) + except json.JSONDecodeError as e: + print(f"Error: Invalid sheets JSON - {e}", file=sys.stderr) + sys.exit(1) + + try: + result = create_xlsx(args.output, sheets) + print(json.dumps({"status": "success", "file": result, "sheets": len(sheets)})) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/xlsx/tools/edit_xlsx.py b/coderrr-skills/skills/xlsx/tools/edit_xlsx.py new file mode 100644 index 0000000..edf0d65 --- /dev/null +++ b/coderrr-skills/skills/xlsx/tools/edit_xlsx.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python3 +""" +Edit Excel files. + +Usage: + python edit_xlsx.py --file input.xlsx --output output.xlsx --operations '[...]' +""" + +import argparse +import sys +import json +from pathlib import Path + +try: + from openpyxl import load_workbook + from openpyxl.styles import Font, PatternFill +except ImportError: + print("Error: 'openpyxl' package is required. 
Install with: pip install openpyxl", file=sys.stderr) + sys.exit(1) + + +def apply_operation(wb, operation): + """Apply a single edit operation.""" + action = operation.get('action') + sheet_name = operation.get('sheet', wb.active.title) + + if action == 'set_cell': + ws = wb[sheet_name] + cell = operation.get('cell', 'A1') + value = operation.get('value') + ws[cell] = value + + elif action == 'set_range': + ws = wb[sheet_name] + start = operation.get('start', 'A1') + data = operation.get('data', []) + + # Parse start cell + col_letter = ''.join(filter(str.isalpha, start)) + row_num = int(''.join(filter(str.isdigit, start))) + + for row_idx, row_data in enumerate(data): + for col_idx, value in enumerate(row_data): + ws.cell(row=row_num + row_idx, column=ord(col_letter) - ord('A') + 1 + col_idx, value=value) + + elif action == 'add_formula': + ws = wb[sheet_name] + cell = operation.get('cell', 'A1') + formula = operation.get('formula', '') + ws[cell] = formula + + elif action == 'add_sheet': + name = operation.get('name', 'NewSheet') + wb.create_sheet(title=name) + + elif action == 'format_cell': + ws = wb[sheet_name] + cell = operation.get('cell', 'A1') + cell_obj = ws[cell] + + if operation.get('bold'): + cell_obj.font = Font(bold=True) + if operation.get('bg_color'): + cell_obj.fill = PatternFill(start_color=operation['bg_color'], + end_color=operation['bg_color'], + fill_type='solid') + + +def edit_xlsx(input_path: str, output_path: str, operations: list): + """Edit an Excel file.""" + wb = load_workbook(input_path) + + for operation in operations: + apply_operation(wb, operation) + + wb.save(output_path) + return output_path + + +def main(): + parser = argparse.ArgumentParser(description='Edit Excel files') + parser.add_argument('--file', required=True, help='Input Excel file') + parser.add_argument('--output', required=True, help='Output file path') + parser.add_argument('--operations', required=True, help='JSON array of operations') + + args = 
parser.parse_args() + + if not Path(args.file).exists(): + print(f"Error: File not found: {args.file}", file=sys.stderr) + sys.exit(1) + + try: + operations = json.loads(args.operations) + except json.JSONDecodeError as e: + print(f"Error: Invalid operations JSON - {e}", file=sys.stderr) + sys.exit(1) + + try: + result = edit_xlsx(args.file, args.output, operations) + print(json.dumps({"status": "success", "file": result})) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/coderrr-skills/skills/xlsx/tools/read_xlsx.py b/coderrr-skills/skills/xlsx/tools/read_xlsx.py new file mode 100644 index 0000000..fd3e966 --- /dev/null +++ b/coderrr-skills/skills/xlsx/tools/read_xlsx.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python3 +""" +Read data from Excel files. + +Usage: + python read_xlsx.py --file data.xlsx --format json +""" + +import argparse +import sys +import json +from pathlib import Path + +try: + from openpyxl import load_workbook + from openpyxl.utils import get_column_letter +except ImportError: + print("Error: 'openpyxl' package is required. Install with: pip install openpyxl", file=sys.stderr) + sys.exit(1) + + +def parse_range(range_str: str): + """Parse range like A1:C10 into start and end.""" + if ':' in range_str: + start, end = range_str.split(':') + return start, end + return range_str, range_str + + +def read_xlsx(file_path: str, sheet_name: str = None, cell_range: str = None, output_format: str = 'json'): + """Read Excel file data.""" + wb = load_workbook(file_path, data_only=True) + + # Get sheet + if sheet_name: + if sheet_name not in wb.sheetnames: + raise ValueError(f"Sheet '{sheet_name}' not found. 
Available: {wb.sheetnames}") + ws = wb[sheet_name] + else: + ws = wb.active + + # Determine range + if cell_range: + data = [] + for row in ws[cell_range]: + row_data = [cell.value for cell in row] + data.append(row_data) + else: + data = [] + for row in ws.iter_rows(values_only=True): + if any(cell is not None for cell in row): + data.append(list(row)) + + # Format output + if output_format == 'json': + return json.dumps({ + "sheet": ws.title, + "range": cell_range or f"A1:{get_column_letter(ws.max_column)}{ws.max_row}", + "data": data + }, indent=2, default=str) + + elif output_format == 'csv': + lines = [] + for row in data: + line = ','.join(str(cell) if cell is not None else '' for cell in row) + lines.append(line) + return '\n'.join(lines) + + else: # text + lines = [] + for row in data: + line = '\t'.join(str(cell) if cell is not None else '' for cell in row) + lines.append(line) + return '\n'.join(lines) + + +def main(): + parser = argparse.ArgumentParser(description='Read Excel files') + parser.add_argument('--file', required=True, help='Path to Excel file') + parser.add_argument('--sheet', help='Sheet name') + parser.add_argument('--range', help='Cell range (e.g., A1:D10)') + parser.add_argument('--format', choices=['json', 'csv', 'text'], default='json') + + args = parser.parse_args() + + if not Path(args.file).exists(): + print(f"Error: File not found: {args.file}", file=sys.stderr) + sys.exit(1) + + try: + result = read_xlsx(args.file, args.sheet, args.range, args.format) + print(result) + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + +if __name__ == '__main__': + main() From af12900c53d66c79f0c8c57356662984ddbd425e Mon Sep 17 00:00:00 2001 From: Akash-nath29 <anath5440@gmail.com> Date: Sun, 1 Feb 2026 04:30:46 +0530 Subject: [PATCH 2/3] Create Coderrr-skills --- coderrr-skills/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/coderrr-skills/README.md b/coderrr-skills/README.md index 
b7fa630..7ea8e84 100644 --- a/coderrr-skills/README.md +++ b/coderrr-skills/README.md @@ -144,6 +144,6 @@ MIT License - see [LICENSE](LICENSE) for details. ## šŸ”— Links -- [Coderrr CLI](https://github.com/Akash-nath29/coderrr) -- [Documentation](https://github.com/Akash-nath29/coderrr#readme) -- [Issue Tracker](https://github.com/Akash-nath29/coderrr-skills/issues) +- [Coderrr CLI](https://github.com/Akash-nath29/Coderrr) +- [Documentation](https://github.com/Akash-nath29/Coderrr#readme) +- [Issue Tracker](https://github.com/Akash-nath29/Coderrr-skills/issues) From 78a6b39a8bb7d6ac881dc70ac2c590a46140214d Mon Sep 17 00:00:00 2001 From: Akash-nath29 <anath5440@gmail.com> Date: Sun, 1 Feb 2026 04:42:12 +0530 Subject: [PATCH 3/3] Add Coderrr Skills --- README.md | 32 ++ backend/main.py | 5 +- bin/coderrr.js | 4 + coderrr-skills/.gitignore | 75 ---- coderrr-skills/CONTRIBUTING.md | 204 ----------- coderrr-skills/LICENSE | 21 -- coderrr-skills/README.md | 149 -------- coderrr-skills/registry.json | 326 ------------------ coderrr-skills/skills/api-client/Skills.md | 201 ----------- .../skills/api-client/requirements.txt | 1 - .../skills/api-client/tools/http_get.py | 129 ------- .../skills/api-client/tools/http_post.py | 148 -------- .../skills/api-client/tools/parse_response.py | 232 ------------- .../skills/artifacts-builder/Skills.md | 189 ---------- .../artifacts-builder/tools/add_component.py | 186 ---------- .../artifacts-builder/tools/build_artifact.py | 80 ----- .../tools/preview_artifact.py | 82 ----- .../tools/scaffold_artifact.py | 245 ------------- .../skills/brand-guidelines/Skills.md | 173 ---------- .../brand-guidelines/tools/export_tokens.py | 131 ------- .../tools/generate_palette.py | 113 ------ .../brand-guidelines/tools/set_brand.py | 84 ----- coderrr-skills/skills/code-analyzer/Skills.md | 175 ---------- .../skills/code-analyzer/requirements.txt | 2 - .../skills/code-analyzer/tools/count_lines.py | 198 ----------- 
.../skills/code-analyzer/tools/find_todos.py | 152 -------- .../skills/code-analyzer/tools/lint_python.py | 186 ---------- coderrr-skills/skills/docx/Skills.md | 160 --------- coderrr-skills/skills/docx/requirements.txt | 1 - .../skills/docx/tools/analyze_docx.py | 95 ----- .../skills/docx/tools/create_docx.py | 117 ------- coderrr-skills/skills/docx/tools/edit_docx.py | 108 ------ coderrr-skills/skills/docx/tools/read_docx.py | 148 -------- coderrr-skills/skills/file-search/Skills.md | 138 -------- .../skills/file-search/tools/file_stats.py | 153 -------- .../skills/file-search/tools/find_files.py | 111 ------ .../file-search/tools/search_content.py | 162 --------- .../skills/internal-comms/Skills.md | 210 ----------- .../internal-comms/tools/announcement.py | 102 ------ .../internal-comms/tools/meeting_summary.py | 95 ----- .../skills/internal-comms/tools/newsletter.py | 122 ------- .../internal-comms/tools/status_report.py | 123 ------- coderrr-skills/skills/json-tools/Skills.md | 168 --------- .../skills/json-tools/tools/format_json.py | 106 ------ .../skills/json-tools/tools/query_json.py | 194 ----------- .../skills/json-tools/tools/validate_json.py | 97 ------ coderrr-skills/skills/mcp-builder/Skills.md | 203 ----------- .../skills/mcp-builder/tools/add_mcp_tool.py | 92 ----- .../skills/mcp-builder/tools/init_mcp.py | 181 ---------- .../skills/mcp-builder/tools/validate_mcp.py | 96 ------ coderrr-skills/skills/pdf/Skills.md | 186 ---------- coderrr-skills/skills/pdf/requirements.txt | 3 - coderrr-skills/skills/pdf/tools/create_pdf.py | 114 ------ .../skills/pdf/tools/extract_pdf.py | 109 ------ coderrr-skills/skills/pdf/tools/merge_pdf.py | 59 ---- coderrr-skills/skills/pdf/tools/pdf_info.py | 77 ----- coderrr-skills/skills/pdf/tools/split_pdf.py | 89 ----- coderrr-skills/skills/pptx/Skills.md | 146 -------- coderrr-skills/skills/pptx/requirements.txt | 1 - .../skills/pptx/tools/analyze_pptx.py | 76 ---- .../skills/pptx/tools/create_pptx.py | 131 ------- 
coderrr-skills/skills/pptx/tools/edit_pptx.py | 103 ------ coderrr-skills/skills/pptx/tools/read_pptx.py | 112 ------ coderrr-skills/skills/skill-creator/Skills.md | 158 --------- .../skills/skill-creator/tools/add_tool.py | 158 --------- .../skill-creator/tools/finalize_skill.py | 112 ------ .../skills/skill-creator/tools/init_skill.py | 98 ------ .../skill-creator/tools/list_templates.py | 113 ------ coderrr-skills/skills/web-scraper/Skills.md | 103 ------ .../skills/web-scraper/requirements.txt | 2 - .../skills/web-scraper/tools/extract_text.py | 127 ------- .../skills/web-scraper/tools/fetch_page.py | 95 ----- .../skills/webapp-testing/Skills.md | 211 ------------ .../skills/webapp-testing/requirements.txt | 1 - .../skills/webapp-testing/tools/interact.py | 66 ---- .../skills/webapp-testing/tools/navigate.py | 62 ---- .../skills/webapp-testing/tools/screenshot.py | 69 ---- .../webapp-testing/tools/start_browser.py | 78 ----- .../skills/webapp-testing/tools/verify.py | 68 ---- coderrr-skills/skills/xlsx/Skills.md | 169 --------- coderrr-skills/skills/xlsx/requirements.txt | 1 - .../skills/xlsx/tools/analyze_xlsx.py | 90 ----- .../skills/xlsx/tools/create_xlsx.py | 88 ----- coderrr-skills/skills/xlsx/tools/edit_xlsx.py | 107 ------ coderrr-skills/skills/xlsx/tools/read_xlsx.py | 99 ------ src/agent.js | 38 ++ src/executor.js | 1 + src/skillMarketplace.js | 249 +++++++++++++ src/skillRegistry.js | 308 +++++++++++++++++ src/skillRunner.js | 250 ++++++++++++++ src/skillsUI.js | 315 +++++++++++++++++ 91 files changed, 1201 insertions(+), 9746 deletions(-) delete mode 100644 coderrr-skills/.gitignore delete mode 100644 coderrr-skills/CONTRIBUTING.md delete mode 100644 coderrr-skills/LICENSE delete mode 100644 coderrr-skills/README.md delete mode 100644 coderrr-skills/registry.json delete mode 100644 coderrr-skills/skills/api-client/Skills.md delete mode 100644 coderrr-skills/skills/api-client/requirements.txt delete mode 100644 
coderrr-skills/skills/api-client/tools/http_get.py delete mode 100644 coderrr-skills/skills/api-client/tools/http_post.py delete mode 100644 coderrr-skills/skills/api-client/tools/parse_response.py delete mode 100644 coderrr-skills/skills/artifacts-builder/Skills.md delete mode 100644 coderrr-skills/skills/artifacts-builder/tools/add_component.py delete mode 100644 coderrr-skills/skills/artifacts-builder/tools/build_artifact.py delete mode 100644 coderrr-skills/skills/artifacts-builder/tools/preview_artifact.py delete mode 100644 coderrr-skills/skills/artifacts-builder/tools/scaffold_artifact.py delete mode 100644 coderrr-skills/skills/brand-guidelines/Skills.md delete mode 100644 coderrr-skills/skills/brand-guidelines/tools/export_tokens.py delete mode 100644 coderrr-skills/skills/brand-guidelines/tools/generate_palette.py delete mode 100644 coderrr-skills/skills/brand-guidelines/tools/set_brand.py delete mode 100644 coderrr-skills/skills/code-analyzer/Skills.md delete mode 100644 coderrr-skills/skills/code-analyzer/requirements.txt delete mode 100644 coderrr-skills/skills/code-analyzer/tools/count_lines.py delete mode 100644 coderrr-skills/skills/code-analyzer/tools/find_todos.py delete mode 100644 coderrr-skills/skills/code-analyzer/tools/lint_python.py delete mode 100644 coderrr-skills/skills/docx/Skills.md delete mode 100644 coderrr-skills/skills/docx/requirements.txt delete mode 100644 coderrr-skills/skills/docx/tools/analyze_docx.py delete mode 100644 coderrr-skills/skills/docx/tools/create_docx.py delete mode 100644 coderrr-skills/skills/docx/tools/edit_docx.py delete mode 100644 coderrr-skills/skills/docx/tools/read_docx.py delete mode 100644 coderrr-skills/skills/file-search/Skills.md delete mode 100644 coderrr-skills/skills/file-search/tools/file_stats.py delete mode 100644 coderrr-skills/skills/file-search/tools/find_files.py delete mode 100644 coderrr-skills/skills/file-search/tools/search_content.py delete mode 100644 
coderrr-skills/skills/internal-comms/Skills.md delete mode 100644 coderrr-skills/skills/internal-comms/tools/announcement.py delete mode 100644 coderrr-skills/skills/internal-comms/tools/meeting_summary.py delete mode 100644 coderrr-skills/skills/internal-comms/tools/newsletter.py delete mode 100644 coderrr-skills/skills/internal-comms/tools/status_report.py delete mode 100644 coderrr-skills/skills/json-tools/Skills.md delete mode 100644 coderrr-skills/skills/json-tools/tools/format_json.py delete mode 100644 coderrr-skills/skills/json-tools/tools/query_json.py delete mode 100644 coderrr-skills/skills/json-tools/tools/validate_json.py delete mode 100644 coderrr-skills/skills/mcp-builder/Skills.md delete mode 100644 coderrr-skills/skills/mcp-builder/tools/add_mcp_tool.py delete mode 100644 coderrr-skills/skills/mcp-builder/tools/init_mcp.py delete mode 100644 coderrr-skills/skills/mcp-builder/tools/validate_mcp.py delete mode 100644 coderrr-skills/skills/pdf/Skills.md delete mode 100644 coderrr-skills/skills/pdf/requirements.txt delete mode 100644 coderrr-skills/skills/pdf/tools/create_pdf.py delete mode 100644 coderrr-skills/skills/pdf/tools/extract_pdf.py delete mode 100644 coderrr-skills/skills/pdf/tools/merge_pdf.py delete mode 100644 coderrr-skills/skills/pdf/tools/pdf_info.py delete mode 100644 coderrr-skills/skills/pdf/tools/split_pdf.py delete mode 100644 coderrr-skills/skills/pptx/Skills.md delete mode 100644 coderrr-skills/skills/pptx/requirements.txt delete mode 100644 coderrr-skills/skills/pptx/tools/analyze_pptx.py delete mode 100644 coderrr-skills/skills/pptx/tools/create_pptx.py delete mode 100644 coderrr-skills/skills/pptx/tools/edit_pptx.py delete mode 100644 coderrr-skills/skills/pptx/tools/read_pptx.py delete mode 100644 coderrr-skills/skills/skill-creator/Skills.md delete mode 100644 coderrr-skills/skills/skill-creator/tools/add_tool.py delete mode 100644 coderrr-skills/skills/skill-creator/tools/finalize_skill.py delete mode 100644 
coderrr-skills/skills/skill-creator/tools/init_skill.py delete mode 100644 coderrr-skills/skills/skill-creator/tools/list_templates.py delete mode 100644 coderrr-skills/skills/web-scraper/Skills.md delete mode 100644 coderrr-skills/skills/web-scraper/requirements.txt delete mode 100644 coderrr-skills/skills/web-scraper/tools/extract_text.py delete mode 100644 coderrr-skills/skills/web-scraper/tools/fetch_page.py delete mode 100644 coderrr-skills/skills/webapp-testing/Skills.md delete mode 100644 coderrr-skills/skills/webapp-testing/requirements.txt delete mode 100644 coderrr-skills/skills/webapp-testing/tools/interact.py delete mode 100644 coderrr-skills/skills/webapp-testing/tools/navigate.py delete mode 100644 coderrr-skills/skills/webapp-testing/tools/screenshot.py delete mode 100644 coderrr-skills/skills/webapp-testing/tools/start_browser.py delete mode 100644 coderrr-skills/skills/webapp-testing/tools/verify.py delete mode 100644 coderrr-skills/skills/xlsx/Skills.md delete mode 100644 coderrr-skills/skills/xlsx/requirements.txt delete mode 100644 coderrr-skills/skills/xlsx/tools/analyze_xlsx.py delete mode 100644 coderrr-skills/skills/xlsx/tools/create_xlsx.py delete mode 100644 coderrr-skills/skills/xlsx/tools/edit_xlsx.py delete mode 100644 coderrr-skills/skills/xlsx/tools/read_xlsx.py create mode 100644 src/skillMarketplace.js create mode 100644 src/skillRegistry.js create mode 100644 src/skillRunner.js create mode 100644 src/skillsUI.js diff --git a/README.md b/README.md index 603f23b..b62aeaf 100644 --- a/README.md +++ b/README.md @@ -19,8 +19,40 @@ Coderrr is an AI-powered coding agent that analyzes tasks, creates actionable plans, performs file operations, and executes commands with user permission. Built for developers who want automated assistance without sacrificing control. 
+--- + +## 🧩 Skills Marketplace + +**Extend Coderrr's capabilities with installable skills!** + +Browse and install skills from the [coderrr-skills](https://github.com/Akash-nath29/Coderrr-skills) marketplace: + +```bash +# Browse available skills +coderrr market + +# Install a skill +coderrr install web-scraper + +# List installed skills +coderrr skills +``` + +| Skill | Description | +|-------|-------------| +| **web-scraper** | Fetch and extract content from web pages | +| **pdf** | Create, merge, split, and extract PDFs | +| **code-analyzer** | Lint code, count lines, find TODOs | +| **docx/xlsx/pptx** | Work with Office documents | +| **api-client** | Make HTTP requests | + +šŸ‘‰ **[Browse all skills →](https://github.com/Akash-nath29/Coderrr-skills)** + +--- + ## Table of Contents +- [🧩 Skills Marketplace](#-skills-marketplace) - [See Coderrr in Action](#see-coderrr-in-action) - [Features](#features) - [Core Capabilities](#core-capabilities) diff --git a/backend/main.py b/backend/main.py index 6135706..a3609fc 100644 --- a/backend/main.py +++ b/backend/main.py @@ -93,7 +93,7 @@ class PlanStep(BaseModel): action: Literal[ "create_file", "update_file", "patch_file", "delete_file", "read_file", "run_command", "create_dir", "delete_dir", - "list_dir", "rename_dir" + "list_dir", "rename_dir", "invoke_skill" ] path: Optional[str] = None content: Optional[str] = None @@ -102,6 +102,9 @@ class PlanStep(BaseModel): old_path: Optional[str] = Field(default=None, alias="oldPath") new_path: Optional[str] = Field(default=None, alias="newPath") command: Optional[str] = None + skill: Optional[str] = None # Skill name for invoke_skill + tool: Optional[str] = None # Tool name for invoke_skill + args: Optional[dict] = None # Arguments for invoke_skill summary: str
recipeManager = require('../src/recipeManager'); const { displayInsights } = require('../src/insightsUI'); +const { registerSkillCommands } = require('../src/skillsUI'); + +// Register skill management commands +registerSkillCommands(program); // Optional: Load .env from user's home directory (for advanced users who want custom backend) const homeConfigPath = path.join(os.homedir(), '.coderrr', '.env'); diff --git a/coderrr-skills/.gitignore b/coderrr-skills/.gitignore deleted file mode 100644 index a98b31a..0000000 --- a/coderrr-skills/.gitignore +++ /dev/null @@ -1,75 +0,0 @@ -# Python -__pycache__/ -*.py[cod] -*$py.class -*.so -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg - -# Virtual environments -venv/ -ENV/ -env/ -.venv/ - -# IDE -.idea/ -.vscode/ -*.swp -*.swo -*~ -.project -.pydevproject -.settings/ - -# OS -.DS_Store -.DS_Store? -._* -.Spotlight-V100 -.Trashes -ehthumbs.db -Thumbs.db -Desktop.ini - -# Testing -.pytest_cache/ -.coverage -htmlcov/ -.tox/ -.nox/ - -# Logs -*.log -logs/ - -# Local development -.env -.env.local -*.local - -# Temporary files -tmp/ -temp/ -*.tmp -*.bak - -# Node (if any tooling uses it) -node_modules/ - -# Compiled Python -*.pyc diff --git a/coderrr-skills/CONTRIBUTING.md b/coderrr-skills/CONTRIBUTING.md deleted file mode 100644 index e8833ad..0000000 --- a/coderrr-skills/CONTRIBUTING.md +++ /dev/null @@ -1,204 +0,0 @@ -# Contributing to Coderrr Skills - -Thank you for your interest in contributing to the Coderrr skills marketplace! This guide will help you create and submit your own skills. - -## šŸ“‹ Table of Contents - -- [Getting Started](#getting-started) -- [Skill Requirements](#skill-requirements) -- [Creating a New Skill](#creating-a-new-skill) -- [Testing Your Skill](#testing-your-skill) -- [Submitting a Pull Request](#submitting-a-pull-request) -- [Code of Conduct](#code-of-conduct) - -## Getting Started - -1. 
**Fork** this repository -2. **Clone** your fork locally -3. Create a new **branch** for your skill: `git checkout -b skill/your-skill-name` - -## Skill Requirements - -Every skill must meet these requirements: - -### Required Files - -``` -skills/your-skill-name/ -ā”œā”€ā”€ Skills.md # Required: Skill documentation -ā”œā”€ā”€ requirements.txt # Optional: Python dependencies -└── tools/ - └── your_tool.py # Required: At least one tool -``` - -### Skills.md Format - -Your `Skills.md` must include: - -```markdown ---- -name: your-skill-name -displayName: Your Skill Name -description: Brief description of what your skill does -version: 1.0.0 -author: Your Name -tags: - - tag1 - - tag2 ---- - -# Your Skill Name - -Detailed description of your skill. - -## Tools - -### tool_name - -Description of what this tool does. - -**Arguments:** -- `--arg1` (required): Description -- `--arg2` (optional): Description - -**Example:** -\`\`\`bash -python tools/tool_name.py --arg1 value -\`\`\` - -**Output:** -Description of output format -``` - -### Tool Requirements - -Each Python tool must: - -1. **Use argparse** for command-line arguments -2. **Include docstrings** explaining functionality -3. **Handle errors gracefully** with informative messages -4. **Output to stdout** for easy piping -5. **Return exit code 0** on success, non-zero on failure - -### Example Tool Structure - -```python -#!/usr/bin/env python3 -""" -Brief description of what this tool does. 
-""" - -import argparse -import sys -import json - - -def main(): - parser = argparse.ArgumentParser( - description='What this tool does' - ) - parser.add_argument('--input', required=True, help='Input description') - parser.add_argument('--format', default='json', help='Output format') - - args = parser.parse_args() - - try: - # Your tool logic here - result = process(args.input) - print(json.dumps(result, indent=2)) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() -``` - -## Testing Your Skill - -Before submitting, test your skill locally: - -1. **Install Coderrr CLI** (if not already installed): - ```bash - npm install -g coderrr-cli - ``` - -2. **Install your skill locally**: - ```bash - coderrr install ./skills/your-skill-name - ``` - -3. **Test each tool**: - ```bash - python ~/.coderrr/skills/your-skill-name/tools/your_tool.py --help - ``` - -4. **Verify with the agent**: - ```bash - coderrr - > Use the your_tool to do something - ``` - -## Submitting a Pull Request - -1. **Update registry.json** with your skill metadata: - ```json - { - "your-skill-name": { - "name": "your-skill-name", - "displayName": "Your Skill Name", - "description": "What your skill does", - "version": "1.0.0", - "author": "Your Name", - "repository": "https://github.com/your-username/your-repo", - "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/your-skill-name", - "tools": ["tool1", "tool2"], - "tags": ["tag1", "tag2"] - } - } - ``` - -2. **Commit your changes**: - ```bash - git add . - git commit -m "Add your-skill-name skill" - ``` - -3. **Push to your fork**: - ```bash - git push origin skill/your-skill-name - ``` - -4. 
**Open a Pull Request** with: - - Clear title: `Add [skill-name] skill` - - Description of what your skill does - - List of tools included - - Any external dependencies - -### PR Checklist - -- [ ] Skills.md is complete with all required sections -- [ ] All tools have proper docstrings -- [ ] All tools handle errors gracefully -- [ ] requirements.txt lists all dependencies (if any) -- [ ] registry.json is updated with correct metadata -- [ ] Tools tested locally and working - -## Code of Conduct - -- **Be respectful** in all interactions -- **Write clean, readable code** with comments -- **Document thoroughly** for other users -- **Test before submitting** to avoid broken skills -- **No malicious code** - skills that harm users will be removed - -## Questions? - -If you have questions, feel free to: - -- Open an [issue](https://github.com/Akash-nath29/coderrr-skills/issues) -- Check existing skills for examples -- Read the [Coderrr CLI documentation](https://github.com/Akash-nath29/Coderrr) - -Thank you for contributing! šŸš€ diff --git a/coderrr-skills/LICENSE b/coderrr-skills/LICENSE deleted file mode 100644 index baba5bc..0000000 --- a/coderrr-skills/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2026 Akash Nath - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/coderrr-skills/README.md b/coderrr-skills/README.md deleted file mode 100644 index 7ea8e84..0000000 --- a/coderrr-skills/README.md +++ /dev/null @@ -1,149 +0,0 @@ -# Coderrr Skills Marketplace - -[![Skills](https://img.shields.io/badge/skills-15-blue)](https://github.com/Akash-nath29/coderrr-skills) -[![License](https://img.shields.io/badge/license-MIT-green)](LICENSE) -[![Python](https://img.shields.io/badge/python-3.8%2B-blue)](https://python.org) - -A marketplace of installable skills for [Coderrr CLI](https://github.com/Akash-nath29/coderrr) - the AI-powered coding assistant. 
- -## šŸš€ Quick Start - -Install skills directly from Coderrr CLI: - -```bash -# Install a skill -coderrr install web-scraper - -# List installed skills -coderrr skills - -# Search for skills -coderrr search pdf -``` - -## šŸ“¦ Available Skills - -| Skill | Description | Tools | -|-------|-------------|-------| -| **web-scraper** | Fetch, parse, and extract content from web pages | `fetch_page`, `extract_text` | -| **file-search** | Find files and search content within your filesystem | `find_files`, `search_content`, `file_stats` | -| **code-analyzer** | Analyze code quality, structure, and maintainability | `lint_python`, `count_lines`, `find_todos` | -| **json-tools** | Format, query, and validate JSON data | `format_json`, `query_json`, `validate_json` | -| **api-client** | Make HTTP requests and work with API responses | `http_get`, `http_post`, `parse_response` | -| **docx** | Create, edit, and analyze Word documents | `create_docx`, `read_docx`, `edit_docx`, `analyze_docx` | -| **pdf** | Comprehensive PDF toolkit for document manipulation | `extract_pdf`, `create_pdf`, `merge_pdf`, `split_pdf`, `pdf_info` | -| **pptx** | Create, edit, and analyze PowerPoint presentations | `create_pptx`, `read_pptx`, `edit_pptx`, `analyze_pptx` | -| **xlsx** | Create and manipulate Excel spreadsheets with formulas | `create_xlsx`, `read_xlsx`, `edit_xlsx`, `analyze_xlsx` | -| **skill-creator** | Interactive tool for building new custom skills | `init_skill`, `add_tool`, `finalize_skill`, `list_templates` | -| **artifacts-builder** | Build complex HTML artifacts using React and Tailwind | `scaffold_artifact`, `add_component`, `build_artifact`, `preview_artifact` | -| **mcp-builder** | Guide for creating high-quality MCP servers | `init_mcp`, `add_mcp_tool`, `validate_mcp` | -| **webapp-testing** | Test web applications using Playwright automation | `start_browser`, `navigate`, `interact`, `verify`, `screenshot` | -| **brand-guidelines** | Apply brand colors, typography, and 
design tokens | `set_brand`, `generate_palette`, `export_tokens` | -| **internal-comms** | Write status reports, newsletters, and announcements | `status_report`, `newsletter`, `announcement`, `meeting_summary` | - -## šŸŽÆ Skills by Category - -### šŸ“„ Document Processing -- **docx** - Word document handling -- **pdf** - PDF manipulation -- **pptx** - PowerPoint presentations -- **xlsx** - Excel spreadsheets - -### 🌐 Web & API -- **web-scraper** - Web page scraping -- **api-client** - HTTP requests -- **webapp-testing** - Browser automation - -### šŸ’» Development -- **code-analyzer** - Code quality analysis -- **json-tools** - JSON manipulation -- **file-search** - File system operations -- **skill-creator** - Skill development -- **mcp-builder** - MCP server creation - -### šŸŽØ Design & Communication -- **artifacts-builder** - HTML/React components -- **brand-guidelines** - Design tokens -- **internal-comms** - Team communications - -## šŸ“ Repository Structure - -``` -coderrr-skills/ -ā”œā”€ā”€ registry.json # Central skill registry -ā”œā”€ā”€ README.md # This file -ā”œā”€ā”€ CONTRIBUTING.md # Contribution guidelines -ā”œā”€ā”€ LICENSE # MIT License -└── skills/ - ā”œā”€ā”€ web-scraper/ - │ ā”œā”€ā”€ Skills.md # Skill documentation - │ ā”œā”€ā”€ requirements.txt - │ └── tools/ - │ ā”œā”€ā”€ fetch_page.py - │ └── extract_text.py - ā”œā”€ā”€ docx/ - │ ā”œā”€ā”€ Skills.md - │ ā”œā”€ā”€ requirements.txt - │ └── tools/ - │ ā”œā”€ā”€ create_docx.py - │ ā”œā”€ā”€ read_docx.py - │ ā”œā”€ā”€ edit_docx.py - │ └── analyze_docx.py - └── ... (other skills) -``` - -## šŸ› ļø Creating New Skills - -1. Fork this repository -2. Create a new skill directory under `skills/` -3. Add required files: - - `Skills.md` - Documentation with YAML frontmatter - - `tools/` - Python tool scripts - - `requirements.txt` - Dependencies (if any) -4. Update `registry.json` -5. Submit a pull request - -See [CONTRIBUTING.md](CONTRIBUTING.md) for detailed guidelines. 
- -### Skill Structure - -```markdown ---- -name: my-skill -description: What this skill does and when to use it ---- - -Detailed documentation for the AI agent... -``` - -## šŸ“– Skills.md Format - -Each skill's `Skills.md` follows this structure: - -1. **YAML Frontmatter** - `name` and `description` -2. **Approach** - When to use which tool -3. **Tools** - Detailed documentation for each tool -4. **Common Patterns** - Usage examples -5. **Best Practices** - Guidelines for effective use -6. **Dependencies** - Required packages - -## šŸ¤ Contributing - -Contributions are welcome! Please read our [Contributing Guidelines](CONTRIBUTING.md) before submitting. - -### Ideas for New Skills -- Database connectors -- Cloud service integrations -- Image manipulation -- Markdown processing -- Git automation - -## šŸ“„ License - -MIT License - see [LICENSE](LICENSE) for details. - -## šŸ”— Links - -- [Coderrr CLI](https://github.com/Akash-nath29/Coderrr) -- [Documentation](https://github.com/Akash-nath29/Coderrr#readme) -- [Issue Tracker](https://github.com/Akash-nath29/Coderrr-skills/issues) diff --git a/coderrr-skills/registry.json b/coderrr-skills/registry.json deleted file mode 100644 index 01d548d..0000000 --- a/coderrr-skills/registry.json +++ /dev/null @@ -1,326 +0,0 @@ -{ - "version": "1.0", - "registry_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/registry.json", - "skills": { - "web-scraper": { - "name": "web-scraper", - "displayName": "Web Scraper", - "description": "Fetch, parse, and extract content from web pages. 
Use this skill when the user asks to scrape websites, extract text from URLs, parse HTML content, download web pages, or analyze website content.", - "version": "1.0.0", - "author": "Akash Nath", - "repository": "https://github.com/Akash-nath29/coderrr-skills", - "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/web-scraper", - "tools": [ - "fetch_page", - "extract_text" - ], - "tags": [ - "web", - "scraping", - "http", - "html", - "parsing" - ] - }, - "file-search": { - "name": "file-search", - "displayName": "File Search", - "description": "Find files and search content within your filesystem. Use this skill when the user asks to find files by name or pattern, search for text within files, get directory statistics, count files, or analyze folder structure.", - "version": "1.0.0", - "author": "Akash Nath", - "repository": "https://github.com/Akash-nath29/coderrr-skills", - "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/file-search", - "tools": [ - "find_files", - "search_content", - "file_stats" - ], - "tags": [ - "files", - "search", - "filesystem", - "grep", - "find" - ] - }, - "code-analyzer": { - "name": "code-analyzer", - "displayName": "Code Analyzer", - "description": "Analyze code quality, structure, and maintainability. Use this skill when the user asks to lint code, count lines of code, find TODO/FIXME comments, analyze code structure, check for issues, or audit a codebase.", - "version": "1.0.0", - "author": "Akash Nath", - "repository": "https://github.com/Akash-nath29/coderrr-skills", - "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/code-analyzer", - "tools": [ - "lint_python", - "count_lines", - "find_todos" - ], - "tags": [ - "code", - "analysis", - "linting", - "quality", - "python" - ] - }, - "json-tools": { - "name": "json-tools", - "displayName": "JSON Tools", - "description": "Format, query, and validate JSON data. 
Use this skill when the user asks to pretty-print JSON, extract values from JSON, validate JSON syntax, minify JSON, or work with nested JSON structures.", - "version": "1.0.0", - "author": "Akash Nath", - "repository": "https://github.com/Akash-nath29/coderrr-skills", - "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/json-tools", - "tools": [ - "format_json", - "query_json", - "validate_json" - ], - "tags": [ - "json", - "data", - "formatting", - "validation", - "query" - ] - }, - "api-client": { - "name": "api-client", - "displayName": "API Client", - "description": "Make HTTP requests and work with API responses. Use this skill when the user asks to call APIs, make HTTP GET/POST requests, test endpoints, fetch data from REST APIs, or parse and format API responses.", - "version": "1.0.0", - "author": "Akash Nath", - "repository": "https://github.com/Akash-nath29/coderrr-skills", - "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/api-client", - "tools": [ - "http_get", - "http_post", - "parse_response" - ], - "tags": [ - "api", - "http", - "rest", - "requests", - "web" - ] - }, - "docx": { - "name": "docx", - "displayName": "Word Documents", - "description": "Create, edit, and analyze Word documents with professional formatting. 
Use this skill when the user asks to create Word documents, add content to DOCX files, extract text from Word files, work with tables, headers, footers, or analyze document structure.", - "version": "1.0.0", - "author": "Akash Nath", - "repository": "https://github.com/Akash-nath29/coderrr-skills", - "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/docx", - "tools": [ - "create_docx", - "read_docx", - "edit_docx", - "analyze_docx" - ], - "tags": [ - "document", - "word", - "docx", - "office", - "writing" - ] - }, - "pdf": { - "name": "pdf", - "displayName": "PDF Toolkit", - "description": "Comprehensive PDF toolkit for document manipulation. Use this skill when the user asks to extract text from PDFs, create PDF documents, merge or split PDFs, extract tables from PDFs, work with PDF forms, or analyze PDF structure.", - "version": "1.0.0", - "author": "Akash Nath", - "repository": "https://github.com/Akash-nath29/coderrr-skills", - "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/pdf", - "tools": [ - "extract_pdf", - "create_pdf", - "merge_pdf", - "split_pdf", - "pdf_info" - ], - "tags": [ - "pdf", - "document", - "extract", - "merge", - "split" - ] - }, - "pptx": { - "name": "pptx", - "displayName": "PowerPoint", - "description": "Create, edit, and analyze PowerPoint presentations. 
Use this skill when the user asks to create slides, modify presentations, extract content from PPTX files, add speaker notes, or analyze presentation structure.", - "version": "1.0.0", - "author": "Akash Nath", - "repository": "https://github.com/Akash-nath29/coderrr-skills", - "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/pptx", - "tools": [ - "create_pptx", - "read_pptx", - "edit_pptx", - "analyze_pptx" - ], - "tags": [ - "presentation", - "powerpoint", - "slides", - "office" - ] - }, - "xlsx": { - "name": "xlsx", - "displayName": "Excel Spreadsheets", - "description": "Create and manipulate Excel spreadsheets with formulas and formatting. Use this skill when the user asks to create Excel files, read spreadsheet data, update cells, add formulas, format worksheets, or analyze Excel structure.", - "version": "1.0.0", - "author": "Akash Nath", - "repository": "https://github.com/Akash-nath29/coderrr-skills", - "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/xlsx", - "tools": [ - "create_xlsx", - "read_xlsx", - "edit_xlsx", - "analyze_xlsx" - ], - "tags": [ - "excel", - "spreadsheet", - "data", - "formulas", - "office" - ] - }, - "skill-creator": { - "name": "skill-creator", - "displayName": "Skill Creator", - "description": "Interactive tool for building new custom skills for Coderrr. 
Use this skill when the user wants to create a new skill, scaffold a skill structure, generate tool templates, or set up skill documentation.", - "version": "1.0.0", - "author": "Akash Nath", - "repository": "https://github.com/Akash-nath29/coderrr-skills", - "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/skill-creator", - "tools": [ - "init_skill", - "add_tool", - "finalize_skill", - "list_templates" - ], - "tags": [ - "meta", - "development", - "scaffolding", - "tools" - ] - }, - "artifacts-builder": { - "name": "artifacts-builder", - "displayName": "Artifacts Builder", - "description": "Build complex HTML artifacts using React, Tailwind CSS, and shadcn/ui components. Use this skill when the user wants to create polished UI components, interactive web widgets, dashboards, landing pages, or sophisticated HTML artifacts.", - "version": "1.0.0", - "author": "Akash Nath", - "repository": "https://github.com/Akash-nath29/coderrr-skills", - "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/artifacts-builder", - "tools": [ - "scaffold_artifact", - "add_component", - "build_artifact", - "preview_artifact" - ], - "tags": [ - "frontend", - "react", - "tailwind", - "ui", - "html" - ] - }, - "mcp-builder": { - "name": "mcp-builder", - "displayName": "MCP Builder", - "description": "Guide for creating high-quality MCP (Model Context Protocol) servers. 
Use this skill when the user wants to build an MCP server, create MCP tools, implement MCP resources, or integrate with MCP-compatible clients.", - "version": "1.0.0", - "author": "Akash Nath", - "repository": "https://github.com/Akash-nath29/coderrr-skills", - "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/mcp-builder", - "tools": [ - "init_mcp", - "add_mcp_tool", - "validate_mcp" - ], - "tags": [ - "mcp", - "protocol", - "server", - "integration" - ] - }, - "webapp-testing": { - "name": "webapp-testing", - "displayName": "Web App Testing", - "description": "Test local web applications using Playwright browser automation. Use this skill when the user wants to test web applications, automate browser interactions, take screenshots, verify UI elements, or run end-to-end tests.", - "version": "1.0.0", - "author": "Akash Nath", - "repository": "https://github.com/Akash-nath29/coderrr-skills", - "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/webapp-testing", - "tools": [ - "start_browser", - "navigate", - "interact", - "verify", - "screenshot" - ], - "tags": [ - "testing", - "browser", - "playwright", - "automation", - "e2e" - ] - }, - "brand-guidelines": { - "name": "brand-guidelines", - "displayName": "Brand Guidelines", - "description": "Apply official brand colors, typography, and design tokens to projects. 
Use this skill when the user wants to set up brand colors, configure typography, generate color palettes, create design tokens, or ensure brand consistency.", - "version": "1.0.0", - "author": "Akash Nath", - "repository": "https://github.com/Akash-nath29/coderrr-skills", - "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/brand-guidelines", - "tools": [ - "set_brand", - "generate_palette", - "export_tokens" - ], - "tags": [ - "design", - "branding", - "colors", - "typography", - "tokens" - ] - }, - "internal-comms": { - "name": "internal-comms", - "displayName": "Internal Communications", - "description": "Write internal communications like status reports, newsletters, announcements, and team updates. Use this skill when the user needs to draft status reports, write team newsletters, create announcements, or compose meeting summaries.", - "version": "1.0.0", - "author": "Akash Nath", - "repository": "https://github.com/Akash-nath29/coderrr-skills", - "download_url": "https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/skills/internal-comms", - "tools": [ - "status_report", - "newsletter", - "announcement", - "meeting_summary" - ], - "tags": [ - "communication", - "reports", - "newsletters", - "documentation" - ] - } - } -} \ No newline at end of file diff --git a/coderrr-skills/skills/api-client/Skills.md b/coderrr-skills/skills/api-client/Skills.md deleted file mode 100644 index 0849b8f..0000000 --- a/coderrr-skills/skills/api-client/Skills.md +++ /dev/null @@ -1,201 +0,0 @@ ---- -name: api-client -description: Make HTTP requests and work with API responses. Use this skill when the user asks to call APIs, make HTTP GET/POST requests, test endpoints, fetch data from REST APIs, or parse and format API responses. Supports custom headers, JSON payloads, and multiple output formats. ---- - -This skill provides HTTP client functionality for interacting with REST APIs. 
It handles authentication headers, JSON request bodies, error responses, and output formatting in JSON, table, or CSV formats. - -The user provides an API endpoint to call or response data to process. They may need to test APIs, fetch data, or parse responses into usable formats. - -## Approach - -Before invoking tools, understand the API interaction: -- **Fetch data**: Use `http_get` for reading resources -- **Send data**: Use `http_post` for creating/updating resources -- **Process results**: Use `parse_response` to extract or reformat data -- **Full workflow**: Chain all three for complete API interactions - -Consider authentication, headers, and expected response formats. - -## Tools - -### http_get - -Makes HTTP GET requests with optional custom headers. - -```bash -python tools/http_get.py --url <url> [--headers <json>] [--timeout <seconds>] -``` - -**Arguments:** -- `--url` (required): Complete URL including protocol (https://...) -- `--headers` (optional): JSON string of headers (e.g., `'{"Authorization": "Bearer token"}'`) -- `--timeout` (optional): Request timeout in seconds (default: 30) - -**Output:** Response body. JSON responses are automatically pretty-printed. - -**Default headers included:** -- `User-Agent: Coderrr-API-Client/1.0` -- `Accept: application/json` - -**When to use:** -- Fetching resources from APIs -- Testing API endpoints -- Downloading JSON data -- Checking API availability - ---- - -### http_post - -Makes HTTP POST requests with JSON body. - -```bash -python tools/http_post.py --url <url> --data <json> [--headers <json>] [--timeout <seconds>] -``` - -**Arguments:** -- `--url` (required): Complete URL including protocol -- `--data` (required): JSON string of request body -- `--headers` (optional): JSON string of additional headers -- `--timeout` (optional): Request timeout in seconds (default: 30) - -**Output:** Response body. JSON responses are automatically pretty-printed. 
- -**Default headers included:** -- `Content-Type: application/json` -- `Accept: application/json` - -**When to use:** -- Creating new resources -- Submitting form data -- Authenticating with APIs -- Triggering actions - ---- - -### parse_response - -Parses JSON responses and formats or extracts data. - -```bash -python tools/parse_response.py [--data <json>] [--extract <path>] [--format <type>] -``` - -**Arguments:** -- `--data` (optional): JSON string to parse. If omitted, reads from stdin -- `--extract` (optional): Path expression to extract (e.g., `data.users[0].name`) -- `--format` (optional): Output format - `json`, `table`, or `csv` (default: json) - -**Output:** Formatted data according to specified format. - -**When to use:** -- Extracting specific fields from responses -- Converting JSON to readable tables -- Exporting data to CSV -- Processing piped API output - -## Common Patterns - -### Simple GET Request -```bash -python tools/http_get.py --url https://api.github.com/users/octocat -``` - -### Authenticated Request -```bash -python tools/http_get.py --url https://api.example.com/me --headers '{"Authorization": "Bearer YOUR_TOKEN"}' -``` - -### POST with JSON Data -```bash -python tools/http_post.py --url https://api.example.com/users --data '{"name": "John", "email": "john@example.com"}' -``` - -### Chained API Call with Extraction -```bash -python tools/http_get.py --url https://api.example.com/users | python tools/parse_response.py --extract "data[*].email" -``` - -### Format as Table -```bash -python tools/http_get.py --url https://api.example.com/users | python tools/parse_response.py --format table -``` - -### Export to CSV -```bash -python tools/http_get.py --url https://api.example.com/users | python tools/parse_response.py --format csv > users.csv -``` - -## Authentication Patterns - -### Bearer Token -```bash ---headers '{"Authorization": "Bearer YOUR_ACCESS_TOKEN"}' -``` - -### API Key (Header) -```bash ---headers '{"X-API-Key": 
"YOUR_API_KEY"}' -``` - -### Basic Auth (Base64) -```bash ---headers '{"Authorization": "Basic dXNlcm5hbWU6cGFzc3dvcmQ="}' -``` - -### Multiple Headers -```bash ---headers '{"Authorization": "Bearer token", "X-Request-ID": "123", "Accept-Language": "en-US"}' -``` - -## Response Format Examples - -### JSON (default) -```json -{ - "id": 1, - "name": "John Doe", - "email": "john@example.com" -} -``` - -### Table -``` -| id | name | email | -|----|----------|------------------| -| 1 | John Doe | john@example.com | -| 2 | Jane Doe | jane@example.com | -``` - -### CSV -```csv -id,name,email -1,John Doe,john@example.com -2,Jane Doe,jane@example.com -``` - -## Best Practices - -1. **Always use HTTPS** - Never send credentials over HTTP -2. **Handle errors** - Check exit codes and stderr for failures -3. **Set appropriate timeouts** - Long for slow APIs, short for health checks -4. **Use extraction** - Don't process entire responses when you need one field -5. **Chain tools** - Pipe http_get to parse_response for clean workflows -6. **Escape JSON carefully** - Use single quotes around JSON strings in bash - -## Error Handling - -| Exit Code | Meaning | Recovery | -|-----------|---------|----------| -| 0 | Success | - | -| 1 | Invalid arguments or URL | Check URL format, header syntax | -| 2 | Network/connection error | Verify network, check timeout | -| 3 | HTTP error (4xx, 5xx) | Check authentication, request format | -| 4 | JSON parsing error | Verify response is valid JSON | - -**HTTP Error Details:** When HTTP 4xx/5xx occurs, the response body is still printed to stderr for debugging. - -## Dependencies - -Requires `requests>=2.28.0`. Automatically installed with the skill. 
diff --git a/coderrr-skills/skills/api-client/requirements.txt b/coderrr-skills/skills/api-client/requirements.txt deleted file mode 100644 index a8608b2..0000000 --- a/coderrr-skills/skills/api-client/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -requests>=2.28.0 diff --git a/coderrr-skills/skills/api-client/tools/http_get.py b/coderrr-skills/skills/api-client/tools/http_get.py deleted file mode 100644 index b254a16..0000000 --- a/coderrr-skills/skills/api-client/tools/http_get.py +++ /dev/null @@ -1,129 +0,0 @@ -#!/usr/bin/env python3 -""" -Make HTTP GET requests. - -This tool makes GET requests to URLs and outputs the response body. -JSON responses are automatically pretty-printed. - -Usage: - python http_get.py --url https://api.example.com/users - python http_get.py --url https://api.example.com/users --headers '{"Auth": "token"}' - -Exit Codes: - 0 - Success - 1 - Invalid arguments or URL - 2 - Network/connection error - 3 - HTTP error (4xx, 5xx) -""" - -import argparse -import sys -import json - -try: - import requests -except ImportError: - print("Error: 'requests' package is required. Install with: pip install requests", file=sys.stderr) - sys.exit(1) - - -def http_get(url: str, headers: dict = None, timeout: int = 30) -> str: - """ - Make an HTTP GET request. 
- - Args: - url: The URL to request - headers: Optional headers dictionary - timeout: Request timeout in seconds - - Returns: - Response body as string - """ - default_headers = { - 'User-Agent': 'Coderrr-API-Client/1.0', - 'Accept': 'application/json' - } - - if headers: - default_headers.update(headers) - - response = requests.get(url, headers=default_headers, timeout=timeout) - response.raise_for_status() - - return response.text - - -def main(): - parser = argparse.ArgumentParser( - description='Make HTTP GET requests', - formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=''' -Examples: - python http_get.py --url https://api.example.com/users - python http_get.py --url https://api.example.com/data --headers '{"Authorization": "Bearer token"}' - ''' - ) - parser.add_argument( - '--url', - required=True, - help='The URL to request' - ) - parser.add_argument( - '--headers', - help='JSON string of headers' - ) - parser.add_argument( - '--timeout', - type=int, - default=30, - help='Request timeout in seconds (default: 30)' - ) - - args = parser.parse_args() - - # Parse headers if provided - headers = None - if args.headers: - try: - headers = json.loads(args.headers) - except json.JSONDecodeError as e: - print(f"Error: Invalid headers JSON - {e}", file=sys.stderr) - sys.exit(1) - - try: - response = http_get(args.url, headers, args.timeout) - - # Try to pretty-print if JSON - try: - data = json.loads(response) - print(json.dumps(data, indent=2, ensure_ascii=False)) - except json.JSONDecodeError: - # Not JSON, print as-is - print(response) - - except requests.exceptions.MissingSchema: - print(f"Error: Invalid URL. 
Include http:// or https://", file=sys.stderr) - sys.exit(1) - except requests.exceptions.ConnectionError as e: - print(f"Error: Connection failed - {e}", file=sys.stderr) - sys.exit(2) - except requests.exceptions.Timeout: - print(f"Error: Request timed out after {args.timeout} seconds", file=sys.stderr) - sys.exit(2) - except requests.exceptions.HTTPError as e: - print(f"Error: HTTP {e.response.status_code} - {e.response.reason}", file=sys.stderr) - # Still output the response body if available - if e.response.text: - try: - data = json.loads(e.response.text) - print(json.dumps(data, indent=2), file=sys.stderr) - except json.JSONDecodeError: - print(e.response.text, file=sys.stderr) - sys.exit(3) - except requests.exceptions.RequestException as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(2) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/api-client/tools/http_post.py b/coderrr-skills/skills/api-client/tools/http_post.py deleted file mode 100644 index 0c367a1..0000000 --- a/coderrr-skills/skills/api-client/tools/http_post.py +++ /dev/null @@ -1,148 +0,0 @@ -#!/usr/bin/env python3 -""" -Make HTTP POST requests with JSON body. - -This tool makes POST requests to URLs with JSON data and outputs -the response body. - -Usage: - python http_post.py --url https://api.example.com/users --data '{"name": "John"}' - -Exit Codes: - 0 - Success - 1 - Invalid arguments or URL - 2 - Network/connection error - 3 - HTTP error (4xx, 5xx) - 4 - Invalid JSON data -""" - -import argparse -import sys -import json - -try: - import requests -except ImportError: - print("Error: 'requests' package is required. Install with: pip install requests", file=sys.stderr) - sys.exit(1) - - -def http_post(url: str, data: dict, headers: dict = None, timeout: int = 30) -> str: - """ - Make an HTTP POST request with JSON body. 
def http_post(url: str, data: dict, headers: dict = None, timeout: int = 30) -> str:
    """Make an HTTP POST request with a JSON body.

    Args:
        url: The URL to request.
        data: Dictionary serialized and sent as the JSON request body.
        headers: Optional additional headers; merged over (and may override)
            the defaults below.
        timeout: Request timeout in seconds.

    Returns:
        Response body as a string.

    Raises:
        requests.exceptions.RequestException: on network failures or
            HTTP error status (via raise_for_status).
    """
    default_headers = {
        'User-Agent': 'Coderrr-API-Client/1.0',
        'Content-Type': 'application/json',
        'Accept': 'application/json'
    }

    if headers:
        default_headers.update(headers)

    response = requests.post(
        url,
        json=data,
        headers=default_headers,
        timeout=timeout
    )
    response.raise_for_status()

    return response.text


def main():
    """CLI entry point for http_post.py.

    Exit codes: 0 success, 1 invalid arguments/URL, 2 network error,
    3 HTTP error, 4 invalid --data JSON.
    """
    parser = argparse.ArgumentParser(
        description='Make HTTP POST requests with JSON body',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog='''
Examples:
  python http_post.py --url https://api.example.com/users --data '{"name": "John"}'
  python http_post.py --url https://api.example.com/auth --data '{"user": "admin", "pass": "123"}' --headers '{"X-API-Key": "secret"}'
        '''
    )
    parser.add_argument(
        '--url',
        required=True,
        help='The URL to request'
    )
    parser.add_argument(
        '--data',
        required=True,
        help='JSON string of request body'
    )
    parser.add_argument(
        '--headers',
        help='JSON string of additional headers'
    )
    parser.add_argument(
        '--timeout',
        type=int,
        default=30,
        help='Request timeout in seconds (default: 30)'
    )

    args = parser.parse_args()

    # Parse data (distinct exit code 4 so callers can tell bad input from bad URL)
    try:
        data = json.loads(args.data)
    except json.JSONDecodeError as e:
        print(f"Error: Invalid data JSON - {e}", file=sys.stderr)
        sys.exit(4)

    # Parse headers if provided
    headers = None
    if args.headers:
        try:
            headers = json.loads(args.headers)
        except json.JSONDecodeError as e:
            print(f"Error: Invalid headers JSON - {e}", file=sys.stderr)
            sys.exit(1)

    try:
        response = http_post(args.url, data, headers, args.timeout)

        # Pretty-print when the body is JSON; otherwise echo it verbatim.
        try:
            resp_data = json.loads(response)
            print(json.dumps(resp_data, indent=2, ensure_ascii=False))
        except json.JSONDecodeError:
            print(response)

    except requests.exceptions.MissingSchema:
        # Fixed: was an f-string with no placeholders (ruff F541).
        print("Error: Invalid URL. Include http:// or https://", file=sys.stderr)
        sys.exit(1)
    except requests.exceptions.ConnectionError as e:
        print(f"Error: Connection failed - {e}", file=sys.stderr)
        sys.exit(2)
    except requests.exceptions.Timeout:
        print(f"Error: Request timed out after {args.timeout} seconds", file=sys.stderr)
        sys.exit(2)
    except requests.exceptions.HTTPError as e:
        print(f"Error: HTTP {e.response.status_code} - {e.response.reason}", file=sys.stderr)
        # Still surface the error body (pretty-printed when JSON) to aid debugging.
        if e.response.text:
            try:
                resp_data = json.loads(e.response.text)
                print(json.dumps(resp_data, indent=2), file=sys.stderr)
            except json.JSONDecodeError:
                print(e.response.text, file=sys.stderr)
        sys.exit(3)
    except requests.exceptions.RequestException as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(2)


if __name__ == '__main__':
    main()
def parse_path(path: str) -> List[Any]:
    """Split a path expression into its components.

    Supports dotted keys ("a.b"), numeric indices ("[0]") and the
    wildcard "[*]". Indices become ints, the wildcard becomes '*',
    keys stay strings.

    >>> parse_path("data.users[0].name")
    ['data', 'users', 0, 'name']
    """
    components = []
    # Alternation: either a dotted key segment, or a bracketed index/wildcard.
    pattern = r'\.?([^\.\[\]]+)|\[(\d+|\*)\]'

    for match in re.finditer(pattern, path):
        if match.group(1):
            components.append(match.group(1))
        elif match.group(2):
            idx = match.group(2)
            if idx == '*':
                components.append('*')
            else:
                components.append(int(idx))

    return components


def extract_value(data: Any, path: str) -> Any:
    """Walk `data` following a path expression and return the value found.

    Args:
        data: Parsed JSON data (dicts/lists/scalars).
        path: Path expression, e.g. "data.users[0].name". Empty path
            returns `data` unchanged.

    Raises:
        TypeError: when an index/key is applied to an incompatible type.
        KeyError: when a dict key is absent.
        IndexError: when a list index is out of range.
    """
    if not path:
        return data

    components = parse_path(path)
    current = data

    for component in components:
        if component == '*':
            if isinstance(current, list):
                return current
            raise TypeError("Can't use [*] on non-array")
        elif isinstance(component, int):
            if not isinstance(current, list):
                raise TypeError(f"Can't use [{component}] on non-array")
            current = current[component]
        else:
            if not isinstance(current, dict):
                raise TypeError(f"Can't access '{component}' on non-object")
            # Fixed: the previous `current.get(component)` + `is None` check
            # raised KeyError for keys that exist with a JSON null value.
            # Membership test distinguishes "missing" from "null".
            if component not in current:
                raise KeyError(f"Key '{component}' not found")
            current = current[component]

    return current


def format_as_table(data: Any) -> str:
    """Render a list of flat dicts (or a single dict) as an ASCII table.

    Non-tabular input (empty, or not dict rows) falls back to indented JSON.
    Column order is first-seen key order across all rows.
    """
    if isinstance(data, dict):
        data = [data]

    if not isinstance(data, list) or not data:
        return json.dumps(data, indent=2)

    if not isinstance(data[0], dict):
        return json.dumps(data, indent=2)

    # Union of keys, preserving first-seen order.
    keys = []
    for item in data:
        for key in item.keys():
            if key not in keys:
                keys.append(key)

    # Column widths: at least the header width, widened by the longest cell.
    widths = {key: len(str(key)) for key in keys}
    for item in data:
        for key in keys:
            val = str(item.get(key, ''))
            widths[key] = max(widths[key], len(val))

    # Build table
    lines = []

    header = '| ' + ' | '.join(str(k).ljust(widths[k]) for k in keys) + ' |'
    separator = '|-' + '-|-'.join('-' * widths[k] for k in keys) + '-|'

    lines.append(header)
    lines.append(separator)

    for item in data:
        row = '| ' + ' | '.join(str(item.get(k, '')).ljust(widths[k]) for k in keys) + ' |'
        lines.append(row)

    return '\n'.join(lines)


def format_as_csv(data: Any) -> str:
    """Render a list of flat dicts (or a single dict) as CSV text.

    Scalars in a plain array are emitted one per line. Values containing
    commas, quotes, or newlines are quoted with doubled inner quotes.
    """
    if isinstance(data, dict):
        data = [data]

    if not isinstance(data, list) or not data:
        return ''

    if not isinstance(data[0], dict):
        # Simple array of values
        return '\n'.join(str(item) for item in data)

    # Union of keys, preserving first-seen order.
    keys = []
    for item in data:
        for key in item.keys():
            if key not in keys:
                keys.append(key)

    lines = []

    # Header
    lines.append(','.join(keys))

    # Rows
    for item in data:
        values = []
        for key in keys:
            val = str(item.get(key, ''))
            # Escape commas and quotes per RFC 4180 conventions
            if ',' in val or '"' in val or '\n' in val:
                val = '"' + val.replace('"', '""') + '"'
            values.append(val)
        lines.append(','.join(values))

    return '\n'.join(lines)
- - Args: - data_str: JSON string to parse - extract: Optional path to extract - output_format: Output format (json, table, csv) - - Returns: - Formatted output string - """ - data = json.loads(data_str) - - # Extract if path provided - if extract: - data = extract_value(data, extract) - - # Format output - if output_format == 'table': - return format_as_table(data) - elif output_format == 'csv': - return format_as_csv(data) - else: - return json.dumps(data, indent=2, ensure_ascii=False) - - -def main(): - parser = argparse.ArgumentParser( - description='Parse and format API responses', - formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=''' -Examples: - python parse_response.py --data '{"user": {"name": "John"}}' --extract "user.name" - echo '{"items": [...]}' | python parse_response.py --extract "items" --format table - python parse_response.py --data '[{"id":1},{"id":2}]' --format csv - ''' - ) - parser.add_argument( - '--data', - help='JSON response data (if not provided, reads from stdin)' - ) - parser.add_argument( - '--extract', - help='Path to extract specific field (e.g., "data.users[0].name")' - ) - parser.add_argument( - '--format', - choices=['json', 'table', 'csv'], - default='json', - help='Output format (default: json)' - ) - - args = parser.parse_args() - - # Get data from argument or stdin - if args.data: - data_str = args.data - else: - if sys.stdin.isatty(): - print("Error: No data provided. 
Use --data or pipe JSON to stdin.", file=sys.stderr) - sys.exit(1) - data_str = sys.stdin.read() - - if not data_str.strip(): - print("Error: Empty input", file=sys.stderr) - sys.exit(1) - - try: - result = parse_response(data_str, args.extract, args.format) - print(result) - except json.JSONDecodeError as e: - print(f"Error: Invalid JSON - {e}", file=sys.stderr) - sys.exit(4) - except (KeyError, TypeError, IndexError) as e: - print(f"Error extracting path: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/artifacts-builder/Skills.md b/coderrr-skills/skills/artifacts-builder/Skills.md deleted file mode 100644 index a41c580..0000000 --- a/coderrr-skills/skills/artifacts-builder/Skills.md +++ /dev/null @@ -1,189 +0,0 @@ ---- -name: artifacts-builder -description: Build complex HTML artifacts using React, Tailwind CSS, and shadcn/ui components. Use this skill when the user wants to create polished UI components, interactive web widgets, dashboards, landing pages, or sophisticated HTML artifacts with modern styling and interactivity. ---- - -This skill guides creation of production-quality HTML artifacts using modern frontend technologies. It helps scaffold React components with Tailwind CSS styling and shadcn/ui components for professional, consistent design. - -The user wants to create an HTML artifact. They may specify the type (component, page, widget, dashboard), design requirements, and interactivity needs. - -## Approach - -When building artifacts: -1. **Analyze requirements**: Understand the UI/UX goals -2. **Choose architecture**: Single HTML vs multi-file component -3. **Select components**: Pick appropriate shadcn/ui components -4. **Build progressively**: Start with structure, add styling, then interactivity - -## Tools - -### scaffold_artifact - -Creates the initial artifact structure with appropriate boilerplate. 
- -```bash -python tools/scaffold_artifact.py --name <name> --type <type> --output-dir <path> [--features <list>] -``` - -**Arguments:** -- `--name` (required): Artifact name (used for file naming) -- `--type` (required): Artifact type - `component`, `page`, `widget`, `dashboard` -- `--output-dir` (required): Output directory -- `--features` (optional): Comma-separated features - `dark-mode`, `animations`, `responsive` - -**Types:** -- `component` - Reusable React component -- `page` - Full page layout -- `widget` - Self-contained interactive widget -- `dashboard` - Data dashboard with charts/tables - -**When to use:** -- Starting a new artifact -- Getting proper boilerplate -- Setting up the file structure - ---- - -### add_component - -Adds pre-built component templates to an artifact. - -```bash -python tools/add_component.py --artifact-dir <path> --component <name> [--variant <variant>] -``` - -**Arguments:** -- `--artifact-dir` (required): Path to artifact directory -- `--component` (required): Component to add (see list below) -- `--variant` (optional): Component variant/style - -**Available Components:** -- `button` - Interactive buttons (variants: primary, secondary, outline, ghost) -- `card` - Content card with header/body/footer -- `dialog` - Modal dialog -- `dropdown` - Dropdown menu -- `form` - Form with validation -- `table` - Data table with sorting -- `tabs` - Tabbed interface -- `navbar` - Navigation bar -- `sidebar` - Side navigation -- `chart` - Data visualization (variants: line, bar, pie) - -**When to use:** -- Adding UI components -- Building layouts -- Including interactive elements - ---- - -### build_artifact - -Compiles the artifact into a single, deliverable HTML file. 
- -```bash -python tools/build_artifact.py --artifact-dir <path> --output <path> [--minify] -``` - -**Arguments:** -- `--artifact-dir` (required): Path to artifact directory -- `--output` (required): Output HTML file path -- `--minify` (optional): Minify the output - -**When to use:** -- Creating final deliverable -- Bundling for distribution -- Generating standalone HTML - ---- - -### preview_artifact - -Generates a preview of the artifact. - -```bash -python tools/preview_artifact.py --artifact-dir <path> [--port <port>] -``` - -**Arguments:** -- `--artifact-dir` (required): Path to artifact directory -- `--port` (optional): Preview server port (default: 3000) - -**When to use:** -- Testing the artifact -- Visual verification -- Development iteration - -## Design Guidelines - -### Visual Excellence -- Use rich color palettes, not generic defaults -- Implement smooth animations and transitions -- Apply generous whitespace for breathing room -- Choose distinctive typography - -### Modern Aesthetics -- Glassmorphism effects for depth -- Gradient backgrounds and accents -- Subtle shadows and blur effects -- Micro-interactions on hover/focus - -### Responsive Design -- Mobile-first approach -- Flexible grids and layouts -- Appropriate breakpoints -- Touch-friendly interactions - -## Common Patterns - -### Create Landing Page -```bash -python tools/scaffold_artifact.py --name landing --type page --output-dir ./artifacts --features responsive,animations -python tools/add_component.py --artifact-dir ./artifacts/landing --component navbar -python tools/add_component.py --artifact-dir ./artifacts/landing --component button --variant primary -python tools/build_artifact.py --artifact-dir ./artifacts/landing --output ./landing.html -``` - -### Create Dashboard Widget -```bash -python tools/scaffold_artifact.py --name metrics --type widget --output-dir ./artifacts -python tools/add_component.py --artifact-dir ./artifacts/metrics --component card -python tools/add_component.py 
--artifact-dir ./artifacts/metrics --component chart --variant line -python tools/build_artifact.py --artifact-dir ./artifacts/metrics --output ./widget.html -``` - -## Best Practices - -1. **Start with scaffold** - Get proper boilerplate and structure -2. **Use components** - Don't build from scratch when components exist -3. **Test responsiveness** - Check at multiple breakpoints -4. **Preview before build** - Catch issues early -5. **Minify for production** - Smaller file size for delivery - -## Color Palette Suggestions - -Avoid generic colors. Use these curated palettes: - -**Professional Dark:** -- Background: `#0f172a` -- Surface: `#1e293b` -- Primary: `#3b82f6` -- Accent: `#f472b6` - -**Warm Light:** -- Background: `#fef7ee` -- Surface: `#ffffff` -- Primary: `#ea580c` -- Accent: `#0ea5e9` - -**Modern Neutral:** -- Background: `#18181b` -- Surface: `#27272a` -- Primary: `#a78bfa` -- Accent: `#34d399` - -## Dependencies - -Generates self-contained HTML with embedded: -- React 18 (via CDN) -- Tailwind CSS (via CDN) -- shadcn/ui component styles diff --git a/coderrr-skills/skills/artifacts-builder/tools/add_component.py b/coderrr-skills/skills/artifacts-builder/tools/add_component.py deleted file mode 100644 index 6083313..0000000 --- a/coderrr-skills/skills/artifacts-builder/tools/add_component.py +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/env python3 -""" -Add component templates to an artifact. 
- -Usage: - python add_component.py --artifact-dir ./my-artifact --component button --variant primary -""" - -import argparse -import sys -import json -from pathlib import Path - - -COMPONENTS = { - "button": { - "primary": ''' -const Button = ({ children, onClick, disabled }) => ( - <button - onClick={onClick} - disabled={disabled} - className="bg-primary hover:bg-blue-600 disabled:bg-slate-400 text-white px-6 py-2.5 rounded-lg font-medium transition-colors shadow-sm hover:shadow-md" - > - {children} - </button> -); -''', - "secondary": ''' -const Button = ({ children, onClick, disabled }) => ( - <button - onClick={onClick} - disabled={disabled} - className="bg-slate-200 dark:bg-slate-700 hover:bg-slate-300 dark:hover:bg-slate-600 text-slate-900 dark:text-white px-6 py-2.5 rounded-lg font-medium transition-colors" - > - {children} - </button> -); -''', - "outline": ''' -const Button = ({ children, onClick, disabled }) => ( - <button - onClick={onClick} - disabled={disabled} - className="border-2 border-primary text-primary hover:bg-primary hover:text-white px-6 py-2.5 rounded-lg font-medium transition-colors" - > - {children} - </button> -); -''' - }, - "card": { - "default": ''' -const Card = ({ title, children, footer }) => ( - <div className="bg-white dark:bg-slate-800 rounded-xl shadow-lg overflow-hidden"> - {title && ( - <div className="px-6 py-4 border-b border-slate-200 dark:border-slate-700"> - <h3 className="text-lg font-semibold text-slate-900 dark:text-white">{title}</h3> - </div> - )} - <div className="px-6 py-4">{children}</div> - {footer && ( - <div className="px-6 py-4 bg-slate-50 dark:bg-slate-800/50 border-t border-slate-200 dark:border-slate-700"> - {footer} - </div> - )} - </div> -); -''' - }, - "navbar": { - "default": ''' -const Navbar = ({ logo, links }) => ( - <nav className="bg-white dark:bg-slate-900 border-b border-slate-200 dark:border-slate-800 sticky top-0 z-50"> - <div className="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8"> - <div 
className="flex justify-between h-16 items-center"> - <div className="text-xl font-bold text-slate-900 dark:text-white">{logo}</div> - <div className="hidden md:flex space-x-8"> - {links?.map((link, i) => ( - <a key={i} href={link.href} className="text-slate-600 dark:text-slate-300 hover:text-primary transition-colors"> - {link.label} - </a> - ))} - </div> - </div> - </div> - </nav> -); -''' - }, - "table": { - "default": ''' -const Table = ({ headers, rows }) => ( - <div className="overflow-x-auto rounded-lg border border-slate-200 dark:border-slate-700"> - <table className="min-w-full divide-y divide-slate-200 dark:divide-slate-700"> - <thead className="bg-slate-50 dark:bg-slate-800"> - <tr> - {headers?.map((header, i) => ( - <th key={i} className="px-6 py-3 text-left text-xs font-medium text-slate-500 dark:text-slate-400 uppercase tracking-wider"> - {header} - </th> - ))} - </tr> - </thead> - <tbody className="bg-white dark:bg-slate-900 divide-y divide-slate-200 dark:divide-slate-700"> - {rows?.map((row, i) => ( - <tr key={i} className="hover:bg-slate-50 dark:hover:bg-slate-800/50"> - {row.map((cell, j) => ( - <td key={j} className="px-6 py-4 whitespace-nowrap text-slate-900 dark:text-white"> - {cell} - </td> - ))} - </tr> - ))} - </tbody> - </table> - </div> -); -''' - } -} - - -def add_component(artifact_dir: str, component: str, variant: str = None): - """Add a component to the artifact.""" - artifact_path = Path(artifact_dir) - - if not artifact_path.exists(): - raise ValueError(f"Artifact directory not found: {artifact_dir}") - - if component not in COMPONENTS: - return { - "error": f"Unknown component: {component}", - "available": list(COMPONENTS.keys()) - } - - variants = COMPONENTS[component] - variant = variant or "default" - if variant not in variants: - variant = list(variants.keys())[0] - - component_code = variants[variant] - - # Update components.json - config_file = artifact_path / 'components.json' - if config_file.exists(): - config = 
json.loads(config_file.read_text()) - else: - config = {"components": []} - - config["components"].append({ - "name": component, - "variant": variant - }) - config_file.write_text(json.dumps(config, indent=2)) - - # Save component code - components_dir = artifact_path / 'components' - components_dir.mkdir(exist_ok=True) - (components_dir / f'{component}.jsx').write_text(component_code) - - return { - "status": "success", - "component": component, - "variant": variant, - "file": str(components_dir / f'{component}.jsx') - } - - -def main(): - parser = argparse.ArgumentParser(description='Add component to artifact') - parser.add_argument('--artifact-dir', required=True, help='Artifact directory') - parser.add_argument('--component', required=True, help='Component name') - parser.add_argument('--variant', help='Component variant') - - args = parser.parse_args() - - try: - result = add_component(args.artifact_dir, args.component, args.variant) - print(json.dumps(result, indent=2)) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/artifacts-builder/tools/build_artifact.py b/coderrr-skills/skills/artifacts-builder/tools/build_artifact.py deleted file mode 100644 index 4506738..0000000 --- a/coderrr-skills/skills/artifacts-builder/tools/build_artifact.py +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/env python3 -""" -Build artifact into single HTML file. 
def build_artifact(artifact_dir: str, output: str, minify: bool = False):
    """Bundle an artifact directory into a single deliverable HTML file.

    Reads index.html, inlines any components/*.jsx sources at the top of the
    first text/babel script tag, optionally minifies, and writes the result.

    Args:
        artifact_dir: Directory containing index.html (and optionally components/).
        output: Destination path for the bundled HTML (parents created).
        minify: When True, collapse whitespace runs and inter-tag gaps.

    Returns:
        Dict with status, output path, and size information.

    Raises:
        ValueError: if the directory or its index.html is missing.
    """
    root = Path(artifact_dir)

    if not root.exists():
        raise ValueError(f"Artifact directory not found: {artifact_dir}")

    index_file = root / 'index.html'
    if not index_file.exists():
        raise ValueError("index.html not found in artifact directory")

    html = index_file.read_text()

    # Inline component sources, if any, into the babel script block.
    components_dir = root / 'components'
    if components_dir.exists():
        sources = [comp_file.read_text() for comp_file in components_dir.glob('*.jsx')]
        if sources:
            bundle = '\n'.join(sources)
            html = html.replace(
                '<script type="text/babel">',
                f'<script type="text/babel">\n{bundle}\n'
            )

    if minify:
        # Naive minification: collapse whitespace runs, then inter-tag gaps.
        # NOTE(review): this also rewrites whitespace inside inline scripts
        # and text nodes — presumably acceptable for these generated artifacts.
        html = re.sub(r'\s+', ' ', html)
        html = re.sub(r'>\s+<', '><', html)

    destination = Path(output)
    destination.parent.mkdir(parents=True, exist_ok=True)
    destination.write_text(html)

    return {
        "status": "success",
        "output": str(destination),
        "size": len(html),
        "size_human": f"{len(html) / 1024:.1f} KB"
    }
def preview_artifact(artifact_dir: str, port: int = 3000):
    """Serve an artifact directory over HTTP and open it in the browser.

    Blocks in serve_forever() until interrupted (Ctrl+C).

    Args:
        artifact_dir: Directory containing index.html.
        port: TCP port to bind (default 3000).

    Raises:
        ValueError: if the directory or its index.html is missing.
        OSError: for bind errors other than "address already in use"
            (that case is reported as JSON with a suggested port).
    """
    artifact_path = Path(artifact_dir)

    if not artifact_path.exists():
        raise ValueError(f"Artifact directory not found: {artifact_dir}")

    index_file = artifact_path / 'index.html'
    if not index_file.exists():
        raise ValueError("index.html not found in artifact directory")

    class QuietHandler(http.server.SimpleHTTPRequestHandler):
        def log_message(self, format, *args):
            pass  # Suppress per-request logging

    # Fixed: the original called os.chdir() AFTER binding the server and
    # opening the browser, so an early request could be served from the
    # wrong directory, and the process-wide CWD was mutated. Binding the
    # handler to the directory (SimpleHTTPRequestHandler's `directory`
    # parameter, Python 3.7+) avoids both problems.
    def handler_factory(*h_args, **h_kwargs):
        return QuietHandler(*h_args, directory=str(artifact_path), **h_kwargs)

    try:
        with socketserver.TCPServer(("", port), handler_factory) as httpd:
            url = f"http://localhost:{port}/"
            print(json.dumps({
                "status": "running",
                "url": url,
                "port": port,
                "message": "Press Ctrl+C to stop"
            }))

            # Open browser
            webbrowser.open(url)

            httpd.serve_forever()
    except KeyboardInterrupt:
        print("\nServer stopped")
    except OSError as e:
        if "Address already in use" in str(e):
            print(json.dumps({
                "error": f"Port {port} is already in use",
                "suggestion": f"Try --port {port + 1}"
            }))
        else:
            raise


def main():
    """CLI wrapper around preview_artifact."""
    parser = argparse.ArgumentParser(description='Preview artifact')
    parser.add_argument('--artifact-dir', required=True, help='Artifact directory')
    parser.add_argument('--port', type=int, default=3000, help='Server port')

    args = parser.parse_args()

    try:
        preview_artifact(args.artifact_dir, args.port)
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
- - - - -''' - -REACT_TEMPLATES = { - "component": ''' -const App = () => { - return ( -
-
-

- Component Title -

-

- Your component content goes here. -

-
-
- ); -}; - -ReactDOM.createRoot(document.getElementById('root')).render(); -''', - "page": ''' -const Navbar = () => ( - -); - -const Hero = () => ( -
-
-

- Welcome to Your Page -

-

- A beautiful, modern landing page built with React and Tailwind CSS. -

- -
-
-); - -const App = () => ( - <> - - - -); - -ReactDOM.createRoot(document.getElementById('root')).render(); -''', - "widget": ''' -const Widget = () => { - const [count, setCount] = React.useState(0); - - return ( -
-
-
-
{count}
-

Counter Value

-
- - -
-
-
-
- ); -}; - -ReactDOM.createRoot(document.getElementById('root')).render(); -''', - "dashboard": ''' -const StatCard = ({ title, value, change }) => ( -
-

{title}

-

{value}

-

= 0 ? 'text-green-500' : 'text-red-500'}`}> - {change >= 0 ? '↑' : '↓'} {Math.abs(change)}% -

-
-); - -const Dashboard = () => { - const stats = [ - { title: 'Total Revenue', value: '$45,231', change: 12.5 }, - { title: 'Active Users', value: '2,345', change: 8.1 }, - { title: 'Conversion Rate', value: '3.2%', change: -2.4 }, - { title: 'Avg Session', value: '4m 32s', change: 15.3 }, - ]; - - return ( -
-

Dashboard

-
- {stats.map((stat, i) => ( - - ))} -
-
def scaffold_artifact(name: str, artifact_type: str, output_dir: str, features: list = None):
    """Create the initial file structure for a new artifact.

    Writes index.html (HTML_TEMPLATE filled with the React template for the
    requested type) and a components.json manifest into <output_dir>/<name>.

    Args:
        name: Artifact name; also used as the directory name and page title.
        artifact_type: One of the REACT_TEMPLATES keys; unknown types fall
            back to the 'component' template.
        output_dir: Parent directory for the new artifact (created if needed).
        features: Optional feature flags ('dark-mode', 'animations', ...).

    Returns:
        Dict with status, artifact_dir, and the list of files written.
    """
    feature_flags = features or []
    artifact_dir = Path(output_dir) / name
    artifact_dir.mkdir(parents=True, exist_ok=True)

    # Template substitutions derived from the requested features.
    dark_class = 'dark' if 'dark-mode' in feature_flags else ''
    animations = 'transition-all duration-300' if 'animations' in feature_flags else ''
    custom_styles = ''

    react_code = REACT_TEMPLATES.get(artifact_type, REACT_TEMPLATES['component'])

    html = HTML_TEMPLATE.format(
        title=name.replace('-', ' ').title(),
        dark_class=dark_class,
        animations=animations,
        custom_styles=custom_styles,
        react_code=react_code
    )

    index_path = artifact_dir / 'index.html'
    manifest_path = artifact_dir / 'components.json'
    index_path.write_text(html)
    manifest_path.write_text(json.dumps({
        "name": name,
        "type": artifact_type,
        "features": feature_flags,
        "components": []
    }, indent=2))

    return {
        "status": "success",
        "artifact_dir": str(artifact_dir),
        "files": [
            str(index_path),
            str(manifest_path)
        ]
    }


def main():
    """CLI wrapper around scaffold_artifact."""
    parser = argparse.ArgumentParser(description='Scaffold an artifact')
    parser.add_argument('--name', required=True, help='Artifact name')
    parser.add_argument('--type', required=True, choices=['component', 'page', 'widget', 'dashboard'])
    parser.add_argument('--output-dir', required=True, help='Output directory')
    parser.add_argument('--features', help='Comma-separated features')

    args = parser.parse_args()
    feature_list = args.features.split(',') if args.features else []

    try:
        result = scaffold_artifact(args.name, args.type, args.output_dir, feature_list)
        print(json.dumps(result, indent=2))
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
b/coderrr-skills/skills/brand-guidelines/Skills.md deleted file mode 100644 index 99e5fb4..0000000 --- a/coderrr-skills/skills/brand-guidelines/Skills.md +++ /dev/null @@ -1,173 +0,0 @@ ---- -name: brand-guidelines -description: Apply official brand colors, typography, and design tokens to projects. Use this skill when the user wants to set up brand colors, configure typography, generate color palettes, create design tokens, or ensure brand consistency across a project. ---- - -This skill helps maintain brand consistency by managing colors, typography, and design tokens. It generates CSS variables, config files, and documentation for brand implementation. - -The user wants to apply brand styling to a project. They may provide brand colors, font choices, or want to generate a cohesive palette. - -## Approach - -When setting up brand guidelines: -1. **Define**: Use `set_brand` to configure core brand values -2. **Generate**: Use `generate_palette` for extended colors -3. **Export**: Use `export_tokens` for CSS/SCSS/JSON/Tailwind -4. **Document**: Use `create_styleguide` for team reference - -## Tools - -### set_brand - -Configures core brand values. - -```bash -python tools/set_brand.py --name --primary --secondary [--accent ] [--fonts ] -``` - -**Arguments:** -- `--name` (required): Brand/project name -- `--primary` (required): Primary brand color (hex) -- `--secondary` (required): Secondary color (hex) -- `--accent` (optional): Accent color (hex) -- `--fonts` (optional): Font configuration JSON - -**Fonts JSON:** -```json -{ - "heading": "Outfit", - "body": "Inter", - "mono": "JetBrains Mono" -} -``` - -**When to use:** -- Starting a new project -- Updating brand colors -- Setting typography - ---- - -### generate_palette - -Generates extended color palette from brand colors. 
- -```bash -python tools/generate_palette.py --brand [--include ] -``` - -**Arguments:** -- `--brand` (required): Brand name (from set_brand) -- `--include` (optional): What to generate (default: both) - -**Generates:** -- **Shades**: 50-950 scale for each color -- **Semantic**: success, warning, error, info colors - -**When to use:** -- Creating full color system -- Generating consistent shades -- Adding semantic colors - ---- - -### export_tokens - -Exports design tokens in various formats. - -```bash -python tools/export_tokens.py --brand --format --output -``` - -**Arguments:** -- `--brand` (required): Brand name -- `--format` (required): Output format - `css`, `scss`, `json`, `tailwind`, `figma` -- `--output` (required): Output file path - -**CSS Output Example:** -```css -:root { - --color-primary: #3b82f6; - --color-primary-50: #eff6ff; - --color-primary-500: #3b82f6; - --font-heading: 'Outfit', sans-serif; -} -``` - -**When to use:** -- Integrating with existing projects -- Setting up Tailwind config -- Sharing with design tools - ---- - -### create_styleguide - -Generates brand documentation. 
- -```bash -python tools/create_styleguide.py --brand --output [--format ] -``` - -**Arguments:** -- `--brand` (required): Brand name -- `--output` (required): Output file/directory -- `--format` (optional): Guide format (default: html) - -**When to use:** -- Documenting brand for team -- Creating design reference -- Onboarding designers - -## Common Patterns - -### Complete Brand Setup -```bash -# Define brand -python tools/set_brand.py --name myproject --primary "#6366f1" --secondary "#64748b" --accent "#f43f5e" --fonts '{"heading": "Outfit", "body": "Inter"}' - -# Generate extended palette -python tools/generate_palette.py --brand myproject --include both - -# Export for Tailwind -python tools/export_tokens.py --brand myproject --format tailwind --output tailwind.config.js - -# Create documentation -python tools/create_styleguide.py --brand myproject --output ./docs/brand -``` - -### Quick CSS Variables -```bash -python tools/set_brand.py --name quick --primary "#0ea5e9" --secondary "#1e293b" -python tools/export_tokens.py --brand quick --format css --output variables.css -``` - -## Color Guidelines - -**Primary**: Main brand color, buttons, links, key UI elements -**Secondary**: Supporting color, backgrounds, borders -**Accent**: Call-to-action, highlights, notifications - -**Shade Scale:** -- 50: Lightest (backgrounds) -- 100-200: Light variants -- 300-400: Muted variants -- 500: Base color -- 600-700: Darker variants -- 800-900: Darkest (text on light) -- 950: Near-black variant - -## Typography Guidelines - -**Heading fonts**: Display, expressive -- Outfit, Space Grotesk, Clash Display, Satoshi - -**Body fonts**: Readable, neutral -- Inter, Source Sans, Nunito Sans, DM Sans - -**Mono fonts**: Code, technical -- JetBrains Mono, Fira Code, IBM Plex Mono - -## Dependencies - -Uses Python standard library with optional `colormath` for advanced color operations. 
diff --git a/coderrr-skills/skills/brand-guidelines/tools/export_tokens.py b/coderrr-skills/skills/brand-guidelines/tools/export_tokens.py deleted file mode 100644 index 374595c..0000000 --- a/coderrr-skills/skills/brand-guidelines/tools/export_tokens.py +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/env python3 -""" -Export design tokens. - -Usage: - python export_tokens.py --brand myproject --format css --output variables.css -""" - -import argparse -import sys -import json -from pathlib import Path - - -BRANDS_DIR = Path.home() / '.coderrr' / 'brands' - - -def export_css(brand: dict) -> str: - """Export as CSS variables.""" - lines = [":root {"] - - # Colors - palette = brand.get("palette", {}).get("colors", {}) - for color_name, shades in palette.items(): - if isinstance(shades, dict): - for shade, value in shades.items(): - lines.append(f" --color-{color_name}-{shade}: {value};") - else: - lines.append(f" --color-{color_name}: {shades};") - - # Fonts - fonts = brand.get("fonts", {}) - for font_type, font_name in fonts.items(): - lines.append(f" --font-{font_type}: '{font_name}', sans-serif;") - - lines.append("}") - return '\n'.join(lines) - - -def export_scss(brand: dict) -> str: - """Export as SCSS variables.""" - lines = [] - - palette = brand.get("palette", {}).get("colors", {}) - for color_name, shades in palette.items(): - if isinstance(shades, dict): - for shade, value in shades.items(): - lines.append(f"${color_name}-{shade}: {value};") - else: - lines.append(f"${color_name}: {shades};") - - fonts = brand.get("fonts", {}) - for font_type, font_name in fonts.items(): - lines.append(f"$font-{font_type}: '{font_name}', sans-serif;") - - return '\n'.join(lines) - - -def export_tailwind(brand: dict) -> str: - """Export as Tailwind config.""" - config = { - "theme": { - "extend": { - "colors": {}, - "fontFamily": {} - } - } - } - - palette = brand.get("palette", {}).get("colors", {}) - for color_name, shades in palette.items(): - 
config["theme"]["extend"]["colors"][color_name] = shades - - fonts = brand.get("fonts", {}) - for font_type, font_name in fonts.items(): - config["theme"]["extend"]["fontFamily"][font_type] = [font_name, "sans-serif"] - - return f"module.exports = {json.dumps(config, indent=2)}" - - -def export_tokens(brand_name: str, format_type: str, output: str): - """Export design tokens.""" - brand_file = BRANDS_DIR / f"{brand_name}.json" - - if not brand_file.exists(): - raise ValueError(f"Brand not found: {brand_name}") - - brand = json.loads(brand_file.read_text()) - - exporters = { - 'css': export_css, - 'scss': export_scss, - 'tailwind': export_tailwind, - 'json': lambda b: json.dumps(b, indent=2) - } - - if format_type not in exporters: - raise ValueError(f"Unknown format: {format_type}") - - content = exporters[format_type](brand) - - output_path = Path(output) - output_path.parent.mkdir(parents=True, exist_ok=True) - output_path.write_text(content) - - return { - "status": "success", - "brand": brand_name, - "format": format_type, - "output": str(output_path) - } - - -def main(): - parser = argparse.ArgumentParser(description='Export design tokens') - parser.add_argument('--brand', required=True, help='Brand name') - parser.add_argument('--format', required=True, choices=['css', 'scss', 'json', 'tailwind']) - parser.add_argument('--output', required=True, help='Output file path') - - args = parser.parse_args() - - try: - result = export_tokens(args.brand, args.format, args.output) - print(json.dumps(result, indent=2)) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/brand-guidelines/tools/generate_palette.py b/coderrr-skills/skills/brand-guidelines/tools/generate_palette.py deleted file mode 100644 index 58a9296..0000000 --- a/coderrr-skills/skills/brand-guidelines/tools/generate_palette.py +++ /dev/null @@ -1,113 +0,0 @@ -#!/usr/bin/env python3 -""" -Generate 
extended color palette. - -Usage: - python generate_palette.py --brand myproject --include both -""" - -import argparse -import sys -import json -from pathlib import Path - - -BRANDS_DIR = Path.home() / '.coderrr' / 'brands' - - -def hex_to_rgb(hex_color: str) -> tuple: - """Convert hex to RGB.""" - hex_color = hex_color.lstrip('#') - return tuple(int(hex_color[i:i+2], 16) for i in (0, 2, 4)) - - -def rgb_to_hex(rgb: tuple) -> str: - """Convert RGB to hex.""" - return '#{:02x}{:02x}{:02x}'.format(*[max(0, min(255, int(c))) for c in rgb]) - - -def lighten(hex_color: str, amount: float) -> str: - """Lighten a color.""" - r, g, b = hex_to_rgb(hex_color) - r = r + (255 - r) * amount - g = g + (255 - g) * amount - b = b + (255 - b) * amount - return rgb_to_hex((r, g, b)) - - -def darken(hex_color: str, amount: float) -> str: - """Darken a color.""" - r, g, b = hex_to_rgb(hex_color) - r = r * (1 - amount) - g = g * (1 - amount) - b = b * (1 - amount) - return rgb_to_hex((r, g, b)) - - -def generate_shades(base_color: str) -> dict: - """Generate 50-950 shade scale.""" - return { - "50": lighten(base_color, 0.95), - "100": lighten(base_color, 0.9), - "200": lighten(base_color, 0.75), - "300": lighten(base_color, 0.6), - "400": lighten(base_color, 0.3), - "500": base_color, - "600": darken(base_color, 0.1), - "700": darken(base_color, 0.25), - "800": darken(base_color, 0.4), - "900": darken(base_color, 0.55), - "950": darken(base_color, 0.7) - } - - -def generate_palette(brand_name: str, include: str = 'both'): - """Generate extended color palette.""" - brand_file = BRANDS_DIR / f"{brand_name}.json" - - if not brand_file.exists(): - raise ValueError(f"Brand not found: {brand_name}") - - brand = json.loads(brand_file.read_text()) - palette = {"colors": {}} - - if include in ['shades', 'both']: - # Generate shades for each brand color - for name, color in brand.get("colors", {}).items(): - palette["colors"][name] = generate_shades(color) - - if include in ['semantic', 
'both']: - # Add semantic colors - palette["colors"]["success"] = generate_shades("#22c55e") - palette["colors"]["warning"] = generate_shades("#eab308") - palette["colors"]["error"] = generate_shades("#ef4444") - palette["colors"]["info"] = generate_shades("#3b82f6") - - # Update brand file with palette - brand["palette"] = palette - brand_file.write_text(json.dumps(brand, indent=2)) - - return { - "status": "success", - "brand": brand_name, - "generated": list(palette["colors"].keys()) - } - - -def main(): - parser = argparse.ArgumentParser(description='Generate color palette') - parser.add_argument('--brand', required=True, help='Brand name') - parser.add_argument('--include', default='both', choices=['shades', 'semantic', 'both']) - - args = parser.parse_args() - - try: - result = generate_palette(args.brand, args.include) - print(json.dumps(result, indent=2)) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/brand-guidelines/tools/set_brand.py b/coderrr-skills/skills/brand-guidelines/tools/set_brand.py deleted file mode 100644 index 267bdd8..0000000 --- a/coderrr-skills/skills/brand-guidelines/tools/set_brand.py +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env python3 -""" -Configure core brand values. 
- -Usage: - python set_brand.py --name myproject --primary "#3b82f6" --secondary "#64748b" -""" - -import argparse -import sys -import json -from pathlib import Path - - -BRANDS_DIR = Path.home() / '.coderrr' / 'brands' - - -def hex_to_rgb(hex_color: str) -> tuple: - """Convert hex to RGB tuple.""" - hex_color = hex_color.lstrip('#') - return tuple(int(hex_color[i:i+2], 16) for i in (0, 2, 4)) - - -def set_brand(name: str, primary: str, secondary: str, accent: str = None, fonts: dict = None): - """Set brand configuration.""" - BRANDS_DIR.mkdir(parents=True, exist_ok=True) - - brand = { - "name": name, - "colors": { - "primary": primary, - "secondary": secondary - }, - "fonts": fonts or { - "heading": "Inter", - "body": "Inter", - "mono": "JetBrains Mono" - } - } - - if accent: - brand["colors"]["accent"] = accent - - # Save brand config - brand_file = BRANDS_DIR / f"{name}.json" - brand_file.write_text(json.dumps(brand, indent=2)) - - return { - "status": "success", - "brand": name, - "file": str(brand_file), - "colors": brand["colors"], - "fonts": brand["fonts"] - } - - -def main(): - parser = argparse.ArgumentParser(description='Set brand configuration') - parser.add_argument('--name', required=True, help='Brand name') - parser.add_argument('--primary', required=True, help='Primary color (hex)') - parser.add_argument('--secondary', required=True, help='Secondary color (hex)') - parser.add_argument('--accent', help='Accent color (hex)') - parser.add_argument('--fonts', help='Font configuration JSON') - - args = parser.parse_args() - - fonts = None - if args.fonts: - try: - fonts = json.loads(args.fonts) - except json.JSONDecodeError as e: - print(f"Error: Invalid fonts JSON - {e}", file=sys.stderr) - sys.exit(1) - - try: - result = set_brand(args.name, args.primary, args.secondary, args.accent, fonts) - print(json.dumps(result, indent=2)) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff 
--git a/coderrr-skills/skills/code-analyzer/Skills.md b/coderrr-skills/skills/code-analyzer/Skills.md deleted file mode 100644 index ae5e7a5..0000000 --- a/coderrr-skills/skills/code-analyzer/Skills.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -name: code-analyzer -description: Analyze code quality, structure, and maintainability. Use this skill when the user asks to lint code, count lines of code, find TODO/FIXME comments, analyze code structure, check for issues, or audit a codebase. Provides static analysis for Python and line counting for multiple languages. ---- - -This skill provides code quality analysis without external dependencies. It uses Python's AST module for static analysis and pattern matching for comment detection, supporting multiple programming languages for line counting. - -The user provides code files or directories to analyze. They may want quality checks, metrics, or to find action items like TODOs scattered through the codebase. - -## Approach - -Before invoking tools, understand the analysis goal: -- **Quality check**: Use `lint_python` for syntax and import issues -- **Size metrics**: Use `count_lines` for comprehensive line statistics -- **Action items**: Use `find_todos` to locate TODO, FIXME, HACK markers -- **Full audit**: Run all three tools sequentially for complete picture - -## Tools - -### lint_python - -Performs static analysis on Python files using the AST module. Detects syntax errors, unused imports, and provides code structure metrics. - -```bash -python tools/lint_python.py --file -``` - -**Arguments:** -- `--file` (required): Path to Python file to analyze - -**Output:** JSON with errors, warnings, and info (function/class/import counts). 
- -**What it detects:** -- Syntax errors (with line numbers) -- Unused imports -- Function and class counts -- Import analysis - -**When to use:** -- Quick quality check before committing -- Finding unused imports to clean up -- Getting code structure overview -- Validating Python syntax - -**Limitations:** Uses only stdlib AST, so it won't detect runtime errors, type issues, or complex linting rules that tools like flake8/pylint catch. - ---- - -### count_lines - -Counts lines of code with detailed breakdown by type (code, comments, blank) and language. - -```bash -python tools/count_lines.py --path -``` - -**Arguments:** -- `--path` (required): File or directory to analyze - -**Output:** JSON with summary totals and per-language breakdown. - -**Supported languages:** -- Python (`.py`) -- JavaScript/TypeScript (`.js`, `.ts`, `.jsx`, `.tsx`) -- Java (`.java`) -- C/C++ (`.c`, `.cpp`, `.h`, `.hpp`) -- Go (`.go`) -- Rust (`.rs`) -- Ruby (`.rb`) -- PHP, Swift, Kotlin, Scala, C# - -**When to use:** -- Estimating project size -- Comparing code vs comment ratios -- Understanding language distribution -- Tracking codebase growth - ---- - -### find_todos - -Finds TODO, FIXME, HACK, XXX, BUG, and NOTE comments throughout codebase. - -```bash -python tools/find_todos.py --path [--types ] -``` - -**Arguments:** -- `--path` (required): File or directory to search -- `--types` (optional): Comma-separated marker types (default: `TODO,FIXME,HACK,XXX,BUG,NOTE`) - -**Output:** JSON with count, breakdown by type, and list of all items with file/line/text. 
- -**When to use:** -- Reviewing technical debt -- Finding incomplete implementations -- Tracking known issues in code -- Generating action item lists - -## Common Patterns - -### Quick Python File Check -```bash -python tools/lint_python.py --file ./main.py -``` - -### Full Directory Analysis -```bash -python tools/count_lines.py --path ./src -``` - -### Find Only Critical Items -```bash -python tools/find_todos.py --path ./src --types FIXME,BUG -``` - -### Complete Code Audit -```bash -# Run all three for comprehensive analysis -python tools/lint_python.py --file ./main.py -python tools/count_lines.py --path ./src -python tools/find_todos.py --path ./src -``` - -## Best Practices - -1. **Run lint before commits** - Catch syntax errors and unused imports early -2. **Track line counts over time** - Monitor codebase growth -3. **Review TODOs regularly** - Don't let technical debt accumulate -4. **Focus on high-priority markers** - FIXME and BUG are usually more urgent than TODO -5. **Combine with file-search** - Find specific files first, then analyze them - -## Interpreting Results - -### lint_python Output -```json -{ - "file": "./main.py", - "errors": [], // Syntax errors - must fix - "warnings": [ // Quality issues - should fix - {"line": 1, "type": "unused_import", "message": "Unused import: os"} - ], - "info": { - "functions": 5, // Code structure overview - "classes": 2, - "imports": 8 - } -} -``` - -### count_lines Output -```json -{ - "summary": { - "total_lines": 1500, - "code_lines": 1100, // Executable code - "comment_lines": 200, // Documentation - "blank_lines": 200 // Formatting - } -} -``` - -A healthy ratio is roughly 70-80% code, 10-20% comments, 10-15% blank lines. 
- -## Error Handling - -| Exit Code | Meaning | Recovery | -|-----------|---------|----------| -| 0 | Success | - | -| 1 | Invalid file path | Verify file exists | -| 2 | File parsing error | Check file encoding, syntax | - -## Dependencies - -None - uses Python's standard library only (ast, os, re, json). diff --git a/coderrr-skills/skills/code-analyzer/requirements.txt b/coderrr-skills/skills/code-analyzer/requirements.txt deleted file mode 100644 index 0f0cc73..0000000 --- a/coderrr-skills/skills/code-analyzer/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -# No external dependencies required -# This skill uses Python's standard library only diff --git a/coderrr-skills/skills/code-analyzer/tools/count_lines.py b/coderrr-skills/skills/code-analyzer/tools/count_lines.py deleted file mode 100644 index 0cc8cf9..0000000 --- a/coderrr-skills/skills/code-analyzer/tools/count_lines.py +++ /dev/null @@ -1,198 +0,0 @@ -#!/usr/bin/env python3 -""" -Count lines of code with detailed breakdown. - -This tool counts total lines, code lines, comment lines, and blank lines -for various programming languages. 
- -Usage: - python count_lines.py --path ./main.py - python count_lines.py --path ./src - -Exit Codes: - 0 - Success - 1 - Invalid path -""" - -import argparse -import sys -import json -import os -from pathlib import Path -from typing import Dict, Any -from collections import defaultdict - - -# Language definitions: extension -> (name, single_comment, multi_start, multi_end) -LANGUAGES = { - '.py': ('python', '#', '"""', '"""'), - '.pyw': ('python', '#', '"""', '"""'), - '.js': ('javascript', '//', '/*', '*/'), - '.jsx': ('javascript', '//', '/*', '*/'), - '.ts': ('typescript', '//', '/*', '*/'), - '.tsx': ('typescript', '//', '/*', '*/'), - '.java': ('java', '//', '/*', '*/'), - '.c': ('c', '//', '/*', '*/'), - '.h': ('c', '//', '/*', '*/'), - '.cpp': ('cpp', '//', '/*', '*/'), - '.hpp': ('cpp', '//', '/*', '*/'), - '.cc': ('cpp', '//', '/*', '*/'), - '.go': ('go', '//', '/*', '*/'), - '.rs': ('rust', '//', '/*', '*/'), - '.rb': ('ruby', '#', '=begin', '=end'), - '.php': ('php', '//', '/*', '*/'), - '.swift': ('swift', '//', '/*', '*/'), - '.kt': ('kotlin', '//', '/*', '*/'), - '.scala': ('scala', '//', '/*', '*/'), - '.cs': ('csharp', '//', '/*', '*/'), -} - - -def count_file_lines(file_path: Path) -> Dict[str, int]: - """Count lines in a single file.""" - result = { - 'total_lines': 0, - 'code_lines': 0, - 'comment_lines': 0, - 'blank_lines': 0 - } - - ext = file_path.suffix.lower() - if ext not in LANGUAGES: - return result - - _, single_comment, multi_start, multi_end = LANGUAGES[ext] - - try: - with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: - in_multiline = False - - for line in f: - result['total_lines'] += 1 - stripped = line.strip() - - if not stripped: - result['blank_lines'] += 1 - continue - - # Handle multiline comments - if in_multiline: - result['comment_lines'] += 1 - if multi_end in stripped: - in_multiline = False - continue - - # Check for multiline comment start - if multi_start in stripped: - result['comment_lines'] += 1 - 
if multi_end not in stripped[stripped.index(multi_start) + len(multi_start):]: - in_multiline = True - continue - - # Check for single-line comment - if stripped.startswith(single_comment): - result['comment_lines'] += 1 - continue - - # It's a code line - result['code_lines'] += 1 - - except (IOError, OSError): - pass - - return result - - -def count_lines(path_str: str) -> Dict[str, Any]: - """ - Count lines of code in a file or directory. - - Args: - path_str: Path to file or directory - - Returns: - Dictionary with line counts - - Raises: - ValueError: If path doesn't exist - """ - path = Path(path_str) - - if not path.exists(): - raise ValueError(f"Path does not exist: {path_str}") - - summary = { - 'total_lines': 0, - 'code_lines': 0, - 'comment_lines': 0, - 'blank_lines': 0 - } - - by_language: Dict[str, Dict[str, int]] = defaultdict( - lambda: {'files': 0, 'total_lines': 0, 'code_lines': 0, 'comment_lines': 0, 'blank_lines': 0} - ) - - files_to_process = [] - - if path.is_file(): - files_to_process.append(path) - else: - for root, _, files in os.walk(path): - for filename in files: - file_path = Path(root) / filename - if file_path.suffix.lower() in LANGUAGES: - files_to_process.append(file_path) - - for file_path in files_to_process: - counts = count_file_lines(file_path) - - if counts['total_lines'] > 0: - ext = file_path.suffix.lower() - lang_name = LANGUAGES.get(ext, ('unknown',))[0] - - summary['total_lines'] += counts['total_lines'] - summary['code_lines'] += counts['code_lines'] - summary['comment_lines'] += counts['comment_lines'] - summary['blank_lines'] += counts['blank_lines'] - - by_language[lang_name]['files'] += 1 - by_language[lang_name]['total_lines'] += counts['total_lines'] - by_language[lang_name]['code_lines'] += counts['code_lines'] - by_language[lang_name]['comment_lines'] += counts['comment_lines'] - by_language[lang_name]['blank_lines'] += counts['blank_lines'] - - return { - 'path': str(path), - 'summary': summary, - 'by_language': 
dict(by_language) - } - - -def main(): - parser = argparse.ArgumentParser( - description='Count lines of code', - formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=''' -Examples: - python count_lines.py --path ./main.py - python count_lines.py --path ./src - ''' - ) - parser.add_argument( - '--path', - required=True, - help='File or directory to analyze' - ) - - args = parser.parse_args() - - try: - result = count_lines(args.path) - print(json.dumps(result, indent=2)) - except ValueError as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/code-analyzer/tools/find_todos.py b/coderrr-skills/skills/code-analyzer/tools/find_todos.py deleted file mode 100644 index b934d57..0000000 --- a/coderrr-skills/skills/code-analyzer/tools/find_todos.py +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/env python3 -""" -Find TODO, FIXME, HACK, and XXX comments in code. - -This tool searches for common task markers in code comments -and outputs their locations. 
- -Usage: - python find_todos.py --path ./src - python find_todos.py --path ./src --types FIXME,TODO - -Exit Codes: - 0 - Success - 1 - Invalid path -""" - -import argparse -import sys -import json -import os -import re -from pathlib import Path -from typing import Dict, Any, List - - -# Default markers to search for -DEFAULT_MARKERS = ['TODO', 'FIXME', 'HACK', 'XXX', 'BUG', 'NOTE'] - -# File extensions to search -SEARCHABLE_EXTENSIONS = { - '.py', '.pyw', '.js', '.jsx', '.ts', '.tsx', '.java', '.c', '.cpp', '.h', '.hpp', - '.cc', '.cs', '.go', '.rs', '.rb', '.php', '.swift', '.kt', '.scala', '.r', - '.sql', '.sh', '.bash', '.zsh', '.ps1', '.bat', '.cmd', - '.html', '.css', '.scss', '.sass', '.less', '.vue', '.svelte', - '.md', '.txt', '.rst', '.yaml', '.yml', '.toml', '.xml' -} - - -def find_todos_in_file(file_path: Path, markers: List[str]) -> List[Dict[str, Any]]: - """Find TODO-like comments in a single file.""" - todos = [] - - # Build pattern for markers - pattern = r'\b(' + '|'.join(re.escape(m) for m in markers) + r')[\s:]*(.*)$' - regex = re.compile(pattern, re.IGNORECASE) - - try: - with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: - for line_num, line in enumerate(f, 1): - match = regex.search(line) - if match: - marker_type = match.group(1).upper() - text = match.group(2).strip() - - # Clean up the text (remove trailing comment markers) - text = re.sub(r'[\*/]+\s*$', '', text).strip() - - todos.append({ - 'file': str(file_path), - 'line': line_num, - 'type': marker_type, - 'text': text if text else '(no description)' - }) - except (IOError, OSError): - pass - - return todos - - -def find_todos(path_str: str, marker_types: List[str] = None) -> Dict[str, Any]: - """ - Find TODO-like comments in files. 
- - Args: - path_str: Path to file or directory - marker_types: List of marker types to search for - - Returns: - Dictionary with found items - - Raises: - ValueError: If path doesn't exist - """ - path = Path(path_str) - - if not path.exists(): - raise ValueError(f"Path does not exist: {path_str}") - - markers = marker_types if marker_types else DEFAULT_MARKERS - all_todos = [] - - if path.is_file(): - all_todos.extend(find_todos_in_file(path, markers)) - else: - for root, _, files in os.walk(path): - for filename in files: - file_path = Path(root) / filename - if file_path.suffix.lower() in SEARCHABLE_EXTENSIONS: - all_todos.extend(find_todos_in_file(file_path, markers)) - - # Sort by file and line number - all_todos.sort(key=lambda x: (x['file'], x['line'])) - - # Group by type for summary - by_type: Dict[str, int] = {} - for todo in all_todos: - by_type[todo['type']] = by_type.get(todo['type'], 0) + 1 - - return { - 'count': len(all_todos), - 'by_type': by_type, - 'items': all_todos - } - - -def main(): - parser = argparse.ArgumentParser( - description='Find TODO, FIXME, HACK, and XXX comments', - formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=''' -Examples: - python find_todos.py --path ./src - python find_todos.py --path ./src --types FIXME,TODO - python find_todos.py --path ./main.py - ''' - ) - parser.add_argument( - '--path', - required=True, - help='File or directory to search' - ) - parser.add_argument( - '--types', - help='Comma-separated list of marker types (default: TODO,FIXME,HACK,XXX,BUG,NOTE)' - ) - - args = parser.parse_args() - - marker_types = None - if args.types: - marker_types = [t.strip().upper() for t in args.types.split(',')] - - try: - result = find_todos(args.path, marker_types) - print(json.dumps(result, indent=2)) - except ValueError as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/code-analyzer/tools/lint_python.py 
b/coderrr-skills/skills/code-analyzer/tools/lint_python.py deleted file mode 100644 index 01c9341..0000000 --- a/coderrr-skills/skills/code-analyzer/tools/lint_python.py +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/env python3 -""" -Basic Python linting using the AST module. - -This tool performs static analysis on Python files to detect common issues -like syntax errors, unused imports, and provides code statistics. - -Usage: - python lint_python.py --file ./main.py - -Exit Codes: - 0 - Success (even if issues found) - 1 - Invalid file path - 2 - Unable to parse file -""" - -import argparse -import sys -import json -import ast -from pathlib import Path -from typing import Dict, Any, List, Set - - -class ImportVisitor(ast.NodeVisitor): - """AST visitor to collect import information.""" - - def __init__(self): - self.imports: Set[str] = set() - self.from_imports: Dict[str, List[str]] = {} - self.all_names: Set[str] = set() - - def visit_Import(self, node): - for alias in node.names: - name = alias.asname if alias.asname else alias.name - self.imports.add(name) - self.generic_visit(node) - - def visit_ImportFrom(self, node): - module = node.module or '' - for alias in node.names: - name = alias.asname if alias.asname else alias.name - if name == '*': - continue - self.imports.add(name) - if module not in self.from_imports: - self.from_imports[module] = [] - self.from_imports[module].append(name) - self.generic_visit(node) - - -class NameVisitor(ast.NodeVisitor): - """AST visitor to collect all name usages.""" - - def __init__(self): - self.used_names: Set[str] = set() - self.defined_names: Set[str] = set() - self.function_count = 0 - self.class_count = 0 - - def visit_Name(self, node): - if isinstance(node.ctx, ast.Load): - self.used_names.add(node.id) - elif isinstance(node.ctx, ast.Store): - self.defined_names.add(node.id) - self.generic_visit(node) - - def visit_FunctionDef(self, node): - self.function_count += 1 - self.defined_names.add(node.name) - 
self.generic_visit(node) - - def visit_AsyncFunctionDef(self, node): - self.function_count += 1 - self.defined_names.add(node.name) - self.generic_visit(node) - - def visit_ClassDef(self, node): - self.class_count += 1 - self.defined_names.add(node.name) - self.generic_visit(node) - - -def lint_python(file_path: str) -> Dict[str, Any]: - """ - Perform basic linting on a Python file. - - Args: - file_path: Path to the Python file - - Returns: - Dictionary with lint results - - Raises: - ValueError: If file doesn't exist or isn't Python - SyntaxError: If file has syntax errors - """ - path = Path(file_path) - - if not path.exists(): - raise ValueError(f"File does not exist: {file_path}") - - if path.suffix != '.py': - raise ValueError(f"Not a Python file: {file_path}") - - with open(path, 'r', encoding='utf-8') as f: - source = f.read() - - result = { - 'file': str(path), - 'errors': [], - 'warnings': [], - 'info': { - 'functions': 0, - 'classes': 0, - 'imports': 0 - } - } - - # Try to parse the file - try: - tree = ast.parse(source, filename=str(path)) - except SyntaxError as e: - result['errors'].append({ - 'line': e.lineno, - 'type': 'syntax_error', - 'message': str(e.msg) - }) - return result - - # Collect imports - import_visitor = ImportVisitor() - import_visitor.visit(tree) - - # Collect name usages - name_visitor = NameVisitor() - name_visitor.visit(tree) - - # Check for unused imports - for imp in import_visitor.imports: - if imp not in name_visitor.used_names: - result['warnings'].append({ - 'line': 1, # AST doesn't easily give us the line for this - 'type': 'unused_import', - 'message': f"Unused import: {imp}" - }) - - # Update info - result['info']['functions'] = name_visitor.function_count - result['info']['classes'] = name_visitor.class_count - result['info']['imports'] = len(import_visitor.imports) - - return result - - -def main(): - parser = argparse.ArgumentParser( - description='Basic Python linting', - 
formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=''' -Examples: - python lint_python.py --file ./main.py - python lint_python.py --file ./src/utils.py - ''' - ) - parser.add_argument( - '--file', - required=True, - help='Python file to lint' - ) - - args = parser.parse_args() - - try: - result = lint_python(args.file) - print(json.dumps(result, indent=2)) - except ValueError as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - except Exception as e: - print(f"Error parsing file: {e}", file=sys.stderr) - sys.exit(2) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/docx/Skills.md b/coderrr-skills/skills/docx/Skills.md deleted file mode 100644 index a06b659..0000000 --- a/coderrr-skills/skills/docx/Skills.md +++ /dev/null @@ -1,160 +0,0 @@ ---- -name: docx -description: Create, edit, and analyze Word documents with professional formatting. Use this skill when the user asks to create Word documents, add content to DOCX files, extract text from Word files, work with tables, headers, footers, or analyze document structure. Supports tracked changes, comments, and advanced formatting. ---- - -This skill provides comprehensive Word document manipulation using python-docx. It handles document creation, content extraction, formatting, and structural analysis. - -The user provides document requirements or existing files to process. They may want to create new documents, modify existing ones, or extract information from Word files. - -## Approach - -Before invoking tools, understand the document operation: -- **Create new**: Use `create_docx` with content structure -- **Extract content**: Use `read_docx` to get text, tables, or metadata -- **Modify existing**: Use `edit_docx` to add or update content -- **Analyze structure**: Use `analyze_docx` for document breakdown - -## Tools - -### create_docx - -Creates a new Word document with specified content and formatting. 
- -```bash -python tools/create_docx.py --output --title [--content <json>] [--template <path>] -``` - -**Arguments:** -- `--output` (required): Output file path (.docx) -- `--title` (required): Document title -- `--content` (optional): JSON structure defining document content -- `--template` (optional): Path to template document - -**Content JSON Structure:** -```json -{ - "sections": [ - {"type": "heading", "level": 1, "text": "Main Title"}, - {"type": "paragraph", "text": "Body text here..."}, - {"type": "heading", "level": 2, "text": "Subsection"}, - {"type": "list", "items": ["Item 1", "Item 2", "Item 3"], "ordered": false}, - {"type": "table", "headers": ["Col1", "Col2"], "rows": [["A", "B"], ["C", "D"]]} - ] -} -``` - -**When to use:** -- Generating reports -- Creating structured documents -- Building documents from templates -- Automating document workflows - ---- - -### read_docx - -Extracts content from existing Word documents. - -```bash -python tools/read_docx.py --file <path> [--format <text|json|markdown>] [--include-tables] -``` - -**Arguments:** -- `--file` (required): Path to Word document -- `--format` (optional): Output format - `text`, `json`, or `markdown` (default: text) -- `--include-tables` (optional): Include table data in output - -**Output:** Document content in specified format. - -**When to use:** -- Extracting text for analysis -- Converting Word to other formats -- Reading document structure -- Processing uploaded documents - ---- - -### edit_docx - -Modifies an existing Word document. 
- -```bash -python tools/edit_docx.py --file <path> --output <path> --operations <json> -``` - -**Arguments:** -- `--file` (required): Input Word document -- `--output` (required): Output file path -- `--operations` (required): JSON array of edit operations - -**Operations JSON:** -```json -[ - {"action": "append_paragraph", "text": "New paragraph"}, - {"action": "replace_text", "find": "old text", "replace": "new text"}, - {"action": "add_heading", "text": "New Section", "level": 2}, - {"action": "insert_table", "headers": ["A", "B"], "rows": [["1", "2"]]} -] -``` - -**When to use:** -- Adding content to existing documents -- Find and replace operations -- Appending sections -- Batch document updates - ---- - -### analyze_docx - -Analyzes document structure and provides detailed metadata. - -```bash -python tools/analyze_docx.py --file <path> -``` - -**Arguments:** -- `--file` (required): Path to Word document - -**Output:** JSON with word count, paragraph count, heading structure, table count, styles used, and more. - -**When to use:** -- Auditing document structure -- Checking document properties -- Understanding document composition -- Quality assurance checks - -## Common Patterns - -### Create a Simple Report -```bash -python tools/create_docx.py --output report.docx --title "Monthly Report" --content '{"sections": [{"type": "heading", "level": 1, "text": "Summary"}, {"type": "paragraph", "text": "This month we achieved..."}]}' -``` - -### Extract All Text -```bash -python tools/read_docx.py --file document.docx --format text -``` - -### Add Section to Existing Document -```bash -python tools/edit_docx.py --file original.docx --output updated.docx --operations '[{"action": "append_paragraph", "text": "Additional content here"}]' -``` - -### Get Document Statistics -```bash -python tools/analyze_docx.py --file document.docx -``` - -## Best Practices - -1. **Use templates** - Start from well-formatted templates for consistent styling -2. 
**Structure content as JSON** - Makes complex documents reproducible -3. **Preserve originals** - Always output to new file when editing -4. **Check analysis first** - Understand document structure before modifying -5. **Use markdown format** - Great for further processing or display - -## Dependencies - -Requires `python-docx>=0.8.11`. Automatically installed with the skill. diff --git a/coderrr-skills/skills/docx/requirements.txt b/coderrr-skills/skills/docx/requirements.txt deleted file mode 100644 index 339aa01..0000000 --- a/coderrr-skills/skills/docx/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -python-docx>=0.8.11 diff --git a/coderrr-skills/skills/docx/tools/analyze_docx.py b/coderrr-skills/skills/docx/tools/analyze_docx.py deleted file mode 100644 index c65a866..0000000 --- a/coderrr-skills/skills/docx/tools/analyze_docx.py +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env python3 -""" -Analyze Word document structure and metadata. - -Usage: - python analyze_docx.py --file document.docx -""" - -import argparse -import sys -import json -from pathlib import Path -from collections import Counter - -try: - from docx import Document -except ImportError: - print("Error: 'python-docx' package is required. 
Install with: pip install python-docx", file=sys.stderr) - sys.exit(1) - - -def analyze_docx(file_path: str) -> dict: - """Analyze document structure and provide metadata.""" - doc = Document(file_path) - - # Count words and paragraphs - word_count = 0 - paragraph_count = 0 - heading_count = 0 - styles_used = Counter() - headings = [] - - for para in doc.paragraphs: - if para.text.strip(): - paragraph_count += 1 - word_count += len(para.text.split()) - - style_name = para.style.name if para.style else "Normal" - styles_used[style_name] += 1 - - if 'Heading' in style_name: - heading_count += 1 - headings.append({ - "text": para.text[:100], # Truncate long headings - "style": style_name - }) - - # Count tables - table_count = len(doc.tables) - - # Get document properties - core_props = doc.core_properties - - return { - "file": str(file_path), - "statistics": { - "word_count": word_count, - "paragraph_count": paragraph_count, - "heading_count": heading_count, - "table_count": table_count, - "section_count": len(doc.sections) - }, - "structure": { - "headings": headings[:20], # Limit to first 20 - "styles_used": dict(styles_used.most_common(10)) - }, - "properties": { - "title": core_props.title or "", - "author": core_props.author or "", - "created": str(core_props.created) if core_props.created else "", - "modified": str(core_props.modified) if core_props.modified else "" - } - } - - -def main(): - parser = argparse.ArgumentParser(description='Analyze Word documents') - parser.add_argument('--file', required=True, help='Path to Word document') - - args = parser.parse_args() - - if not Path(args.file).exists(): - print(f"Error: File not found: {args.file}", file=sys.stderr) - sys.exit(1) - - try: - result = analyze_docx(args.file) - print(json.dumps(result, indent=2)) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/docx/tools/create_docx.py 
b/coderrr-skills/skills/docx/tools/create_docx.py deleted file mode 100644 index 4a8e13e..0000000 --- a/coderrr-skills/skills/docx/tools/create_docx.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env python3 -""" -Create Word documents with structured content. - -Usage: - python create_docx.py --output report.docx --title "Report" --content '{"sections": [...]}' -""" - -import argparse -import sys -import json -from pathlib import Path - -try: - from docx import Document - from docx.shared import Inches, Pt - from docx.enum.text import WD_ALIGN_PARAGRAPH -except ImportError: - print("Error: 'python-docx' package is required. Install with: pip install python-docx", file=sys.stderr) - sys.exit(1) - - -def add_content_section(doc, section): - """Add a content section to the document.""" - section_type = section.get('type', 'paragraph') - - if section_type == 'heading': - level = section.get('level', 1) - doc.add_heading(section.get('text', ''), level=level) - - elif section_type == 'paragraph': - para = doc.add_paragraph(section.get('text', '')) - if section.get('bold'): - for run in para.runs: - run.bold = True - if section.get('italic'): - for run in para.runs: - run.italic = True - - elif section_type == 'list': - items = section.get('items', []) - ordered = section.get('ordered', False) - style = 'List Number' if ordered else 'List Bullet' - for item in items: - doc.add_paragraph(item, style=style) - - elif section_type == 'table': - headers = section.get('headers', []) - rows = section.get('rows', []) - - if headers: - table = doc.add_table(rows=1, cols=len(headers)) - table.style = 'Table Grid' - - # Add headers - header_cells = table.rows[0].cells - for i, header in enumerate(headers): - header_cells[i].text = str(header) - - # Add data rows - for row_data in rows: - row_cells = table.add_row().cells - for i, cell_data in enumerate(row_data): - if i < len(row_cells): - row_cells[i].text = str(cell_data) - - elif section_type == 'page_break': - 
doc.add_page_break() - - -def create_docx(output_path: str, title: str, content: dict = None, template_path: str = None): - """Create a Word document.""" - if template_path and Path(template_path).exists(): - doc = Document(template_path) - else: - doc = Document() - - # Add title - doc.add_heading(title, level=0) - - # Add content sections - if content and 'sections' in content: - for section in content['sections']: - add_content_section(doc, section) - - # Save document - doc.save(output_path) - return output_path - - -def main(): - parser = argparse.ArgumentParser(description='Create Word documents') - parser.add_argument('--output', required=True, help='Output file path (.docx)') - parser.add_argument('--title', required=True, help='Document title') - parser.add_argument('--content', help='JSON structure defining document content') - parser.add_argument('--template', help='Path to template document') - - args = parser.parse_args() - - content = None - if args.content: - try: - content = json.loads(args.content) - except json.JSONDecodeError as e: - print(f"Error: Invalid content JSON - {e}", file=sys.stderr) - sys.exit(1) - - try: - result = create_docx(args.output, args.title, content, args.template) - print(json.dumps({"status": "success", "file": result})) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/docx/tools/edit_docx.py b/coderrr-skills/skills/docx/tools/edit_docx.py deleted file mode 100644 index 0248a26..0000000 --- a/coderrr-skills/skills/docx/tools/edit_docx.py +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env python3 -""" -Edit existing Word documents. - -Usage: - python edit_docx.py --file input.docx --output output.docx --operations '[...]' -""" - -import argparse -import sys -import json -from pathlib import Path - -try: - from docx import Document -except ImportError: - print("Error: 'python-docx' package is required. 
Install with: pip install python-docx", file=sys.stderr) - sys.exit(1) - - -def apply_operation(doc, operation): - """Apply a single edit operation to the document.""" - action = operation.get('action') - - if action == 'append_paragraph': - doc.add_paragraph(operation.get('text', '')) - - elif action == 'add_heading': - level = operation.get('level', 1) - doc.add_heading(operation.get('text', ''), level=level) - - elif action == 'replace_text': - find_text = operation.get('find', '') - replace_text = operation.get('replace', '') - for para in doc.paragraphs: - if find_text in para.text: - for run in para.runs: - if find_text in run.text: - run.text = run.text.replace(find_text, replace_text) - - elif action == 'insert_table': - headers = operation.get('headers', []) - rows = operation.get('rows', []) - - if headers: - table = doc.add_table(rows=1, cols=len(headers)) - table.style = 'Table Grid' - - header_cells = table.rows[0].cells - for i, header in enumerate(headers): - header_cells[i].text = str(header) - - for row_data in rows: - row_cells = table.add_row().cells - for i, cell_data in enumerate(row_data): - if i < len(row_cells): - row_cells[i].text = str(cell_data) - - elif action == 'add_page_break': - doc.add_page_break() - - elif action == 'add_list': - items = operation.get('items', []) - ordered = operation.get('ordered', False) - style = 'List Number' if ordered else 'List Bullet' - for item in items: - doc.add_paragraph(item, style=style) - - -def edit_docx(input_path: str, output_path: str, operations: list): - """Edit a Word document with specified operations.""" - doc = Document(input_path) - - for operation in operations: - apply_operation(doc, operation) - - doc.save(output_path) - return output_path - - -def main(): - parser = argparse.ArgumentParser(description='Edit Word documents') - parser.add_argument('--file', required=True, help='Input Word document') - parser.add_argument('--output', required=True, help='Output file path') - 
parser.add_argument('--operations', required=True, help='JSON array of edit operations') - - args = parser.parse_args() - - if not Path(args.file).exists(): - print(f"Error: File not found: {args.file}", file=sys.stderr) - sys.exit(1) - - try: - operations = json.loads(args.operations) - except json.JSONDecodeError as e: - print(f"Error: Invalid operations JSON - {e}", file=sys.stderr) - sys.exit(1) - - try: - result = edit_docx(args.file, args.output, operations) - print(json.dumps({"status": "success", "file": result})) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/docx/tools/read_docx.py b/coderrr-skills/skills/docx/tools/read_docx.py deleted file mode 100644 index 4ef7a6a..0000000 --- a/coderrr-skills/skills/docx/tools/read_docx.py +++ /dev/null @@ -1,148 +0,0 @@ -#!/usr/bin/env python3 -""" -Read and extract content from Word documents. - -Usage: - python read_docx.py --file document.docx --format text -""" - -import argparse -import sys -import json -from pathlib import Path - -try: - from docx import Document -except ImportError: - print("Error: 'python-docx' package is required. 
Install with: pip install python-docx", file=sys.stderr) - sys.exit(1) - - -def extract_tables(doc): - """Extract all tables from document.""" - tables = [] - for table in doc.tables: - table_data = [] - for row in table.rows: - row_data = [cell.text for cell in row.cells] - table_data.append(row_data) - tables.append(table_data) - return tables - - -def read_docx_text(file_path: str, include_tables: bool = False) -> str: - """Extract text content from Word document.""" - doc = Document(file_path) - - paragraphs = [para.text for para in doc.paragraphs if para.text.strip()] - text = '\n\n'.join(paragraphs) - - if include_tables: - tables = extract_tables(doc) - for i, table in enumerate(tables): - text += f"\n\n[Table {i + 1}]\n" - for row in table: - text += ' | '.join(row) + '\n' - - return text - - -def read_docx_json(file_path: str, include_tables: bool = False) -> dict: - """Extract structured content from Word document.""" - doc = Document(file_path) - - result = { - "paragraphs": [], - "headings": [], - "tables": [] - } - - for para in doc.paragraphs: - if para.text.strip(): - style_name = para.style.name if para.style else "Normal" - if 'Heading' in style_name: - result["headings"].append({ - "text": para.text, - "level": style_name - }) - result["paragraphs"].append({ - "text": para.text, - "style": style_name - }) - - if include_tables: - result["tables"] = extract_tables(doc) - - return result - - -def read_docx_markdown(file_path: str, include_tables: bool = False) -> str: - """Convert Word document to Markdown.""" - doc = Document(file_path) - - md_lines = [] - - for para in doc.paragraphs: - if not para.text.strip(): - continue - - style_name = para.style.name if para.style else "Normal" - - if 'Heading 1' in style_name: - md_lines.append(f"# {para.text}") - elif 'Heading 2' in style_name: - md_lines.append(f"## {para.text}") - elif 'Heading 3' in style_name: - md_lines.append(f"### {para.text}") - elif 'List' in style_name: - md_lines.append(f"- 
{para.text}") - else: - md_lines.append(para.text) - - md_lines.append("") - - if include_tables: - tables = extract_tables(doc) - for table in tables: - if table: - # Header row - md_lines.append("| " + " | ".join(table[0]) + " |") - md_lines.append("| " + " | ".join(["---"] * len(table[0])) + " |") - # Data rows - for row in table[1:]: - md_lines.append("| " + " | ".join(row) + " |") - md_lines.append("") - - return '\n'.join(md_lines) - - -def main(): - parser = argparse.ArgumentParser(description='Read Word documents') - parser.add_argument('--file', required=True, help='Path to Word document') - parser.add_argument('--format', choices=['text', 'json', 'markdown'], default='text', - help='Output format (default: text)') - parser.add_argument('--include-tables', action='store_true', help='Include table data') - - args = parser.parse_args() - - if not Path(args.file).exists(): - print(f"Error: File not found: {args.file}", file=sys.stderr) - sys.exit(1) - - try: - if args.format == 'text': - result = read_docx_text(args.file, args.include_tables) - print(result) - elif args.format == 'json': - result = read_docx_json(args.file, args.include_tables) - print(json.dumps(result, indent=2)) - elif args.format == 'markdown': - result = read_docx_markdown(args.file, args.include_tables) - print(result) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/file-search/Skills.md b/coderrr-skills/skills/file-search/Skills.md deleted file mode 100644 index 7bdac35..0000000 --- a/coderrr-skills/skills/file-search/Skills.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -name: file-search -description: Find files and search content within your filesystem. Use this skill when the user asks to find files by name or pattern, search for text within files (grep-like operations), get directory statistics, count files, or analyze folder structure. 
Handles glob patterns, regex search, and comprehensive file system analysis. ---- - -This skill provides powerful filesystem search and analysis capabilities using only Python's standard library. It handles file discovery, content searching, and statistical analysis of directories. - -The user provides a search query, file pattern, or directory path. They may want to find specific files, search for text patterns, or understand the structure of a codebase. - -## Approach - -Before invoking tools, understand the search intent: -- **Find files by name/pattern**: Use `find_files` with glob patterns -- **Search within file contents**: Use `search_content` with text or regex queries -- **Analyze directory structure**: Use `file_stats` for size, counts, and composition -- **Combined operations**: Chain tools for complex queries (e.g., find Python files, then search within them) - -## Tools - -### find_files - -Recursively finds files and directories matching glob patterns. - -```bash -python tools/find_files.py --pattern <glob> --path <directory> [--type <file|dir|all>] -``` - -**Arguments:** -- `--pattern` (required): Glob pattern to match (e.g., `*.py`, `**/*.json`, `test_*`) -- `--path` (required): Directory to search in -- `--type` (optional): Filter by type - `file`, `dir`, or `all` (default: all) - -**Output:** JSON array of matching paths. - -**When to use:** -- Finding all files of a certain type -- Locating configuration files -- Discovering test files or specific modules -- Listing directories matching a pattern - -**Glob Pattern Guide:** -- `*` matches any characters in a single path segment -- `**` matches any characters across path segments (recursive) -- `?` matches a single character -- `[abc]` matches any character in brackets - ---- - -### search_content - -Searches for text patterns within files. Similar to grep but outputs structured JSON. 
- -```bash -python tools/search_content.py --query <text> --path <file_or_dir> [--regex] -``` - -**Arguments:** -- `--query` (required): Text or regex pattern to search for -- `--path` (required): File or directory to search in -- `--regex` (optional): Treat query as a regular expression - -**Output:** JSON array of matches with file, line number, and content. - -**When to use:** -- Finding where a function or variable is used -- Locating TODO comments or specific strings -- Searching for import statements -- Finding configuration values - -**Supported file types:** Python, JavaScript, TypeScript, Java, C/C++, Go, Rust, Ruby, PHP, HTML, CSS, JSON, YAML, Markdown, and more. - ---- - -### file_stats - -Analyzes files and directories, providing comprehensive statistics. - -```bash -python tools/file_stats.py --path <file_or_dir> -``` - -**Arguments:** -- `--path` (required): File or directory to analyze - -**Output:** JSON with file counts, sizes, type breakdown, and largest files. - -**When to use:** -- Understanding codebase composition -- Finding the largest files in a project -- Counting files by type -- Auditing directory structure - -## Common Patterns - -### Find All Python Files in Project -```bash -python tools/find_files.py --pattern "**/*.py" --path ./src --type file -``` - -### Search for Function Usage -```bash -python tools/search_content.py --query "def process_data" --path ./src -``` - -### Find Imports with Regex -```bash -python tools/search_content.py --query "^import\s+\w+" --path ./src --regex -``` - -### Get Project Statistics -```bash -python tools/file_stats.py --path ./my-project -``` - -### Find Only Directories -```bash -python tools/find_files.py --pattern "*test*" --path . --type dir -``` - -## Best Practices - -1. **Use specific paths** - Narrow the search scope for faster results -2. **Leverage glob patterns** - `**/*.py` is more efficient than searching everything -3. 
**Use regex for complex patterns** - When simple text matching isn't enough -4. **Check file_stats first** - Understand the codebase before deep searching -5. **Combine tools** - Find files first, then search within specific ones - -## Error Handling - -| Exit Code | Meaning | Recovery | -|-----------|---------|----------| -| 0 | Success | - | -| 1 | Invalid path or pattern | Verify path exists and pattern syntax | -| 2 | Permission denied | Check file permissions | - -## Dependencies - -None - uses Python's standard library only (pathlib, os, re, json). diff --git a/coderrr-skills/skills/file-search/tools/file_stats.py b/coderrr-skills/skills/file-search/tools/file_stats.py deleted file mode 100644 index 2fdfe60..0000000 --- a/coderrr-skills/skills/file-search/tools/file_stats.py +++ /dev/null @@ -1,153 +0,0 @@ -#!/usr/bin/env python3 -""" -Get statistics about files and directories. - -This tool analyzes a file or directory and provides statistics including -file count, total size, file type breakdown, and largest files. 
- -Usage: - python file_stats.py --path ./src - python file_stats.py --path ./main.py - -Exit Codes: - 0 - Success - 1 - Invalid path -""" - -import argparse -import sys -import json -import os -from pathlib import Path -from typing import Dict, Any, List -from collections import defaultdict - - -def format_size(size_bytes: int) -> str: - """Format bytes to human-readable size.""" - for unit in ['B', 'KB', 'MB', 'GB', 'TB']: - if size_bytes < 1024.0: - return f"{size_bytes:.2f} {unit}" - size_bytes /= 1024.0 - return f"{size_bytes:.2f} PB" - - -def get_file_stats(file_path: Path) -> Dict[str, Any]: - """Get statistics for a single file.""" - stat = file_path.stat() - return { - 'path': str(file_path), - 'type': 'file', - 'size': stat.st_size, - 'size_human': format_size(stat.st_size), - 'extension': file_path.suffix or '(no extension)', - 'modified': stat.st_mtime - } - - -def get_directory_stats(dir_path: Path, top_n: int = 10) -> Dict[str, Any]: - """Get statistics for a directory.""" - file_count = 0 - dir_count = 0 - total_size = 0 - file_types: Dict[str, int] = defaultdict(int) - files_with_sizes: List[Dict[str, Any]] = [] - - try: - for root, dirs, files in os.walk(dir_path): - dir_count += len(dirs) - - for filename in files: - file_path = Path(root) / filename - file_count += 1 - - try: - size = file_path.stat().st_size - total_size += size - - ext = file_path.suffix.lower() if file_path.suffix else '(no extension)' - file_types[ext] += 1 - - files_with_sizes.append({ - 'path': str(file_path), - 'size': size - }) - except (OSError, IOError): - pass # Skip files we can't access - except PermissionError: - pass - - # Sort by size and take top N - files_with_sizes.sort(key=lambda x: x['size'], reverse=True) - largest_files = files_with_sizes[:top_n] - - # Sort file types by count - sorted_types = dict(sorted(file_types.items(), key=lambda x: x[1], reverse=True)) - - return { - 'path': str(dir_path), - 'type': 'directory', - 'file_count': file_count, - 
'dir_count': dir_count, - 'total_size': total_size, - 'total_size_human': format_size(total_size), - 'file_types': sorted_types, - 'largest_files': largest_files - } - - -def file_stats(path_str: str) -> Dict[str, Any]: - """ - Get statistics about a file or directory. - - Args: - path_str: Path to analyze - - Returns: - Dictionary with statistics - - Raises: - ValueError: If path doesn't exist - """ - path = Path(path_str) - - if not path.exists(): - raise ValueError(f"Path does not exist: {path_str}") - - if path.is_file(): - return get_file_stats(path) - else: - return get_directory_stats(path) - - -def main(): - parser = argparse.ArgumentParser( - description='Get statistics about files and directories', - formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=''' -Examples: - python file_stats.py --path ./src - python file_stats.py --path ./main.py - ''' - ) - parser.add_argument( - '--path', - required=True, - help='File or directory to analyze' - ) - - args = parser.parse_args() - - try: - stats = file_stats(args.path) - print(json.dumps(stats, indent=2)) - except ValueError as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - except PermissionError as e: - print(f"Error: Permission denied - {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/file-search/tools/find_files.py b/coderrr-skills/skills/file-search/tools/find_files.py deleted file mode 100644 index aa520a1..0000000 --- a/coderrr-skills/skills/file-search/tools/find_files.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/env python3 -""" -Find files and directories matching a glob pattern. - -This tool searches a directory for files and directories that match -the specified glob pattern. - -Usage: - python find_files.py --pattern "*.py" --path ./src - python find_files.py --pattern "**/*.json" --path . 
--type file - -Exit Codes: - 0 - Success - 1 - Invalid path or pattern -""" - -import argparse -import sys -import json -import os -from pathlib import Path -from typing import List - - -def find_files(pattern: str, search_path: str, file_type: str = 'all') -> List[str]: - """ - Find files and directories matching a glob pattern. - - Args: - pattern: Glob pattern to match - search_path: Directory to search in - file_type: Filter by type - 'file', 'dir', or 'all' - - Returns: - List of matching paths as strings - - Raises: - ValueError: If path doesn't exist or is invalid - """ - path = Path(search_path) - - if not path.exists(): - raise ValueError(f"Path does not exist: {search_path}") - - if not path.is_dir(): - raise ValueError(f"Path is not a directory: {search_path}") - - matches = [] - - try: - for match in path.glob(pattern): - if file_type == 'file' and not match.is_file(): - continue - if file_type == 'dir' and not match.is_dir(): - continue - - # Use relative path from search directory - try: - relative = match.relative_to(path) - matches.append(str(Path(search_path) / relative)) - except ValueError: - matches.append(str(match)) - except Exception as e: - raise ValueError(f"Invalid pattern: {e}") - - return sorted(matches) - - -def main(): - parser = argparse.ArgumentParser( - description='Find files and directories matching a glob pattern', - formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=''' -Examples: - python find_files.py --pattern "*.py" --path ./src - python find_files.py --pattern "**/*.json" --path . 
--type file - python find_files.py --pattern "*" --path ./project --type dir - ''' - ) - parser.add_argument( - '--pattern', - required=True, - help='Glob pattern to match (e.g., "*.py", "**/*.json")' - ) - parser.add_argument( - '--path', - required=True, - help='Directory to search in' - ) - parser.add_argument( - '--type', - choices=['file', 'dir', 'all'], - default='all', - help='Filter by type (default: all)' - ) - - args = parser.parse_args() - - try: - matches = find_files(args.pattern, args.path, args.type) - print(json.dumps(matches, indent=2)) - except ValueError as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - except PermissionError as e: - print(f"Error: Permission denied - {e}", file=sys.stderr) - sys.exit(2) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/file-search/tools/search_content.py b/coderrr-skills/skills/file-search/tools/search_content.py deleted file mode 100644 index dc0d3af..0000000 --- a/coderrr-skills/skills/file-search/tools/search_content.py +++ /dev/null @@ -1,162 +0,0 @@ -#!/usr/bin/env python3 -""" -Search for text within files (grep-like functionality). - -This tool searches for text patterns within files and returns matches -with line numbers. 
- -Usage: - python search_content.py --query "TODO" --path ./src - python search_content.py --query "def \\w+\\(" --path ./src --regex - -Exit Codes: - 0 - Success - 1 - Invalid path or pattern -""" - -import argparse -import sys -import json -import os -import re -from pathlib import Path -from typing import List, Dict, Any - - -# File extensions to search (text files only) -SEARCHABLE_EXTENSIONS = { - '.py', '.js', '.ts', '.jsx', '.tsx', '.java', '.c', '.cpp', '.h', '.hpp', - '.cs', '.go', '.rs', '.rb', '.php', '.swift', '.kt', '.scala', '.r', - '.sql', '.sh', '.bash', '.zsh', '.ps1', '.bat', '.cmd', - '.html', '.css', '.scss', '.sass', '.less', '.xml', '.json', '.yaml', '.yml', - '.md', '.txt', '.rst', '.ini', '.cfg', '.conf', '.env', '.toml', - '.gitignore', '.dockerignore', 'Dockerfile', 'Makefile', '.editorconfig' -} - - -def is_searchable(file_path: Path) -> bool: - """Check if a file should be searched based on extension.""" - if file_path.suffix.lower() in SEARCHABLE_EXTENSIONS: - return True - if file_path.name in SEARCHABLE_EXTENSIONS: - return True - return False - - -def search_file(file_path: Path, query: str, is_regex: bool = False) -> List[Dict[str, Any]]: - """ - Search for matches in a single file. 
- - Args: - file_path: Path to the file to search - query: Text or regex pattern to search for - is_regex: Whether to treat query as regex - - Returns: - List of match objects with file, line, and content - """ - matches = [] - - try: - with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: - for line_num, line in enumerate(f, 1): - if is_regex: - if re.search(query, line): - matches.append({ - 'file': str(file_path), - 'line': line_num, - 'content': line.rstrip() - }) - else: - if query.lower() in line.lower(): - matches.append({ - 'file': str(file_path), - 'line': line_num, - 'content': line.rstrip() - }) - except (IOError, OSError): - pass # Skip files that can't be read - - return matches - - -def search_content(query: str, search_path: str, is_regex: bool = False) -> List[Dict[str, Any]]: - """ - Search for text within files. - - Args: - query: Text or pattern to search for - search_path: File or directory to search in - is_regex: Whether to treat query as regex - - Returns: - List of match objects - - Raises: - ValueError: If path doesn't exist or regex is invalid - """ - path = Path(search_path) - - if not path.exists(): - raise ValueError(f"Path does not exist: {search_path}") - - # Validate regex if needed - if is_regex: - try: - re.compile(query) - except re.error as e: - raise ValueError(f"Invalid regex pattern: {e}") - - all_matches = [] - - if path.is_file(): - all_matches.extend(search_file(path, query, is_regex)) - else: - for root, _, files in os.walk(path): - for filename in files: - file_path = Path(root) / filename - if is_searchable(file_path): - all_matches.extend(search_file(file_path, query, is_regex)) - - return all_matches - - -def main(): - parser = argparse.ArgumentParser( - description='Search for text within files', - formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=''' -Examples: - python search_content.py --query "TODO" --path ./src - python search_content.py --query "def \\w+\\(" --path ./src --regex - 
python search_content.py --query "import" --path ./main.py - ''' - ) - parser.add_argument( - '--query', - required=True, - help='Text or pattern to search for' - ) - parser.add_argument( - '--path', - required=True, - help='File or directory to search in' - ) - parser.add_argument( - '--regex', - action='store_true', - help='Treat query as a regular expression' - ) - - args = parser.parse_args() - - try: - matches = search_content(args.query, args.path, args.regex) - print(json.dumps(matches, indent=2)) - except ValueError as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/internal-comms/Skills.md b/coderrr-skills/skills/internal-comms/Skills.md deleted file mode 100644 index c62d3d4..0000000 --- a/coderrr-skills/skills/internal-comms/Skills.md +++ /dev/null @@ -1,210 +0,0 @@ ---- -name: internal-comms -description: Write internal communications like status reports, newsletters, announcements, and team updates. Use this skill when the user needs to draft status reports, write team newsletters, create announcements, compose meeting summaries, or generate internal documentation. ---- - -This skill helps create professional internal communications. It generates well-structured content for various internal communication needs with appropriate tone and formatting. - -The user wants to write internal communications. They may provide context, key points, or ask for a specific type of document. - -## Approach - -When creating internal communications: -1. **Choose type**: Identify the communication type -2. **Gather context**: Collect key information and audience -3. **Generate**: Use appropriate tool for content type -4. **Format**: Export in desired format - -## Tools - -### status_report - -Generates project or team status reports. 
- -```bash -python tools/status_report.py --project <name> --period <period> --data <json> [--format <markdown|html>] -``` - -**Arguments:** -- `--project` (required): Project/team name -- `--period` (required): Reporting period (e.g., "Week 5", "January 2024") -- `--data` (required): Report data JSON -- `--format` (optional): Output format (default: markdown) - -**Data JSON:** -```json -{ - "highlights": ["Completed feature X", "Launched beta"], - "progress": {"tasks_completed": 15, "tasks_remaining": 8}, - "blockers": ["Waiting on API access"], - "next_steps": ["Begin testing phase"], - "metrics": {"velocity": 42, "bugs_fixed": 7} -} -``` - -**When to use:** -- Weekly/monthly status updates -- Project progress reports -- Team performance summaries - ---- - -### newsletter - -Creates internal newsletters. - -```bash -python tools/newsletter.py --title <title> --sections <json> [--format <markdown|html>] -``` - -**Arguments:** -- `--title` (required): Newsletter title -- `--sections` (required): Section content JSON -- `--format` (optional): Output format - -**Sections JSON:** -```json -[ - {"type": "intro", "content": "Welcome message..."}, - {"type": "highlight", "title": "Big Win", "content": "We achieved..."}, - {"type": "updates", "items": ["Update 1", "Update 2"]}, - {"type": "spotlight", "name": "Jane Doe", "role": "Engineer", "content": "Achievements..."}, - {"type": "upcoming", "events": [{"date": "Feb 15", "title": "All-hands"}]} -] -``` - -**When to use:** -- Weekly team newsletters -- Monthly company updates -- Department communications - ---- - -### announcement - -Creates formal announcements. 
- -```bash -python tools/announcement.py --type <type> --subject <subject> --content <json> [--urgency <level>] -``` - -**Arguments:** -- `--type` (required): Announcement type - `general`, `policy`, `event`, `change`, `launch` -- `--subject` (required): Announcement subject -- `--content` (required): Content details JSON -- `--urgency` (optional): Urgency level - `normal`, `important`, `urgent` - -**Content JSON:** -```json -{ - "summary": "Brief summary of announcement", - "details": "Full details and context...", - "action_items": ["Review by Friday", "Submit feedback"], - "contact": "jane@company.com", - "effective_date": "2024-02-01" -} -``` - -**When to use:** -- Policy updates -- Organizational changes -- Product launches -- Event announcements - ---- - -### meeting_summary - -Generates meeting summaries. - -```bash -python tools/meeting_summary.py --title <title> --date <date> --data <json> -``` - -**Arguments:** -- `--title` (required): Meeting title -- `--date` (required): Meeting date -- `--data` (required): Meeting data JSON - -**Data JSON:** -```json -{ - "attendees": ["Alice", "Bob", "Charlie"], - "agenda": ["Q1 planning", "Budget review"], - "discussion": [ - {"topic": "Q1 Goals", "summary": "Agreed on 3 key objectives..."}, - {"topic": "Budget", "summary": "Approved $50k allocation..."} - ], - "decisions": ["Launch in March", "Hire 2 engineers"], - "action_items": [ - {"owner": "Alice", "task": "Draft proposal", "due": "Feb 10"} - ], - "next_meeting": "Feb 15, 2024" -} -``` - -**When to use:** -- Team meeting notes -- Stakeholder meeting summaries -- Decision documentation - ---- - -### template - -Generates reusable communication templates. 
- -```bash -python tools/template.py --type <type> [--customize <json>] -``` - -**Arguments:** -- `--type` (required): Template type - `status`, `newsletter`, `announcement`, `meeting`, `email` -- `--customize` (optional): Customization options - -**When to use:** -- Setting up recurring communications -- Standardizing team output -- Creating document templates - -## Common Patterns - -### Weekly Status Report -```bash -python tools/status_report.py --project "Backend Team" --period "Week 5" --data '{"highlights": ["Deployed v2.0", "Fixed 12 bugs"], "progress": {"tasks_completed": 18, "tasks_remaining": 5}, "blockers": [], "next_steps": ["Performance testing"]}' -``` - -### Team Newsletter -```bash -python tools/newsletter.py --title "Engineering Weekly #12" --sections '[{"type": "intro", "content": "Great week everyone!"}, {"type": "highlight", "title": "Launch Success", "content": "Product v2.0 is live!"}, {"type": "upcoming", "events": [{"date": "Feb 20", "title": "Hackathon"}]}]' --format html -``` - -### Policy Announcement -```bash -python tools/announcement.py --type policy --subject "Remote Work Update" --content '{"summary": "New hybrid policy starting March", "details": "We are updating our remote work policy...", "effective_date": "2024-03-01"}' --urgency important -``` - -## Writing Guidelines - -### Tone -- **Status reports**: Factual, concise, data-driven -- **Newsletters**: Engaging, positive, inclusive -- **Announcements**: Clear, direct, professional -- **Meeting notes**: Structured, action-oriented - -### Structure -- Lead with key information -- Use bullet points for lists -- Include clear action items -- Provide contact for questions - -### Best Practices -1. Keep it scannable -2. Highlight important dates -3. Use consistent formatting -4. Include relevant links -5. Proofread before sending - -## Dependencies - -Uses Python standard library only. 
diff --git a/coderrr-skills/skills/internal-comms/tools/announcement.py b/coderrr-skills/skills/internal-comms/tools/announcement.py deleted file mode 100644 index f855793..0000000 --- a/coderrr-skills/skills/internal-comms/tools/announcement.py +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env python3 -""" -Generate announcements. - -Usage: - python announcement.py --type general --subject "Title" --content '{...}' -""" - -import argparse -import sys -import json -from datetime import datetime - - -URGENCY_ICONS = { - "normal": "šŸ“¢", - "important": "āš ļø", - "urgent": "🚨" -} - -TYPE_TITLES = { - "general": "Announcement", - "policy": "Policy Update", - "event": "Event Announcement", - "change": "Change Notice", - "launch": "Launch Announcement" -} - - -def generate_announcement(ann_type: str, subject: str, content: dict, urgency: str = 'normal') -> str: - """Generate announcement.""" - icon = URGENCY_ICONS.get(urgency, "šŸ“¢") - type_title = TYPE_TITLES.get(ann_type, "Announcement") - - lines = [ - f"# {icon} {type_title}: {subject}", - "", - f"**Date:** {datetime.now().strftime('%B %d, %Y')}", - ] - - if content.get("effective_date"): - lines.append(f"**Effective:** {content['effective_date']}") - - if urgency != "normal": - lines.append(f"**Priority:** {urgency.upper()}") - - lines.append("") - lines.append("---") - lines.append("") - - # Summary - if content.get("summary"): - lines.append(f"**TL;DR:** {content['summary']}") - lines.append("") - - # Details - if content.get("details"): - lines.append("## Details") - lines.append(content["details"]) - lines.append("") - - # Action Items - if content.get("action_items"): - lines.append("## Action Required") - for item in content["action_items"]: - lines.append(f"- [ ] {item}") - lines.append("") - - # Contact - if content.get("contact"): - lines.append("---") - lines.append(f"*Questions? 
Contact: {content['contact']}*") - - return '\n'.join(lines) - - -def main(): - parser = argparse.ArgumentParser(description='Generate announcement') - parser.add_argument('--type', required=True, - choices=['general', 'policy', 'event', 'change', 'launch']) - parser.add_argument('--subject', required=True, help='Announcement subject') - parser.add_argument('--content', required=True, help='Content JSON') - parser.add_argument('--urgency', default='normal', choices=['normal', 'important', 'urgent']) - - args = parser.parse_args() - - try: - content = json.loads(args.content) - except json.JSONDecodeError as e: - print(f"Error: Invalid content JSON - {e}", file=sys.stderr) - sys.exit(1) - - try: - result = generate_announcement(args.type, args.subject, content, args.urgency) - print(result) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/internal-comms/tools/meeting_summary.py b/coderrr-skills/skills/internal-comms/tools/meeting_summary.py deleted file mode 100644 index 3c7b9aa..0000000 --- a/coderrr-skills/skills/internal-comms/tools/meeting_summary.py +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env python3 -""" -Generate meeting summaries. - -Usage: - python meeting_summary.py --title "Team Sync" --date "2024-02-01" --data '{...}' -""" - -import argparse -import sys -import json - - -def generate_meeting_summary(title: str, date: str, data: dict) -> str: - """Generate meeting summary.""" - lines = [ - f"# šŸ“‹ Meeting Summary: {title}", - "", - f"**Date:** {date}", - ] - - if data.get("attendees"): - lines.append(f"**Attendees:** {', '.join(data['attendees'])}") - - lines.append("") - lines.append("---") - lines.append("") - - # Agenda - if data.get("agenda"): - lines.append("## šŸ“Œ Agenda") - for i, item in enumerate(data["agenda"], 1): - lines.append(f"{i}. 
{item}") - lines.append("") - - # Discussion - if data.get("discussion"): - lines.append("## šŸ’¬ Discussion") - for item in data["discussion"]: - lines.append(f"### {item.get('topic', 'Topic')}") - lines.append(item.get("summary", "")) - lines.append("") - - # Decisions - if data.get("decisions"): - lines.append("## āœ… Decisions Made") - for decision in data["decisions"]: - lines.append(f"- āœ“ {decision}") - lines.append("") - - # Action Items - if data.get("action_items"): - lines.append("## šŸ“ Action Items") - lines.append("") - lines.append("| Owner | Task | Due Date |") - lines.append("|-------|------|----------|") - for item in data["action_items"]: - owner = item.get("owner", "-") - task = item.get("task", "-") - due = item.get("due", "-") - lines.append(f"| {owner} | {task} | {due} |") - lines.append("") - - # Next Meeting - if data.get("next_meeting"): - lines.append("---") - lines.append(f"**Next Meeting:** {data['next_meeting']}") - - return '\n'.join(lines) - - -def main(): - parser = argparse.ArgumentParser(description='Generate meeting summary') - parser.add_argument('--title', required=True, help='Meeting title') - parser.add_argument('--date', required=True, help='Meeting date') - parser.add_argument('--data', required=True, help='Meeting data JSON') - - args = parser.parse_args() - - try: - data = json.loads(args.data) - except json.JSONDecodeError as e: - print(f"Error: Invalid data JSON - {e}", file=sys.stderr) - sys.exit(1) - - try: - result = generate_meeting_summary(args.title, args.date, data) - print(result) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/internal-comms/tools/newsletter.py b/coderrr-skills/skills/internal-comms/tools/newsletter.py deleted file mode 100644 index 7ea8df1..0000000 --- a/coderrr-skills/skills/internal-comms/tools/newsletter.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env python3 -""" -Generate 
internal newsletters. - -Usage: - python newsletter.py --title "Weekly Update" --sections '[...]' -""" - -import argparse -import sys -import json -from datetime import datetime - - -def generate_newsletter_md(title: str, sections: list) -> str: - """Generate markdown newsletter.""" - lines = [ - f"# šŸ“° {title}", - f"*{datetime.now().strftime('%B %d, %Y')}*", - "", - "---", - "" - ] - - for section in sections: - section_type = section.get("type", "content") - - if section_type == "intro": - lines.append(section.get("content", "")) - lines.append("") - - elif section_type == "highlight": - lines.append(f"## 🌟 {section.get('title', 'Highlight')}") - lines.append(section.get("content", "")) - lines.append("") - - elif section_type == "updates": - lines.append("## šŸ“‹ Updates") - for item in section.get("items", []): - lines.append(f"- {item}") - lines.append("") - - elif section_type == "spotlight": - lines.append(f"## šŸ‘¤ Team Spotlight: {section.get('name', 'Team Member')}") - if section.get("role"): - lines.append(f"*{section['role']}*") - lines.append("") - lines.append(section.get("content", "")) - lines.append("") - - elif section_type == "upcoming": - lines.append("## šŸ“… Upcoming Events") - for event in section.get("events", []): - lines.append(f"- **{event.get('date', '')}**: {event.get('title', '')}") - lines.append("") - - elif section_type == "content": - if section.get("title"): - lines.append(f"## {section['title']}") - lines.append(section.get("content", "")) - lines.append("") - - lines.append("---") - lines.append("*Questions? Reply to this newsletter or reach out to the team.*") - - return '\n'.join(lines) - - -def generate_newsletter_html(title: str, sections: list) -> str: - """Generate HTML newsletter.""" - md = generate_newsletter_md(title, sections) - - return f"""<!DOCTYPE html> -<html> -<head> - <title>{title} - - - -
-
{md}
-
- -""" - - -def newsletter(title: str, sections: list, format_type: str = 'markdown'): - """Generate newsletter.""" - if format_type == 'html': - return generate_newsletter_html(title, sections) - return generate_newsletter_md(title, sections) - - -def main(): - parser = argparse.ArgumentParser(description='Generate newsletter') - parser.add_argument('--title', required=True, help='Newsletter title') - parser.add_argument('--sections', required=True, help='Sections JSON') - parser.add_argument('--format', default='markdown', choices=['markdown', 'html']) - - args = parser.parse_args() - - try: - sections = json.loads(args.sections) - except json.JSONDecodeError as e: - print(f"Error: Invalid sections JSON - {e}", file=sys.stderr) - sys.exit(1) - - try: - result = newsletter(args.title, sections, args.format) - print(result) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/internal-comms/tools/status_report.py b/coderrr-skills/skills/internal-comms/tools/status_report.py deleted file mode 100644 index f2361d7..0000000 --- a/coderrr-skills/skills/internal-comms/tools/status_report.py +++ /dev/null @@ -1,123 +0,0 @@ -#!/usr/bin/env python3 -""" -Generate status reports. 
- -Usage: - python status_report.py --project "Team Name" --period "Week 5" --data '{...}' -""" - -import argparse -import sys -import json -from datetime import datetime - - -def generate_markdown(project: str, period: str, data: dict) -> str: - """Generate markdown status report.""" - lines = [ - f"# Status Report: {project}", - f"**Period:** {period}", - f"**Generated:** {datetime.now().strftime('%Y-%m-%d')}", - "", - "---", - "" - ] - - # Highlights - if data.get("highlights"): - lines.append("## šŸŽÆ Highlights") - for item in data["highlights"]: - lines.append(f"- {item}") - lines.append("") - - # Progress - if data.get("progress"): - lines.append("## šŸ“Š Progress") - progress = data["progress"] - if "tasks_completed" in progress: - total = progress.get("tasks_completed", 0) + progress.get("tasks_remaining", 0) - pct = (progress["tasks_completed"] / total * 100) if total > 0 else 0 - lines.append(f"- **Completed:** {progress['tasks_completed']} tasks") - lines.append(f"- **Remaining:** {progress.get('tasks_remaining', 0)} tasks") - lines.append(f"- **Progress:** {pct:.0f}%") - lines.append("") - - # Metrics - if data.get("metrics"): - lines.append("## šŸ“ˆ Metrics") - for metric, value in data["metrics"].items(): - lines.append(f"- **{metric.replace('_', ' ').title()}:** {value}") - lines.append("") - - # Blockers - if data.get("blockers"): - lines.append("## 🚧 Blockers") - for item in data["blockers"]: - lines.append(f"- āš ļø {item}") - lines.append("") - - # Next Steps - if data.get("next_steps"): - lines.append("## āž”ļø Next Steps") - for item in data["next_steps"]: - lines.append(f"- {item}") - lines.append("") - - return '\n'.join(lines) - - -def generate_html(project: str, period: str, data: dict) -> str: - """Generate HTML status report.""" - md = generate_markdown(project, period, data) - # Simple markdown to HTML conversion - html = f""" - - - Status Report: {project} - - - -
{md}
- -""" - return html - - -def status_report(project: str, period: str, data: dict, format_type: str = 'markdown'): - """Generate status report.""" - if format_type == 'html': - return generate_html(project, period, data) - return generate_markdown(project, period, data) - - -def main(): - parser = argparse.ArgumentParser(description='Generate status report') - parser.add_argument('--project', required=True, help='Project/team name') - parser.add_argument('--period', required=True, help='Reporting period') - parser.add_argument('--data', required=True, help='Report data JSON') - parser.add_argument('--format', default='markdown', choices=['markdown', 'html']) - - args = parser.parse_args() - - try: - data = json.loads(args.data) - except json.JSONDecodeError as e: - print(f"Error: Invalid data JSON - {e}", file=sys.stderr) - sys.exit(1) - - try: - result = status_report(args.project, args.period, data, args.format) - print(result) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/json-tools/Skills.md b/coderrr-skills/skills/json-tools/Skills.md deleted file mode 100644 index c14fcaa..0000000 --- a/coderrr-skills/skills/json-tools/Skills.md +++ /dev/null @@ -1,168 +0,0 @@ ---- -name: json-tools -description: Format, query, and validate JSON data. Use this skill when the user asks to pretty-print JSON, extract values from JSON, validate JSON syntax, minify JSON, or work with nested JSON structures. Provides JSONPath-like querying, formatting options, and detailed syntax validation. ---- - -This skill handles all common JSON operations using only Python's standard library. It provides formatting with configurable indentation, querying with path expressions, and validation with precise error locations. - -The user provides JSON data (as files or input) to process. They may want to format it for readability, extract specific values, or validate syntax before use. 
- -## Approach - -Before invoking tools, understand the JSON operation: -- **Readability**: Use `format_json` to pretty-print or minify -- **Data extraction**: Use `query_json` with path expressions -- **Syntax check**: Use `validate_json` to verify and locate errors -- **Pipeline**: Chain tools for complex operations (validate → query → format) - -## Tools - -### format_json - -Pretty-prints or minifies JSON data. Reads from file or stdin for easy piping. - -```bash -python tools/format_json.py [--file ] [--indent ] [--minify] -``` - -**Arguments:** -- `--file` (optional): Path to JSON file. If omitted, reads from stdin -- `--indent` (optional): Indentation spaces (default: 2) -- `--minify` (optional): Compress to single line - -**Output:** Formatted JSON to stdout. - -**When to use:** -- Making JSON human-readable -- Standardizing JSON formatting -- Minimizing JSON file size -- Piping output from API calls - ---- - -### query_json - -Extracts values from JSON using path expressions. Supports simple JSONPath-like syntax. - -```bash -python tools/query_json.py --file --path -``` - -**Arguments:** -- `--file` (required): Path to JSON file -- `--path` (required): Path expression (e.g., `user.name`, `items[0]`, `data[*].id`) - -**Path Syntax:** -- `key` or `.key` - Access object property -- `[0]` - Access array element by index -- `[*]` - Access ALL array elements (returns array of matched values) - -**Output:** The matched value as JSON. - -**When to use:** -- Extracting specific values from config files -- Getting nested data from API responses -- Selecting array elements -- Drilling into complex JSON structures - -**Examples:** -- `user.email` → Gets user's email -- `users[0]` → Gets first user -- `items[*].name` → Gets all item names as array -- `config.database.host` → Gets nested config value - ---- - -### validate_json - -Validates JSON syntax and reports precise error locations. 
- -```bash -python tools/validate_json.py --file -``` - -**Arguments:** -- `--file` (required): Path to JSON file to validate - -**Output:** JSON with validation result. If invalid, includes error message, line, and column. - -**When to use:** -- Checking JSON before parsing in code -- Debugging JSON syntax errors -- Validating user-provided JSON -- CI/CD pipeline validation - -## Common Patterns - -### Pretty Print a File -```bash -python tools/format_json.py --file config.json --indent 4 -``` - -### Minify JSON -```bash -python tools/format_json.py --file data.json --minify -``` - -### Format Piped Input -```bash -echo '{"name":"John","age":30}' | python tools/format_json.py -``` - -### Extract Nested Value -```bash -python tools/query_json.py --file response.json --path "data.user.profile.email" -``` - -### Get All IDs from Array -```bash -python tools/query_json.py --file users.json --path "users[*].id" -``` - -### Validate Before Processing -```bash -python tools/validate_json.py --file input.json -``` - -## Best Practices - -1. **Validate first** - Check syntax before querying or processing -2. **Use precise paths** - `users[0].name` is clearer than complex filtering -3. **Pipe for workflows** - Combine with other tools via stdin/stdout -4. **Consistent formatting** - Use same indent (2 or 4) across project -5. 
**Minify for production** - Reduce file size for deployment - -## Path Expression Examples - -Given this JSON: -```json -{ - "users": [ - {"id": 1, "name": "Alice", "roles": ["admin", "user"]}, - {"id": 2, "name": "Bob", "roles": ["user"]} - ], - "meta": {"total": 2, "page": 1} -} -``` - -| Path | Result | -|------|--------| -| `users` | The entire users array | -| `users[0]` | `{"id": 1, "name": "Alice", ...}` | -| `users[0].name` | `"Alice"` | -| `users[*].name` | `["Alice", "Bob"]` | -| `users[0].roles[0]` | `"admin"` | -| `meta.total` | `2` | - -## Error Handling - -| Exit Code | Meaning | Recovery | -|-----------|---------|----------| -| 0 | Success | - | -| 1 | Invalid file path | Verify file exists | -| 2 | JSON parsing error | Check syntax with validate_json | -| 3 | Invalid path expression | Check path syntax | - -## Dependencies - -None - uses Python's standard library only (json, re, pathlib). diff --git a/coderrr-skills/skills/json-tools/tools/format_json.py b/coderrr-skills/skills/json-tools/tools/format_json.py deleted file mode 100644 index 3701b1d..0000000 --- a/coderrr-skills/skills/json-tools/tools/format_json.py +++ /dev/null @@ -1,106 +0,0 @@ -#!/usr/bin/env python3 -""" -Format and pretty print JSON data. - -This tool reads JSON from a file or stdin and outputs it formatted -with configurable indentation, or minified. - -Usage: - python format_json.py --file data.json - echo '{"a":1}' | python format_json.py - python format_json.py --file data.json --minify - -Exit Codes: - 0 - Success - 1 - Invalid file path - 2 - JSON parsing error -""" - -import argparse -import sys -import json -from pathlib import Path - - -def format_json(data: str, indent: int = 2, minify: bool = False) -> str: - """ - Format JSON data. 
- - Args: - data: JSON string to format - indent: Indentation level (ignored if minify is True) - minify: If True, compress to single line - - Returns: - Formatted JSON string - - Raises: - json.JSONDecodeError: If JSON is invalid - """ - parsed = json.loads(data) - - if minify: - return json.dumps(parsed, separators=(',', ':'), ensure_ascii=False) - else: - return json.dumps(parsed, indent=indent, ensure_ascii=False) - - -def main(): - parser = argparse.ArgumentParser( - description='Format and pretty print JSON', - formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=''' -Examples: - python format_json.py --file data.json - python format_json.py --file data.json --indent 4 - python format_json.py --file data.json --minify - echo '{"name":"John"}' | python format_json.py - ''' - ) - parser.add_argument( - '--file', - help='Path to JSON file (reads from stdin if not provided)' - ) - parser.add_argument( - '--indent', - type=int, - default=2, - help='Indentation level (default: 2)' - ) - parser.add_argument( - '--minify', - action='store_true', - help='Compress JSON to single line' - ) - - args = parser.parse_args() - - # Read JSON data - if args.file: - path = Path(args.file) - if not path.exists(): - print(f"Error: File not found: {args.file}", file=sys.stderr) - sys.exit(1) - try: - with open(path, 'r', encoding='utf-8') as f: - data = f.read() - except IOError as e: - print(f"Error reading file: {e}", file=sys.stderr) - sys.exit(1) - else: - if sys.stdin.isatty(): - print("Error: No input. 
Provide --file or pipe JSON to stdin.", file=sys.stderr) - sys.exit(1) - data = sys.stdin.read() - - # Format and output - try: - formatted = format_json(data, args.indent, args.minify) - print(formatted) - except json.JSONDecodeError as e: - print(f"Error: Invalid JSON - {e.msg} at line {e.lineno}, column {e.colno}", file=sys.stderr) - sys.exit(2) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/json-tools/tools/query_json.py b/coderrr-skills/skills/json-tools/tools/query_json.py deleted file mode 100644 index 4dbd0e6..0000000 --- a/coderrr-skills/skills/json-tools/tools/query_json.py +++ /dev/null @@ -1,194 +0,0 @@ -#!/usr/bin/env python3 -""" -Query JSON data using path expressions. - -This tool allows querying nested JSON data using a simple path syntax -similar to JSONPath. - -Usage: - python query_json.py --file data.json --path "users[0].name" - python query_json.py --file data.json --path "items[*].id" - -Exit Codes: - 0 - Success - 1 - Invalid file path - 2 - JSON parsing error - 3 - Invalid path expression -""" - -import argparse -import sys -import json -import re -from pathlib import Path -from typing import Any, List - - -def parse_path(path: str) -> List[Any]: - """ - Parse a path expression into components. - - Supports: - - .key or key - object property - - [0] - array index - - [*] - all array elements - - Args: - path: Path expression string - - Returns: - List of path components - """ - components = [] - - # Split on dots and brackets - pattern = r'\.?([^\.\[\]]+)|\[(\d+|\*)\]' - - for match in re.finditer(pattern, path): - if match.group(1): - # Property name - components.append(match.group(1)) - elif match.group(2): - # Array index or wildcard - idx = match.group(2) - if idx == '*': - components.append('*') - else: - components.append(int(idx)) - - return components - - -def query_value(data: Any, components: List[Any]) -> Any: - """ - Query a value from data using path components. 
- - Args: - data: The JSON data to query - components: List of path components - - Returns: - The matched value(s) - - Raises: - KeyError: If a key doesn't exist - IndexError: If an index is out of range - TypeError: If the path is invalid for the data type - """ - if not components: - return data - - current = data - result_is_array = False - results = [] - - for i, component in enumerate(components): - remaining = components[i + 1:] - - if component == '*': - # Wildcard - apply remaining path to all elements - if not isinstance(current, list): - raise TypeError(f"Can't use [*] on non-array value") - - for item in current: - try: - result = query_value(item, remaining) - if isinstance(result, list) and remaining and remaining[-1] == '*': - results.extend(result) - else: - results.append(result) - except (KeyError, IndexError, TypeError): - pass - - return results - - elif isinstance(component, int): - # Array index - if not isinstance(current, list): - raise TypeError(f"Can't use [{component}] on non-array value") - current = current[component] - - else: - # Object property - if not isinstance(current, dict): - raise TypeError(f"Can't access '{component}' on non-object value") - if component not in current: - raise KeyError(f"Key '{component}' not found") - current = current[component] - - return current - - -def query_json(file_path: str, path: str) -> Any: - """ - Query JSON file with a path expression. 
- - Args: - file_path: Path to JSON file - path: Path expression - - Returns: - The matched value - """ - path_obj = Path(file_path) - - if not path_obj.exists(): - raise FileNotFoundError(f"File not found: {file_path}") - - with open(path_obj, 'r', encoding='utf-8') as f: - data = json.load(f) - - components = parse_path(path) - - if not components: - return data - - return query_value(data, components) - - -def main(): - parser = argparse.ArgumentParser( - description='Query JSON data using path expressions', - formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=''' -Path Syntax: - .key or key - Access object property - [0] - Access array element by index - [*] - Access all array elements - -Examples: - python query_json.py --file data.json --path "user.name" - python query_json.py --file data.json --path "users[0]" - python query_json.py --file data.json --path "items[*].id" - python query_json.py --file config.json --path "database.host" - ''' - ) - parser.add_argument( - '--file', - required=True, - help='Path to JSON file' - ) - parser.add_argument( - '--path', - required=True, - help='Path expression (e.g., "users[0].name")' - ) - - args = parser.parse_args() - - try: - result = query_json(args.file, args.path) - print(json.dumps(result, indent=2, ensure_ascii=False)) - except FileNotFoundError as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - except json.JSONDecodeError as e: - print(f"Error: Invalid JSON - {e.msg} at line {e.lineno}", file=sys.stderr) - sys.exit(2) - except (KeyError, IndexError, TypeError) as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(3) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/json-tools/tools/validate_json.py b/coderrr-skills/skills/json-tools/tools/validate_json.py deleted file mode 100644 index cdb6178..0000000 --- a/coderrr-skills/skills/json-tools/tools/validate_json.py +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/env python3 -""" -Validate JSON syntax. 
- -This tool checks if a JSON file has valid syntax and reports -specific error locations if invalid. - -Usage: - python validate_json.py --file data.json - -Exit Codes: - 0 - Success (valid JSON) - 1 - Invalid file path - 2 - Invalid JSON (with error details) -""" - -import argparse -import sys -import json -from pathlib import Path - - -def validate_json(file_path: str) -> dict: - """ - Validate JSON file syntax. - - Args: - file_path: Path to JSON file - - Returns: - Dictionary with validation result - """ - path = Path(file_path) - - if not path.exists(): - raise FileNotFoundError(f"File not found: {file_path}") - - try: - with open(path, 'r', encoding='utf-8') as f: - content = f.read() - except IOError as e: - raise IOError(f"Could not read file: {e}") - - try: - json.loads(content) - return { - 'valid': True, - 'file': str(path) - } - except json.JSONDecodeError as e: - return { - 'valid': False, - 'file': str(path), - 'error': { - 'message': e.msg, - 'line': e.lineno, - 'column': e.colno - } - } - - -def main(): - parser = argparse.ArgumentParser( - description='Validate JSON syntax', - formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=''' -Examples: - python validate_json.py --file data.json - python validate_json.py --file config.json - ''' - ) - parser.add_argument( - '--file', - required=True, - help='Path to JSON file to validate' - ) - - args = parser.parse_args() - - try: - result = validate_json(args.file) - print(json.dumps(result, indent=2)) - - # Exit with code 2 if invalid (but still output the result) - if not result['valid']: - sys.exit(2) - - except FileNotFoundError as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - except IOError as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/mcp-builder/Skills.md b/coderrr-skills/skills/mcp-builder/Skills.md deleted file mode 100644 index bc62027..0000000 --- 
a/coderrr-skills/skills/mcp-builder/Skills.md +++ /dev/null @@ -1,203 +0,0 @@ ---- -name: mcp-builder -description: Guide for creating high-quality MCP (Model Context Protocol) servers. Use this skill when the user wants to build an MCP server, create MCP tools, implement MCP resources, or integrate with MCP-compatible clients. Provides scaffolding, templates, and validation. ---- - -This skill helps build MCP (Model Context Protocol) servers for extending AI agent capabilities. It provides scaffolding, templates, and best practices for creating production-quality MCP implementations. - -The user wants to create an MCP server. They may specify the type of tools, resources, or prompts they want to expose. - -## Approach - -When building MCP servers: -1. **Initialize**: Use `init_mcp` to scaffold server structure -2. **Add tools**: Use `add_mcp_tool` for each tool -3. **Add resources**: Use `add_mcp_resource` for data sources -4. **Validate**: Use `validate_mcp` to check compliance -5. **Test**: Use `test_mcp` to verify functionality - -## Tools - -### init_mcp - -Scaffolds a new MCP server project. - -```bash -python tools/init_mcp.py --name --language --output-dir [--transport ] -``` - -**Arguments:** -- `--name` (required): Server name -- `--language` (required): Implementation language - `py` or `ts` -- `--output-dir` (required): Output directory -- `--transport` (optional): Transport type (default: stdio) - -**Creates:** -``` -my-mcp-server/ -ā”œā”€ā”€ src/ -│ └── server.py (or index.ts) -ā”œā”€ā”€ pyproject.toml (or package.json) -ā”œā”€ā”€ README.md -└── tests/ -``` - -**When to use:** -- Starting a new MCP server -- Getting proper project structure -- Setting up dependencies - ---- - -### add_mcp_tool - -Adds a tool definition to the MCP server. 
- -```bash -python tools/add_mcp_tool.py --server-dir --name --description --parameters -``` - -**Arguments:** -- `--server-dir` (required): MCP server directory -- `--name` (required): Tool name -- `--description` (required): Tool description -- `--parameters` (required): JSON schema for parameters - -**Parameters JSON:** -```json -{ - "type": "object", - "properties": { - "query": {"type": "string", "description": "Search query"}, - "limit": {"type": "integer", "default": 10} - }, - "required": ["query"] -} -``` - -**When to use:** -- Adding server capabilities -- Exposing functions to AI agents -- Implementing tool handlers - ---- - -### add_mcp_resource - -Adds a resource definition to the MCP server. - -```bash -python tools/add_mcp_resource.py --server-dir --uri --name --description [--mime-type ] -``` - -**Arguments:** -- `--server-dir` (required): MCP server directory -- `--uri` (required): Resource URI pattern (e.g., `file:///{path}`) -- `--name` (required): Resource name -- `--description` (required): Resource description -- `--mime-type` (optional): Content type (default: text/plain) - -**When to use:** -- Exposing data sources -- Providing file access -- Sharing dynamic content - ---- - -### validate_mcp - -Validates MCP server implementation. - -```bash -python tools/validate_mcp.py --server-dir -``` - -**Output:** Validation report with compliance status and issues. - -**Checks:** -- Valid manifest structure -- Tool definitions follow schema -- Resource URIs are valid -- Handler implementations exist - -**When to use:** -- Before publishing -- CI/CD validation -- Debugging issues - ---- - -### test_mcp - -Tests MCP server functionality. 
- -```bash -python tools/test_mcp.py --server-dir [--tool ] [--input ] -``` - -**Arguments:** -- `--server-dir` (required): MCP server directory -- `--tool` (optional): Specific tool to test -- `--input` (optional): Test input JSON - -**When to use:** -- Verifying tool behavior -- Testing handlers -- Debugging responses - -## MCP Concepts - -### Tools -Functions the AI can invoke: -```python -@server.tool("search") -async def search(query: str, limit: int = 10): - """Search for documents.""" - return {"results": [...]} -``` - -### Resources -Data the AI can read: -```python -@server.resource("file:///{path}") -async def read_file(path: str): - """Read file contents.""" - return {"content": ...} -``` - -### Prompts -Pre-defined prompt templates: -```python -@server.prompt("summarize") -def summarize_prompt(content: str): - """Create summary prompt.""" - return f"Summarize: {content}" -``` - -## Common Patterns - -### Create Python MCP Server -```bash -python tools/init_mcp.py --name my-server --language py --output-dir ./servers -python tools/add_mcp_tool.py --server-dir ./servers/my-server --name search --description "Search documents" --parameters '{"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]}' -python tools/validate_mcp.py --server-dir ./servers/my-server -``` - -### Add File Resource -```bash -python tools/add_mcp_resource.py --server-dir ./my-server --uri "file:///{path}" --name "files" --description "Access local files" --mime-type text/plain -``` - -## Best Practices - -1. **Clear descriptions** - Help AI understand when to use tools -2. **Typed parameters** - Use JSON Schema for validation -3. **Error handling** - Return meaningful error messages -4. **Async handlers** - Use async for I/O operations -5. 
**Test thoroughly** - Verify with various inputs - -## Dependencies - -For Python servers: -- `mcp>=1.0.0` -- `httpx>=0.25.0` (for SSE transport) diff --git a/coderrr-skills/skills/mcp-builder/tools/add_mcp_tool.py b/coderrr-skills/skills/mcp-builder/tools/add_mcp_tool.py deleted file mode 100644 index 14b55bf..0000000 --- a/coderrr-skills/skills/mcp-builder/tools/add_mcp_tool.py +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env python3 -""" -Add a tool to MCP server. - -Usage: - python add_mcp_tool.py --server-dir ./my-server --name search --description "Search documents" --parameters '{...}' -""" - -import argparse -import sys -import json -from pathlib import Path - - -def add_mcp_tool(server_dir: str, name: str, description: str, parameters: dict): - """Add tool to MCP server.""" - server_path = Path(server_dir) - - if not server_path.exists(): - raise ValueError(f"Server directory not found: {server_dir}") - - # Update config - config_file = server_path / 'mcp.json' - if config_file.exists(): - config = json.loads(config_file.read_text()) - else: - config = {"tools": [], "resources": []} - - tool_def = { - "name": name, - "description": description, - "parameters": parameters - } - - # Check for duplicate - if any(t["name"] == name for t in config.get("tools", [])): - raise ValueError(f"Tool '{name}' already exists") - - config["tools"].append(tool_def) - config_file.write_text(json.dumps(config, indent=2)) - - # Generate handler stub - handler_code = f''' -# Tool: {name} -# {description} -async def handle_{name.replace('-', '_')}({', '.join(parameters.get('properties', {}).keys())}): - """ - {description} - """ - # TODO: Implement tool logic - return {{"result": "Not implemented"}} -''' - - handlers_file = server_path / 'src' / 'handlers.py' - if handlers_file.exists(): - existing = handlers_file.read_text() - handlers_file.write_text(existing + handler_code) - else: - handlers_file.write_text(f'# Tool handlers\n{handler_code}') - - return { - "status": 
"success", - "tool": name, - "handler": str(handlers_file) - } - - -def main(): - parser = argparse.ArgumentParser(description='Add MCP tool') - parser.add_argument('--server-dir', required=True, help='Server directory') - parser.add_argument('--name', required=True, help='Tool name') - parser.add_argument('--description', required=True, help='Tool description') - parser.add_argument('--parameters', required=True, help='Parameters JSON schema') - - args = parser.parse_args() - - try: - parameters = json.loads(args.parameters) - except json.JSONDecodeError as e: - print(f"Error: Invalid parameters JSON - {e}", file=sys.stderr) - sys.exit(1) - - try: - result = add_mcp_tool(args.server_dir, args.name, args.description, parameters) - print(json.dumps(result, indent=2)) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/mcp-builder/tools/init_mcp.py b/coderrr-skills/skills/mcp-builder/tools/init_mcp.py deleted file mode 100644 index 33cecd7..0000000 --- a/coderrr-skills/skills/mcp-builder/tools/init_mcp.py +++ /dev/null @@ -1,181 +0,0 @@ -#!/usr/bin/env python3 -""" -Initialize a new MCP server project. - -Usage: - python init_mcp.py --name my-server --language py --output-dir ./servers -""" - -import argparse -import sys -import json -from pathlib import Path - - -PYTHON_SERVER_TEMPLATE = '''#!/usr/bin/env python3 -""" -{name} MCP Server - -A Model Context Protocol server providing [description]. 
-""" - -import asyncio -from mcp.server import Server -from mcp.server.stdio import stdio_server -from mcp.types import Tool, TextContent - - -# Create server instance -server = Server("{name}") - - -# Tools registry -@server.list_tools() -async def list_tools(): - """List available tools.""" - return [ - # Add tools here - ] - - -@server.call_tool() -async def call_tool(name: str, arguments: dict): - """Handle tool calls.""" - # Implement tool handlers - raise ValueError(f"Unknown tool: {{name}}") - - -# Resources registry -@server.list_resources() -async def list_resources(): - """List available resources.""" - return [ - # Add resources here - ] - - -@server.read_resource() -async def read_resource(uri: str): - """Read resource content.""" - raise ValueError(f"Unknown resource: {{uri}}") - - -async def main(): - """Run the MCP server.""" - async with stdio_server() as (read_stream, write_stream): - await server.run(read_stream, write_stream) - - -if __name__ == "__main__": - asyncio.run(main()) -''' - - -PYPROJECT_TEMPLATE = '''[project] -name = "{name}" -version = "0.1.0" -description = "MCP server for {name}" -requires-python = ">=3.10" -dependencies = [ - "mcp>=1.0.0", -] - -[project.scripts] -{name} = "src.server:main" -''' - - -README_TEMPLATE = '''# {name} - -A Model Context Protocol (MCP) server. - -## Installation - -```bash -pip install -e . 
-``` - -## Usage - -Run the server: -```bash -python src/server.py -``` - -## Tools - -| Tool | Description | -|------|-------------| -| (Add tools) | (Description) | - -## Resources - -| URI Pattern | Description | -|-------------|-------------| -| (Add resources) | (Description) | - -## License - -MIT -''' - - -def init_mcp(name: str, language: str, output_dir: str, transport: str = 'stdio'): - """Initialize MCP server project.""" - server_dir = Path(output_dir) / name - - # Create directories - server_dir.mkdir(parents=True, exist_ok=True) - (server_dir / 'src').mkdir(exist_ok=True) - (server_dir / 'tests').mkdir(exist_ok=True) - - if language == 'py': - # Python server - (server_dir / 'src' / 'server.py').write_text( - PYTHON_SERVER_TEMPLATE.format(name=name) - ) - (server_dir / 'src' / '__init__.py').write_text('') - (server_dir / 'pyproject.toml').write_text( - PYPROJECT_TEMPLATE.format(name=name) - ) - - # Common files - (server_dir / 'README.md').write_text(README_TEMPLATE.format(name=name)) - - # Server config - config = { - "name": name, - "language": language, - "transport": transport, - "tools": [], - "resources": [] - } - (server_dir / 'mcp.json').write_text(json.dumps(config, indent=2)) - - return { - "status": "success", - "server_dir": str(server_dir), - "language": language, - "transport": transport - } - - -def main(): - parser = argparse.ArgumentParser(description='Initialize MCP server') - parser.add_argument('--name', required=True, help='Server name') - parser.add_argument('--language', required=True, choices=['py', 'ts']) - parser.add_argument('--output-dir', required=True, help='Output directory') - parser.add_argument('--transport', default='stdio', choices=['stdio', 'sse']) - - args = parser.parse_args() - - try: - result = init_mcp(args.name, args.language, args.output_dir, args.transport) - print(json.dumps(result, indent=2)) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - 
main() diff --git a/coderrr-skills/skills/mcp-builder/tools/validate_mcp.py b/coderrr-skills/skills/mcp-builder/tools/validate_mcp.py deleted file mode 100644 index 8f2741d..0000000 --- a/coderrr-skills/skills/mcp-builder/tools/validate_mcp.py +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env python3 -""" -Validate MCP server implementation. - -Usage: - python validate_mcp.py --server-dir ./my-server -""" - -import argparse -import sys -import json -from pathlib import Path -import ast - - -def validate_mcp(server_dir: str) -> dict: - """Validate MCP server.""" - server_path = Path(server_dir) - issues = [] - warnings = [] - - # Check directory exists - if not server_path.exists(): - return {"valid": False, "issues": ["Server directory not found"]} - - # Check mcp.json - config_file = server_path / 'mcp.json' - if not config_file.exists(): - issues.append("mcp.json configuration file not found") - else: - try: - config = json.loads(config_file.read_text()) - - if not config.get("name"): - warnings.append("Server name not specified in mcp.json") - - tools = config.get("tools", []) - for tool in tools: - if not tool.get("name"): - issues.append("Tool missing name") - if not tool.get("description"): - warnings.append(f"Tool '{tool.get('name', 'unknown')}' missing description") - if not tool.get("parameters"): - warnings.append(f"Tool '{tool.get('name', 'unknown')}' missing parameters schema") - - except json.JSONDecodeError: - issues.append("mcp.json is not valid JSON") - - # Check server implementation - server_file = server_path / 'src' / 'server.py' - if not server_file.exists(): - server_file = server_path / 'src' / 'index.ts' - - if not server_file.exists(): - issues.append("Server implementation not found (src/server.py or src/index.ts)") - else: - # Validate Python syntax - if server_file.suffix == '.py': - try: - ast.parse(server_file.read_text()) - except SyntaxError as e: - issues.append(f"Syntax error in server.py: line {e.lineno}") - - # Check for README - if 
not (server_path / 'README.md').exists(): - warnings.append("README.md not found") - - return { - "valid": len(issues) == 0, - "issues": issues, - "warnings": warnings, - "tools_count": len(config.get("tools", [])) if config_file.exists() else 0, - "resources_count": len(config.get("resources", [])) if config_file.exists() else 0 - } - - -def main(): - parser = argparse.ArgumentParser(description='Validate MCP server') - parser.add_argument('--server-dir', required=True, help='Server directory') - - args = parser.parse_args() - - try: - result = validate_mcp(args.server_dir) - result["server_dir"] = args.server_dir - print(json.dumps(result, indent=2)) - - if not result["valid"]: - sys.exit(1) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/pdf/Skills.md b/coderrr-skills/skills/pdf/Skills.md deleted file mode 100644 index ffbb60d..0000000 --- a/coderrr-skills/skills/pdf/Skills.md +++ /dev/null @@ -1,186 +0,0 @@ ---- -name: pdf -description: Comprehensive PDF toolkit for document manipulation. Use this skill when the user asks to extract text from PDFs, create PDF documents, merge or split PDFs, extract tables from PDFs, work with PDF forms, or analyze PDF structure. Handles both text extraction and PDF generation. ---- - -This skill provides complete PDF manipulation capabilities using PyPDF2 and pdfplumber for reading, and reportlab for creation. It handles text extraction, document merging/splitting, and PDF generation. - -The user provides PDF files to process or content to convert to PDF. They may want to extract information, combine documents, or create new PDFs. 
- -## Approach - -Before invoking tools, understand the PDF operation: -- **Extract text**: Use `extract_pdf` for text and table extraction -- **Create new PDF**: Use `create_pdf` to generate from content -- **Merge files**: Use `merge_pdf` to combine multiple PDFs -- **Split file**: Use `split_pdf` to separate pages -- **Get info**: Use `pdf_info` for metadata and structure - -## Tools - -### extract_pdf - -Extracts text and optionally tables from PDF documents. - -```bash -python tools/extract_pdf.py --file [--pages ] [--tables] [--format ] -``` - -**Arguments:** -- `--file` (required): Path to PDF file -- `--pages` (optional): Page range (e.g., "1-5", "1,3,5", "all") -- `--tables` (optional): Extract tables as structured data -- `--format` (optional): Output format - `text` or `json` (default: text) - -**Output:** Extracted text or JSON with text and tables. - -**When to use:** -- Reading PDF content -- Extracting data from reports -- Processing scanned documents (with text layer) -- Getting tabular data from PDFs - ---- - -### create_pdf - -Creates PDF documents from content specification. - -```bash -python tools/create_pdf.py --output --content [--title ] -``` - -**Arguments:** -- `--output` (required): Output PDF file path -- `--content` (required): JSON content specification -- `--title` (optional): Document title - -**Content JSON Structure:** -```json -{ - "elements": [ - {"type": "heading", "text": "Title", "size": 24}, - {"type": "paragraph", "text": "Body text..."}, - {"type": "list", "items": ["Item 1", "Item 2"]}, - {"type": "table", "headers": ["A", "B"], "rows": [["1", "2"]]}, - {"type": "page_break"} - ] -} -``` - -**When to use:** -- Generating reports -- Creating invoices -- Building PDF documents programmatically -- Converting structured data to PDF - ---- - -### merge_pdf - -Combines multiple PDF files into one. - -```bash -python tools/merge_pdf.py --files <path1> <path2> ... 
--output <path> -``` - -**Arguments:** -- `--files` (required): List of PDF files to merge -- `--output` (required): Output merged PDF path - -**When to use:** -- Combining report sections -- Merging scanned documents -- Creating document packages -- Assembling multi-part documents - ---- - -### split_pdf - -Splits a PDF into separate files. - -```bash -python tools/split_pdf.py --file <path> --output-dir <dir> [--pages <spec>] -``` - -**Arguments:** -- `--file` (required): PDF file to split -- `--output-dir` (required): Directory for output files -- `--pages` (optional): Page specification (e.g., "1-3,4-6" or "each" for individual pages) - -**When to use:** -- Extracting specific pages -- Breaking up large documents -- Creating individual page files -- Separating document sections - ---- - -### pdf_info - -Gets PDF metadata and structure information. - -```bash -python tools/pdf_info.py --file <path> -``` - -**Arguments:** -- `--file` (required): PDF file to analyze - -**Output:** JSON with page count, metadata, file size, and structure info. 
- -**When to use:** -- Checking PDF properties -- Getting page counts -- Verifying PDF integrity -- Understanding document structure - -## Common Patterns - -### Extract All Text -```bash -python tools/extract_pdf.py --file document.pdf --format text -``` - -### Extract Specific Pages -```bash -python tools/extract_pdf.py --file report.pdf --pages "1-5" -``` - -### Extract Tables as JSON -```bash -python tools/extract_pdf.py --file data.pdf --tables --format json -``` - -### Merge Multiple PDFs -```bash -python tools/merge_pdf.py --files part1.pdf part2.pdf part3.pdf --output combined.pdf -``` - -### Split Into Individual Pages -```bash -python tools/split_pdf.py --file document.pdf --output-dir ./pages --pages each -``` - -### Create Simple PDF -```bash -python tools/create_pdf.py --output report.pdf --title "Report" --content '{"elements": [{"type": "heading", "text": "Summary"}, {"type": "paragraph", "text": "Content here..."}]}' -``` - -## Best Practices - -1. **Check pdf_info first** - Understand document structure before processing -2. **Use page ranges** - Don't extract everything if you only need specific pages -3. **Handle scanned PDFs** - Some PDFs are images without text layers -4. **Preserve originals** - Merge/split create new files, don't modify originals -5. **Use tables flag** - Better structured output for tabular data - -## Dependencies - -Requires: -- `PyPDF2>=3.0.0` - PDF reading and manipulation -- `pdfplumber>=0.9.0` - Advanced text and table extraction -- `reportlab>=4.0.0` - PDF creation - -Automatically installed with the skill. 
diff --git a/coderrr-skills/skills/pdf/requirements.txt b/coderrr-skills/skills/pdf/requirements.txt deleted file mode 100644 index c93fb29..0000000 --- a/coderrr-skills/skills/pdf/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -PyPDF2>=3.0.0 -pdfplumber>=0.9.0 -reportlab>=4.0.0 diff --git a/coderrr-skills/skills/pdf/tools/create_pdf.py b/coderrr-skills/skills/pdf/tools/create_pdf.py deleted file mode 100644 index b5cb182..0000000 --- a/coderrr-skills/skills/pdf/tools/create_pdf.py +++ /dev/null @@ -1,114 +0,0 @@ -#!/usr/bin/env python3 -""" -Create PDF documents from content specification. - -Usage: - python create_pdf.py --output report.pdf --content '{"elements": [...]}' -""" - -import argparse -import sys -import json -from pathlib import Path - -try: - from reportlab.lib.pagesizes import letter - from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle - from reportlab.lib.units import inch - from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle, PageBreak - from reportlab.lib import colors -except ImportError: - print("Error: 'reportlab' package is required. 
Install with: pip install reportlab", file=sys.stderr) - sys.exit(1) - - -def create_pdf(output_path: str, content: dict, title: str = None): - """Create a PDF document from content specification.""" - doc = SimpleDocTemplate(output_path, pagesize=letter, - rightMargin=72, leftMargin=72, - topMargin=72, bottomMargin=72) - - styles = getSampleStyleSheet() - story = [] - - # Add title if provided - if title: - story.append(Paragraph(title, styles['Title'])) - story.append(Spacer(1, 0.5 * inch)) - - # Process elements - for element in content.get('elements', []): - elem_type = element.get('type', 'paragraph') - - if elem_type == 'heading': - size = element.get('size', 18) - style = ParagraphStyle('CustomHeading', parent=styles['Heading1'], fontSize=size) - story.append(Paragraph(element.get('text', ''), style)) - story.append(Spacer(1, 0.2 * inch)) - - elif elem_type == 'paragraph': - story.append(Paragraph(element.get('text', ''), styles['Normal'])) - story.append(Spacer(1, 0.1 * inch)) - - elif elem_type == 'list': - items = element.get('items', []) - for item in items: - bullet = "• " if not element.get('ordered') else f"{items.index(item) + 1}. 
" - story.append(Paragraph(f"{bullet}{item}", styles['Normal'])) - story.append(Spacer(1, 0.1 * inch)) - - elif elem_type == 'table': - headers = element.get('headers', []) - rows = element.get('rows', []) - data = [headers] + rows if headers else rows - - if data: - table = Table(data) - table.setStyle(TableStyle([ - ('BACKGROUND', (0, 0), (-1, 0), colors.grey), - ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke), - ('ALIGN', (0, 0), (-1, -1), 'CENTER'), - ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'), - ('FONTSIZE', (0, 0), (-1, 0), 12), - ('BOTTOMPADDING', (0, 0), (-1, 0), 12), - ('BACKGROUND', (0, 1), (-1, -1), colors.beige), - ('GRID', (0, 0), (-1, -1), 1, colors.black) - ])) - story.append(table) - story.append(Spacer(1, 0.2 * inch)) - - elif elem_type == 'page_break': - story.append(PageBreak()) - - elif elem_type == 'spacer': - height = element.get('height', 0.5) - story.append(Spacer(1, height * inch)) - - doc.build(story) - return output_path - - -def main(): - parser = argparse.ArgumentParser(description='Create PDF documents') - parser.add_argument('--output', required=True, help='Output PDF file path') - parser.add_argument('--content', required=True, help='JSON content specification') - parser.add_argument('--title', help='Document title') - - args = parser.parse_args() - - try: - content = json.loads(args.content) - except json.JSONDecodeError as e: - print(f"Error: Invalid content JSON - {e}", file=sys.stderr) - sys.exit(1) - - try: - result = create_pdf(args.output, content, args.title) - print(json.dumps({"status": "success", "file": result})) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/pdf/tools/extract_pdf.py b/coderrr-skills/skills/pdf/tools/extract_pdf.py deleted file mode 100644 index 2707060..0000000 --- a/coderrr-skills/skills/pdf/tools/extract_pdf.py +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env python3 -""" -Extract text and 
tables from PDF documents. - -Usage: - python extract_pdf.py --file document.pdf --format text - python extract_pdf.py --file data.pdf --tables --format json -""" - -import argparse -import sys -import json -from pathlib import Path - -try: - import pdfplumber -except ImportError: - print("Error: 'pdfplumber' package is required. Install with: pip install pdfplumber", file=sys.stderr) - sys.exit(1) - - -def parse_page_range(page_spec: str, total_pages: int) -> list: - """Parse page specification into list of page numbers (0-indexed).""" - if not page_spec or page_spec.lower() == 'all': - return list(range(total_pages)) - - pages = [] - for part in page_spec.split(','): - part = part.strip() - if '-' in part: - start, end = part.split('-') - start = int(start) - 1 - end = min(int(end), total_pages) - pages.extend(range(start, end)) - else: - pages.append(int(part) - 1) - - return [p for p in pages if 0 <= p < total_pages] - - -def extract_pdf(file_path: str, pages: str = None, extract_tables: bool = False, output_format: str = 'text'): - """Extract content from PDF.""" - with pdfplumber.open(file_path) as pdf: - page_nums = parse_page_range(pages, len(pdf.pages)) - - if output_format == 'json': - result = { - "file": str(file_path), - "total_pages": len(pdf.pages), - "extracted_pages": len(page_nums), - "content": [] - } - - for page_num in page_nums: - page = pdf.pages[page_num] - page_data = { - "page": page_num + 1, - "text": page.extract_text() or "" - } - - if extract_tables: - tables = page.extract_tables() - page_data["tables"] = tables if tables else [] - - result["content"].append(page_data) - - return json.dumps(result, indent=2) - - else: # text format - text_parts = [] - for page_num in page_nums: - page = pdf.pages[page_num] - text = page.extract_text() - if text: - text_parts.append(f"--- Page {page_num + 1} ---\n{text}") - - if extract_tables: - tables = page.extract_tables() - for i, table in enumerate(tables): - text_parts.append(f"\n[Table {i + 
1}]") - for row in table: - text_parts.append(" | ".join(str(cell) if cell else "" for cell in row)) - - return '\n\n'.join(text_parts) - - -def main(): - parser = argparse.ArgumentParser(description='Extract text and tables from PDFs') - parser.add_argument('--file', required=True, help='Path to PDF file') - parser.add_argument('--pages', help='Page range (e.g., "1-5", "1,3,5", "all")') - parser.add_argument('--tables', action='store_true', help='Extract tables') - parser.add_argument('--format', choices=['text', 'json'], default='text', help='Output format') - - args = parser.parse_args() - - if not Path(args.file).exists(): - print(f"Error: File not found: {args.file}", file=sys.stderr) - sys.exit(1) - - try: - result = extract_pdf(args.file, args.pages, args.tables, args.format) - print(result) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/pdf/tools/merge_pdf.py b/coderrr-skills/skills/pdf/tools/merge_pdf.py deleted file mode 100644 index 4349743..0000000 --- a/coderrr-skills/skills/pdf/tools/merge_pdf.py +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env python3 -""" -Merge multiple PDF files into one. - -Usage: - python merge_pdf.py --files doc1.pdf doc2.pdf --output merged.pdf -""" - -import argparse -import sys -import json -from pathlib import Path - -try: - from PyPDF2 import PdfMerger -except ImportError: - print("Error: 'PyPDF2' package is required. 
Install with: pip install PyPDF2", file=sys.stderr) - sys.exit(1) - - -def merge_pdfs(file_paths: list, output_path: str): - """Merge multiple PDF files.""" - merger = PdfMerger() - - for file_path in file_paths: - if not Path(file_path).exists(): - raise FileNotFoundError(f"File not found: {file_path}") - merger.append(file_path) - - merger.write(output_path) - merger.close() - - return output_path - - -def main(): - parser = argparse.ArgumentParser(description='Merge PDF files') - parser.add_argument('--files', nargs='+', required=True, help='PDF files to merge') - parser.add_argument('--output', required=True, help='Output merged PDF path') - - args = parser.parse_args() - - try: - result = merge_pdfs(args.files, args.output) - print(json.dumps({ - "status": "success", - "file": result, - "merged_count": len(args.files) - })) - except FileNotFoundError as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/pdf/tools/pdf_info.py b/coderrr-skills/skills/pdf/tools/pdf_info.py deleted file mode 100644 index 76ffd44..0000000 --- a/coderrr-skills/skills/pdf/tools/pdf_info.py +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env python3 -""" -Get PDF metadata and information. - -Usage: - python pdf_info.py --file document.pdf -""" - -import argparse -import sys -import json -from pathlib import Path - -try: - from PyPDF2 import PdfReader -except ImportError: - print("Error: 'PyPDF2' package is required. 
Install with: pip install PyPDF2", file=sys.stderr) - sys.exit(1) - - -def get_pdf_info(file_path: str) -> dict: - """Get PDF metadata and structure information.""" - path = Path(file_path) - reader = PdfReader(file_path) - - # Get file size - file_size = path.stat().st_size - - # Get metadata - metadata = reader.metadata - meta_dict = {} - if metadata: - for key in ['/Title', '/Author', '/Subject', '/Creator', '/Producer', '/CreationDate', '/ModDate']: - if key in metadata: - meta_dict[key.lstrip('/')] = str(metadata[key]) - - # Page info - pages_info = [] - for i, page in enumerate(reader.pages[:10]): # First 10 pages - mediabox = page.mediabox - pages_info.append({ - "page": i + 1, - "width": float(mediabox.width), - "height": float(mediabox.height) - }) - - return { - "file": str(path.absolute()), - "file_size": file_size, - "file_size_human": f"{file_size / 1024:.2f} KB" if file_size < 1024 * 1024 else f"{file_size / 1024 / 1024:.2f} MB", - "page_count": len(reader.pages), - "encrypted": reader.is_encrypted, - "metadata": meta_dict, - "pages": pages_info - } - - -def main(): - parser = argparse.ArgumentParser(description='Get PDF information') - parser.add_argument('--file', required=True, help='PDF file to analyze') - - args = parser.parse_args() - - if not Path(args.file).exists(): - print(f"Error: File not found: {args.file}", file=sys.stderr) - sys.exit(1) - - try: - result = get_pdf_info(args.file) - print(json.dumps(result, indent=2)) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/pdf/tools/split_pdf.py b/coderrr-skills/skills/pdf/tools/split_pdf.py deleted file mode 100644 index 5a2042d..0000000 --- a/coderrr-skills/skills/pdf/tools/split_pdf.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python3 -""" -Split PDF into separate files. 
- -Usage: - python split_pdf.py --file document.pdf --output-dir ./pages --pages each -""" - -import argparse -import sys -import json -from pathlib import Path - -try: - from PyPDF2 import PdfReader, PdfWriter -except ImportError: - print("Error: 'PyPDF2' package is required. Install with: pip install PyPDF2", file=sys.stderr) - sys.exit(1) - - -def split_pdf(file_path: str, output_dir: str, pages_spec: str = 'each'): - """Split PDF into separate files.""" - reader = PdfReader(file_path) - total_pages = len(reader.pages) - output_path = Path(output_dir) - output_path.mkdir(parents=True, exist_ok=True) - - base_name = Path(file_path).stem - output_files = [] - - if pages_spec == 'each': - # Split into individual pages - for i in range(total_pages): - writer = PdfWriter() - writer.add_page(reader.pages[i]) - - output_file = output_path / f"{base_name}_page_{i + 1}.pdf" - with open(output_file, 'wb') as f: - writer.write(f) - output_files.append(str(output_file)) - - else: - # Split by ranges (e.g., "1-3,4-6,7-10") - ranges = pages_spec.split(',') - for idx, range_spec in enumerate(ranges): - range_spec = range_spec.strip() - if '-' in range_spec: - start, end = map(int, range_spec.split('-')) - else: - start = end = int(range_spec) - - writer = PdfWriter() - for page_num in range(start - 1, min(end, total_pages)): - writer.add_page(reader.pages[page_num]) - - output_file = output_path / f"{base_name}_part_{idx + 1}.pdf" - with open(output_file, 'wb') as f: - writer.write(f) - output_files.append(str(output_file)) - - return output_files - - -def main(): - parser = argparse.ArgumentParser(description='Split PDF files') - parser.add_argument('--file', required=True, help='PDF file to split') - parser.add_argument('--output-dir', required=True, help='Output directory') - parser.add_argument('--pages', default='each', help='Page spec: "each" or ranges like "1-3,4-6"') - - args = parser.parse_args() - - if not Path(args.file).exists(): - print(f"Error: File not found: 
{args.file}", file=sys.stderr) - sys.exit(1) - - try: - result = split_pdf(args.file, args.output_dir, args.pages) - print(json.dumps({ - "status": "success", - "files": result, - "count": len(result) - }, indent=2)) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/pptx/Skills.md b/coderrr-skills/skills/pptx/Skills.md deleted file mode 100644 index 1db79fe..0000000 --- a/coderrr-skills/skills/pptx/Skills.md +++ /dev/null @@ -1,146 +0,0 @@ ---- -name: pptx -description: Create, edit, and analyze PowerPoint presentations. Use this skill when the user asks to create slides, modify presentations, extract content from PPTX files, add speaker notes, or analyze presentation structure. Supports layouts, images, charts, and professional formatting. ---- - -This skill provides comprehensive PowerPoint manipulation using python-pptx. It handles presentation creation, slide editing, content extraction, and structural analysis. - -The user provides presentation requirements or existing files to process. They may want to create new presentations, modify existing ones, or extract content from slides. - -## Approach - -Before invoking tools, understand the presentation task: -- **Create new**: Use `create_pptx` with slide specifications -- **Extract content**: Use `read_pptx` to get text and structure -- **Modify slides**: Use `edit_pptx` to update content -- **Analyze structure**: Use `analyze_pptx` for presentation overview - -## Tools - -### create_pptx - -Creates PowerPoint presentations with specified slides and content. 
- -```bash -python tools/create_pptx.py --output <path> --title <title> --slides <json> -``` - -**Arguments:** -- `--output` (required): Output file path (.pptx) -- `--title` (required): Presentation title (first slide) -- `--slides` (required): JSON array of slide specifications - -**Slides JSON Structure:** -```json -[ - {"layout": "title", "title": "Main Title", "subtitle": "Subtitle"}, - {"layout": "content", "title": "Slide Title", "content": ["Bullet 1", "Bullet 2"]}, - {"layout": "two_content", "title": "Comparison", "left": ["Left items"], "right": ["Right items"]}, - {"layout": "section", "title": "Section Header"}, - {"layout": "blank", "notes": "Speaker notes here"} -] -``` - -**Layouts:** -- `title` - Title slide with subtitle -- `content` - Title with bullet points -- `two_content` - Two column layout -- `section` - Section header -- `blank` - Blank slide - -**When to use:** -- Generating presentations from data -- Creating report slides -- Building pitch decks -- Automating slide generation - ---- - -### read_pptx - -Extracts content from existing PowerPoint files. - -```bash -python tools/read_pptx.py --file <path> [--format <text|json|markdown>] [--include-notes] -``` - -**Arguments:** -- `--file` (required): Path to PowerPoint file -- `--format` (optional): Output format (default: text) -- `--include-notes` (optional): Include speaker notes - -**When to use:** -- Extracting presentation content -- Converting slides to other formats -- Reading speaker notes -- Processing uploaded presentations - ---- - -### edit_pptx - -Modifies existing PowerPoint presentations. 
- -```bash -python tools/edit_pptx.py --file <path> --output <path> --operations <json> -``` - -**Arguments:** -- `--file` (required): Input PowerPoint file -- `--output` (required): Output file path -- `--operations` (required): JSON array of operations - -**Operations:** -```json -[ - {"action": "add_slide", "layout": "content", "title": "New Slide", "content": ["Point 1"]}, - {"action": "update_slide", "index": 2, "title": "Updated Title"}, - {"action": "add_notes", "index": 1, "notes": "Speaker notes..."}, - {"action": "delete_slide", "index": 5} -] -``` - -**When to use:** -- Adding slides to existing presentations -- Updating slide content -- Adding speaker notes -- Modifying presentation structure - ---- - -### analyze_pptx - -Analyzes presentation structure and provides metadata. - -```bash -python tools/analyze_pptx.py --file <path> -``` - -**Output:** JSON with slide count, layouts used, content summary, and word count. - -**When to use:** -- Understanding presentation structure -- Auditing slide content -- Getting presentation statistics -- Quality assurance - -## Common Patterns - -### Create Simple Presentation -```bash -python tools/create_pptx.py --output deck.pptx --title "Q4 Report" --slides '[{"layout": "content", "title": "Summary", "content": ["Revenue up 15%", "New customers: 500"]}]' -``` - -### Extract All Content -```bash -python tools/read_pptx.py --file presentation.pptx --format text --include-notes -``` - -### Add Slide to Existing Deck -```bash -python tools/edit_pptx.py --file deck.pptx --output updated.pptx --operations '[{"action": "add_slide", "layout": "content", "title": "Conclusion", "content": ["Key takeaways"]}]' -``` - -## Dependencies - -Requires `python-pptx>=0.6.21`. Automatically installed with the skill. 
diff --git a/coderrr-skills/skills/pptx/requirements.txt b/coderrr-skills/skills/pptx/requirements.txt deleted file mode 100644 index 529f7b4..0000000 --- a/coderrr-skills/skills/pptx/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -python-pptx>=0.6.21 diff --git a/coderrr-skills/skills/pptx/tools/analyze_pptx.py b/coderrr-skills/skills/pptx/tools/analyze_pptx.py deleted file mode 100644 index 697faa8..0000000 --- a/coderrr-skills/skills/pptx/tools/analyze_pptx.py +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env python3 -""" -Analyze PowerPoint presentation structure. - -Usage: - python analyze_pptx.py --file presentation.pptx -""" - -import argparse -import sys -import json -from pathlib import Path -from collections import Counter - -try: - from pptx import Presentation -except ImportError: - print("Error: 'python-pptx' package is required. Install with: pip install python-pptx", file=sys.stderr) - sys.exit(1) - - -def analyze_pptx(file_path: str) -> dict: - """Analyze presentation structure.""" - prs = Presentation(file_path) - - word_count = 0 - layouts_used = Counter() - slides_with_notes = 0 - - for slide in prs.slides: - layouts_used[slide.slide_layout.name] += 1 - - for shape in slide.shapes: - if hasattr(shape, 'text'): - word_count += len(shape.text.split()) - - if slide.has_notes_slide: - notes = slide.notes_slide.notes_text_frame.text - if notes.strip(): - slides_with_notes += 1 - - return { - "file": str(file_path), - "statistics": { - "slide_count": len(prs.slides), - "word_count": word_count, - "slides_with_notes": slides_with_notes - }, - "layouts_used": dict(layouts_used.most_common()), - "dimensions": { - "width": prs.slide_width.inches, - "height": prs.slide_height.inches - } - } - - -def main(): - parser = argparse.ArgumentParser(description='Analyze PowerPoint presentations') - parser.add_argument('--file', required=True, help='Path to PowerPoint file') - - args = parser.parse_args() - - if not Path(args.file).exists(): - print(f"Error: File not 
found: {args.file}", file=sys.stderr) - sys.exit(1) - - try: - result = analyze_pptx(args.file) - print(json.dumps(result, indent=2)) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/pptx/tools/create_pptx.py b/coderrr-skills/skills/pptx/tools/create_pptx.py deleted file mode 100644 index 56ef84a..0000000 --- a/coderrr-skills/skills/pptx/tools/create_pptx.py +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/env python3 -""" -Create PowerPoint presentations. - -Usage: - python create_pptx.py --output deck.pptx --title "Title" --slides '[...]' -""" - -import argparse -import sys -import json -from pathlib import Path - -try: - from pptx import Presentation - from pptx.util import Inches, Pt -except ImportError: - print("Error: 'python-pptx' package is required. Install with: pip install python-pptx", file=sys.stderr) - sys.exit(1) - - -def add_slide(prs, slide_spec): - """Add a slide based on specification.""" - layout_name = slide_spec.get('layout', 'content') - - # Map layout names to indices (standard template) - layout_map = { - 'title': 0, # Title Slide - 'content': 1, # Title and Content - 'section': 2, # Section Header - 'two_content': 3, # Two Content - 'comparison': 4, # Comparison - 'blank': 6 # Blank - } - - layout_idx = layout_map.get(layout_name, 1) - layout = prs.slide_layouts[layout_idx] - slide = prs.slides.add_slide(layout) - - # Add title - if hasattr(slide.shapes, 'title') and slide.shapes.title: - slide.shapes.title.text = slide_spec.get('title', '') - - # Handle different layouts - if layout_name == 'title': - # Title slide with subtitle - if len(slide.placeholders) > 1: - subtitle = slide.placeholders[1] - subtitle.text = slide_spec.get('subtitle', '') - - elif layout_name == 'content': - # Content slide with bullets - content = slide_spec.get('content', []) - if len(slide.placeholders) > 1: - body = slide.placeholders[1] - tf = body.text_frame - 
tf.text = content[0] if content else '' - for item in content[1:]: - p = tf.add_paragraph() - p.text = item - p.level = 0 - - elif layout_name == 'two_content': - # Two column layout - left_content = slide_spec.get('left', []) - right_content = slide_spec.get('right', []) - - placeholders = list(slide.placeholders) - if len(placeholders) > 1 and left_content: - tf = placeholders[1].text_frame - tf.text = left_content[0] - for item in left_content[1:]: - p = tf.add_paragraph() - p.text = item - - if len(placeholders) > 2 and right_content: - tf = placeholders[2].text_frame - tf.text = right_content[0] - for item in right_content[1:]: - p = tf.add_paragraph() - p.text = item - - # Add speaker notes - if 'notes' in slide_spec: - notes_slide = slide.notes_slide - notes_slide.notes_text_frame.text = slide_spec['notes'] - - return slide - - -def create_pptx(output_path: str, title: str, slides_spec: list): - """Create a PowerPoint presentation.""" - prs = Presentation() - - # Add title slide - title_slide_layout = prs.slide_layouts[0] - title_slide = prs.slides.add_slide(title_slide_layout) - title_slide.shapes.title.text = title - - # Add content slides - for slide_spec in slides_spec: - add_slide(prs, slide_spec) - - prs.save(output_path) - return output_path - - -def main(): - parser = argparse.ArgumentParser(description='Create PowerPoint presentations') - parser.add_argument('--output', required=True, help='Output file path (.pptx)') - parser.add_argument('--title', required=True, help='Presentation title') - parser.add_argument('--slides', required=True, help='JSON array of slide specifications') - - args = parser.parse_args() - - try: - slides = json.loads(args.slides) - except json.JSONDecodeError as e: - print(f"Error: Invalid slides JSON - {e}", file=sys.stderr) - sys.exit(1) - - try: - result = create_pptx(args.output, args.title, slides) - print(json.dumps({"status": "success", "file": result, "slides": len(slides) + 1})) - except Exception as e: - 
print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/pptx/tools/edit_pptx.py b/coderrr-skills/skills/pptx/tools/edit_pptx.py deleted file mode 100644 index 4fa825c..0000000 --- a/coderrr-skills/skills/pptx/tools/edit_pptx.py +++ /dev/null @@ -1,103 +0,0 @@ -#!/usr/bin/env python3 -""" -Edit PowerPoint presentations. - -Usage: - python edit_pptx.py --file input.pptx --output output.pptx --operations '[...]' -""" - -import argparse -import sys -import json -from pathlib import Path - -try: - from pptx import Presentation -except ImportError: - print("Error: 'python-pptx' package is required. Install with: pip install python-pptx", file=sys.stderr) - sys.exit(1) - - -def apply_operation(prs, operation): - """Apply a single edit operation.""" - action = operation.get('action') - - if action == 'add_slide': - layout_map = {'title': 0, 'content': 1, 'section': 2, 'two_content': 3, 'blank': 6} - layout_idx = layout_map.get(operation.get('layout', 'content'), 1) - layout = prs.slide_layouts[layout_idx] - slide = prs.slides.add_slide(layout) - - if hasattr(slide.shapes, 'title') and slide.shapes.title: - slide.shapes.title.text = operation.get('title', '') - - content = operation.get('content', []) - if content and len(slide.placeholders) > 1: - body = slide.placeholders[1] - tf = body.text_frame - tf.text = content[0] - for item in content[1:]: - p = tf.add_paragraph() - p.text = item - - elif action == 'update_slide': - idx = operation.get('index', 1) - 1 - if 0 <= idx < len(prs.slides): - slide = prs.slides[idx] - if 'title' in operation and hasattr(slide.shapes, 'title'): - slide.shapes.title.text = operation['title'] - - elif action == 'add_notes': - idx = operation.get('index', 1) - 1 - if 0 <= idx < len(prs.slides): - slide = prs.slides[idx] - notes_slide = slide.notes_slide - notes_slide.notes_text_frame.text = operation.get('notes', '') - - elif action == 'delete_slide': - idx = 
operation.get('index', 1) - 1 - if 0 <= idx < len(prs.slides): - rId = prs.slides._sldIdLst[idx].rId - prs.part.drop_rel(rId) - del prs.slides._sldIdLst[idx] - - -def edit_pptx(input_path: str, output_path: str, operations: list): - """Edit a PowerPoint presentation.""" - prs = Presentation(input_path) - - for operation in operations: - apply_operation(prs, operation) - - prs.save(output_path) - return output_path - - -def main(): - parser = argparse.ArgumentParser(description='Edit PowerPoint presentations') - parser.add_argument('--file', required=True, help='Input PowerPoint file') - parser.add_argument('--output', required=True, help='Output file path') - parser.add_argument('--operations', required=True, help='JSON array of operations') - - args = parser.parse_args() - - if not Path(args.file).exists(): - print(f"Error: File not found: {args.file}", file=sys.stderr) - sys.exit(1) - - try: - operations = json.loads(args.operations) - except json.JSONDecodeError as e: - print(f"Error: Invalid operations JSON - {e}", file=sys.stderr) - sys.exit(1) - - try: - result = edit_pptx(args.file, args.output, operations) - print(json.dumps({"status": "success", "file": result})) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/pptx/tools/read_pptx.py b/coderrr-skills/skills/pptx/tools/read_pptx.py deleted file mode 100644 index 18969a9..0000000 --- a/coderrr-skills/skills/pptx/tools/read_pptx.py +++ /dev/null @@ -1,112 +0,0 @@ -#!/usr/bin/env python3 -""" -Read and extract content from PowerPoint files. - -Usage: - python read_pptx.py --file presentation.pptx --format text -""" - -import argparse -import sys -import json -from pathlib import Path - -try: - from pptx import Presentation -except ImportError: - print("Error: 'python-pptx' package is required. 
Install with: pip install python-pptx", file=sys.stderr) - sys.exit(1) - - -def extract_slide_text(slide): - """Extract all text from a slide.""" - texts = [] - for shape in slide.shapes: - if hasattr(shape, 'text') and shape.text: - texts.append(shape.text) - return texts - - -def read_pptx(file_path: str, output_format: str = 'text', include_notes: bool = False): - """Read PowerPoint content.""" - prs = Presentation(file_path) - - if output_format == 'json': - result = { - "file": str(file_path), - "slide_count": len(prs.slides), - "slides": [] - } - - for i, slide in enumerate(prs.slides): - slide_data = { - "number": i + 1, - "layout": slide.slide_layout.name, - "content": extract_slide_text(slide) - } - - if include_notes and slide.has_notes_slide: - notes = slide.notes_slide.notes_text_frame.text - slide_data["notes"] = notes - - result["slides"].append(slide_data) - - return json.dumps(result, indent=2) - - elif output_format == 'markdown': - lines = [] - for i, slide in enumerate(prs.slides): - lines.append(f"## Slide {i + 1}") - lines.append("") - for text in extract_slide_text(slide): - lines.append(f"- {text}") - - if include_notes and slide.has_notes_slide: - notes = slide.notes_slide.notes_text_frame.text - if notes.strip(): - lines.append("") - lines.append(f"*Notes: {notes}*") - - lines.append("") - - return '\n'.join(lines) - - else: # text - lines = [] - for i, slide in enumerate(prs.slides): - lines.append(f"=== Slide {i + 1} ===") - for text in extract_slide_text(slide): - lines.append(text) - - if include_notes and slide.has_notes_slide: - notes = slide.notes_slide.notes_text_frame.text - if notes.strip(): - lines.append(f"[Notes: {notes}]") - - lines.append("") - - return '\n'.join(lines) - - -def main(): - parser = argparse.ArgumentParser(description='Read PowerPoint files') - parser.add_argument('--file', required=True, help='Path to PowerPoint file') - parser.add_argument('--format', choices=['text', 'json', 'markdown'], default='text') - 
parser.add_argument('--include-notes', action='store_true', help='Include speaker notes') - - args = parser.parse_args() - - if not Path(args.file).exists(): - print(f"Error: File not found: {args.file}", file=sys.stderr) - sys.exit(1) - - try: - result = read_pptx(args.file, args.format, args.include_notes) - print(result) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/skill-creator/Skills.md b/coderrr-skills/skills/skill-creator/Skills.md deleted file mode 100644 index 0eb8c08..0000000 --- a/coderrr-skills/skills/skill-creator/Skills.md +++ /dev/null @@ -1,158 +0,0 @@ ---- -name: skill-creator -description: Interactive tool for building new custom skills for Coderrr. Use this skill when the user wants to create a new skill, scaffold a skill structure, generate tool templates, or set up skill documentation. Guides through the complete skill creation process. ---- - -This skill helps create new skills for the Coderrr marketplace. It scaffolds the required file structure, generates tool templates, and creates proper documentation. - -The user wants to create a new skill. They may provide a name, description, and list of tools they want to include. - -## Approach - -When creating a new skill: -1. **Initialize**: Use `init_skill` to scaffold the structure -2. **Add tools**: Use `add_tool` for each tool in the skill -3. **Finalize**: Use `finalize_skill` to validate and complete - -## Tools - -### init_skill - -Scaffolds a new skill directory structure. 
- -```bash -python tools/init_skill.py --name <skill-name> --description <desc> --output-dir <path> [--author <name>] -``` - -**Arguments:** -- `--name` (required): Skill name (lowercase, hyphens for spaces) -- `--description` (required): One-line description of the skill -- `--output-dir` (required): Directory to create skill in -- `--author` (optional): Skill author name - -**Creates:** -``` -skill-name/ -ā”œā”€ā”€ Skills.md # Documentation template -ā”œā”€ā”€ requirements.txt # Empty dependencies file -└── tools/ # Empty tools directory -``` - -**When to use:** -- Starting a new skill project -- Setting up skill structure -- Creating skill scaffolding - ---- - -### add_tool - -Generates a tool template with proper structure. - -```bash -python tools/add_tool.py --skill-dir <path> --tool-name <name> --description <desc> [--args <json>] -``` - -**Arguments:** -- `--skill-dir` (required): Path to skill directory -- `--tool-name` (required): Tool name (lowercase, underscores) -- `--description` (required): What the tool does -- `--args` (optional): JSON array of argument definitions - -**Args JSON:** -```json -[ - {"name": "input", "type": "string", "required": true, "help": "Input file path"}, - {"name": "output", "type": "string", "required": false, "help": "Output file path"}, - {"name": "verbose", "type": "flag", "help": "Enable verbose output"} -] -``` - -**When to use:** -- Adding tools to a skill -- Generating tool boilerplate -- Setting up argument parsing - ---- - -### finalize_skill - -Validates and finalizes a skill for publishing. 
- -```bash -python tools/finalize_skill.py --skill-dir <path> [--validate-only] -``` - -**Arguments:** -- `--skill-dir` (required): Path to skill directory -- `--validate-only` (optional): Only validate, don't modify - -**Validates:** -- Skills.md has required fields -- All tools have valid Python syntax -- requirements.txt is present -- Tools have docstrings and argparse - -**When to use:** -- Before publishing a skill -- Checking skill structure -- Validating tool implementations - ---- - -### list_templates - -Lists available tool templates for common patterns. - -```bash -python tools/list_templates.py [--category <category>] -``` - -**Categories:** -- `file` - File processing tools -- `web` - Web/HTTP tools -- `data` - Data manipulation tools -- `cli` - CLI interaction tools - -**When to use:** -- Finding template inspiration -- Exploring common patterns -- Starting with working examples - -## Skill Creation Workflow - -### Step 1: Initialize -```bash -python tools/init_skill.py --name my-skill --description "Description here" --output-dir ./skills -``` - -### Step 2: Add Tools -```bash -python tools/add_tool.py --skill-dir ./skills/my-skill --tool-name process_data --description "Process data files" --args '[{"name": "input", "type": "string", "required": true, "help": "Input file"}]' -``` - -### Step 3: Implement Tool Logic -Edit the generated tool file to add your implementation. - -### Step 4: Validate -```bash -python tools/finalize_skill.py --skill-dir ./skills/my-skill --validate-only -``` - -### Step 5: Finalize -```bash -python tools/finalize_skill.py --skill-dir ./skills/my-skill -``` - -## Best Practices - -1. **Use descriptive names** - Both skill and tool names should be clear -2. **Write detailed descriptions** - Help users understand when to use the skill -3. **Include examples** - Show real usage in Skills.md -4. **Handle errors gracefully** - Use proper exit codes and stderr -5. **Output JSON** - Structured output is easier to parse -6. 
**Document arguments** - Help text for every argument - -## Dependencies - -None - uses Python's standard library only. diff --git a/coderrr-skills/skills/skill-creator/tools/add_tool.py b/coderrr-skills/skills/skill-creator/tools/add_tool.py deleted file mode 100644 index ea86986..0000000 --- a/coderrr-skills/skills/skill-creator/tools/add_tool.py +++ /dev/null @@ -1,158 +0,0 @@ -#!/usr/bin/env python3 -""" -Add a tool template to a skill. - -Usage: - python add_tool.py --skill-dir ./my-skill --tool-name process_data --description "Process data" -""" - -import argparse -import sys -import json -from pathlib import Path - - -TOOL_TEMPLATE = '''#!/usr/bin/env python3 -""" -{description} - -Usage: - python {tool_name}.py {usage_args} -""" - -import argparse -import sys -import json - - -def {function_name}({function_args}): - """ - {description} - - Args: -{args_docstring} - - Returns: - dict: Result of the operation - """ - # TODO: Implement tool logic here - result = {{ - "status": "success" - }} - - return result - - -def main(): - parser = argparse.ArgumentParser(description='{description}') -{arg_parser_code} - - args = parser.parse_args() - - try: - result = {function_name}({function_call_args}) - print(json.dumps(result, indent=2)) - except Exception as e: - print(f"Error: {{e}}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() -''' - - -def generate_tool(skill_dir: str, tool_name: str, description: str, args_spec: list = None): - """Generate a tool template.""" - tools_dir = Path(skill_dir) / 'tools' - tools_dir.mkdir(exist_ok=True) - - args_spec = args_spec or [] - - # Generate code parts - function_name = tool_name.replace('-', '_') - function_args = ', '.join(arg['name'] for arg in args_spec) if args_spec else '' - - # Usage line args - usage_parts = [] - for arg in args_spec: - if arg.get('required'): - usage_parts.append(f"--{arg['name']} <{arg['name']}>") - else: - usage_parts.append(f"[--{arg['name']} <{arg['name']}>]") - 
usage_args = ' '.join(usage_parts) if usage_parts else '[options]' - - # Args docstring - if args_spec: - args_docstring = '\n'.join(f" {arg['name']}: {arg.get('help', 'No description')}" for arg in args_spec) - else: - args_docstring = ' None' - - # Argparser code - arg_parser_lines = [] - for arg in args_spec: - arg_type = arg.get('type', 'string') - required = arg.get('required', False) - help_text = arg.get('help', '') - - if arg_type == 'flag': - arg_parser_lines.append(f" parser.add_argument('--{arg['name']}', action='store_true', help='{help_text}')") - else: - req_str = ', required=True' if required else '' - arg_parser_lines.append(f" parser.add_argument('--{arg['name']}'{req_str}, help='{help_text}')") - - arg_parser_code = '\n'.join(arg_parser_lines) if arg_parser_lines else " # No arguments defined" - - # Function call args - function_call_args = ', '.join(f"args.{arg['name']}" for arg in args_spec) if args_spec else '' - - # Generate code - code = TOOL_TEMPLATE.format( - description=description, - tool_name=tool_name, - usage_args=usage_args, - function_name=function_name, - function_args=function_args, - args_docstring=args_docstring, - arg_parser_code=arg_parser_code, - function_call_args=function_call_args - ) - - # Write file - tool_file = tools_dir / f'{tool_name}.py' - tool_file.write_text(code) - - return { - "status": "success", - "file": str(tool_file), - "tool_name": tool_name - } - - -def main(): - parser = argparse.ArgumentParser(description='Add a tool to a skill') - parser.add_argument('--skill-dir', required=True, help='Skill directory') - parser.add_argument('--tool-name', required=True, help='Tool name') - parser.add_argument('--description', required=True, help='Tool description') - parser.add_argument('--args', help='JSON array of argument definitions') - - args = parser.parse_args() - - args_spec = [] - if args.args: - try: - args_spec = json.loads(args.args) - except json.JSONDecodeError as e: - print(f"Error: Invalid args JSON - 
{e}", file=sys.stderr) - sys.exit(1) - - try: - result = generate_tool(args.skill_dir, args.tool_name, args.description, args_spec) - print(json.dumps(result, indent=2)) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/skill-creator/tools/finalize_skill.py b/coderrr-skills/skills/skill-creator/tools/finalize_skill.py deleted file mode 100644 index 69aebb8..0000000 --- a/coderrr-skills/skills/skill-creator/tools/finalize_skill.py +++ /dev/null @@ -1,112 +0,0 @@ -#!/usr/bin/env python3 -""" -Validate and finalize a skill for publishing. - -Usage: - python finalize_skill.py --skill-dir ./my-skill -""" - -import argparse -import sys -import json -import ast -from pathlib import Path -import re - - -def validate_skill(skill_dir: str) -> dict: - """Validate skill structure and files.""" - skill_path = Path(skill_dir) - issues = [] - warnings = [] - - # Check Skills.md exists - skills_md = skill_path / 'Skills.md' - if not skills_md.exists(): - issues.append("Skills.md not found") - else: - content = skills_md.read_text() - - # Check frontmatter - if not content.startswith('---'): - issues.append("Skills.md missing YAML frontmatter") - else: - # Extract frontmatter - match = re.match(r'^---\n(.*?)\n---', content, re.DOTALL) - if match: - frontmatter = match.group(1) - if 'name:' not in frontmatter: - issues.append("Skills.md missing 'name' in frontmatter") - if 'description:' not in frontmatter: - issues.append("Skills.md missing 'description' in frontmatter") - - # Check requirements.txt exists - if not (skill_path / 'requirements.txt').exists(): - warnings.append("requirements.txt not found (optional but recommended)") - - # Check tools directory - tools_dir = skill_path / 'tools' - if not tools_dir.exists(): - issues.append("tools/ directory not found") - else: - tool_files = list(tools_dir.glob('*.py')) - if not tool_files: - warnings.append("No Python tool files 
found in tools/") - - # Validate each tool - for tool_file in tool_files: - try: - source = tool_file.read_text() - ast.parse(source) - - # Check for argparse - if 'argparse' not in source: - warnings.append(f"{tool_file.name}: No argparse import found") - - # Check for docstring - if '"""' not in source and "'''" not in source: - warnings.append(f"{tool_file.name}: No docstring found") - - except SyntaxError as e: - issues.append(f"{tool_file.name}: Syntax error at line {e.lineno}") - - return { - "valid": len(issues) == 0, - "issues": issues, - "warnings": warnings, - "tool_count": len(list((skill_path / 'tools').glob('*.py'))) if (skill_path / 'tools').exists() else 0 - } - - -def main(): - parser = argparse.ArgumentParser(description='Validate and finalize a skill') - parser.add_argument('--skill-dir', required=True, help='Skill directory') - parser.add_argument('--validate-only', action='store_true', help='Only validate') - - args = parser.parse_args() - - if not Path(args.skill_dir).exists(): - print(f"Error: Directory not found: {args.skill_dir}", file=sys.stderr) - sys.exit(1) - - try: - result = validate_skill(args.skill_dir) - result["skill_dir"] = args.skill_dir - - if result["valid"]: - result["message"] = "Skill is valid and ready for publishing" - else: - result["message"] = "Skill has issues that must be fixed" - - print(json.dumps(result, indent=2)) - - if not result["valid"]: - sys.exit(1) - - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/skill-creator/tools/init_skill.py b/coderrr-skills/skills/skill-creator/tools/init_skill.py deleted file mode 100644 index 95213a7..0000000 --- a/coderrr-skills/skills/skill-creator/tools/init_skill.py +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env python3 -""" -Initialize a new skill directory structure. 
- -Usage: - python init_skill.py --name my-skill --description "Description" --output-dir ./skills -""" - -import argparse -import sys -import json -from pathlib import Path - - -SKILLS_MD_TEMPLATE = '''--- -name: {name} -description: {description} ---- - -This skill provides [detailed description of what the skill does]. - -The user provides [what input the user gives]. They may want to [what they want to accomplish]. - -## Approach - -Before invoking tools, understand [how to decide which tool to use]: -- **Scenario 1**: Use `tool_name` for [use case] -- **Scenario 2**: Use `other_tool` for [other use case] - -## Tools - -[Add tool documentation here] - -## Common Patterns - -[Add usage examples here] - -## Best Practices - -1. [Best practice 1] -2. [Best practice 2] - -## Dependencies - -[List dependencies or "None - uses Python's standard library only."] -''' - - -def init_skill(name: str, description: str, output_dir: str, author: str = None): - """Initialize a new skill directory.""" - skill_dir = Path(output_dir) / name - - # Create directories - skill_dir.mkdir(parents=True, exist_ok=True) - (skill_dir / 'tools').mkdir(exist_ok=True) - - # Create Skills.md - skills_md = SKILLS_MD_TEMPLATE.format(name=name, description=description) - (skill_dir / 'Skills.md').write_text(skills_md) - - # Create empty requirements.txt - (skill_dir / 'requirements.txt').write_text('# Add dependencies here, one per line\n') - - return { - "status": "success", - "skill_dir": str(skill_dir), - "files_created": [ - str(skill_dir / 'Skills.md'), - str(skill_dir / 'requirements.txt'), - str(skill_dir / 'tools') - ] - } - - -def main(): - parser = argparse.ArgumentParser(description='Initialize a new skill') - parser.add_argument('--name', required=True, help='Skill name') - parser.add_argument('--description', required=True, help='Skill description') - parser.add_argument('--output-dir', required=True, help='Output directory') - parser.add_argument('--author', help='Skill author') - 
- args = parser.parse_args() - - # Validate name - if not args.name.replace('-', '').replace('_', '').isalnum(): - print("Error: Skill name must be alphanumeric with hyphens/underscores", file=sys.stderr) - sys.exit(1) - - try: - result = init_skill(args.name, args.description, args.output_dir, args.author) - print(json.dumps(result, indent=2)) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/skill-creator/tools/list_templates.py b/coderrr-skills/skills/skill-creator/tools/list_templates.py deleted file mode 100644 index f76fe85..0000000 --- a/coderrr-skills/skills/skill-creator/tools/list_templates.py +++ /dev/null @@ -1,113 +0,0 @@ -#!/usr/bin/env python3 -""" -List available tool templates. - -Usage: - python list_templates.py --category file -""" - -import argparse -import sys -import json - - -TEMPLATES = { - "file": [ - { - "name": "file_reader", - "description": "Read and parse various file formats", - "args": ["--file", "--format"] - }, - { - "name": "file_writer", - "description": "Write content to files with formatting", - "args": ["--output", "--content", "--format"] - }, - { - "name": "file_converter", - "description": "Convert between file formats", - "args": ["--input", "--output", "--from-format", "--to-format"] - } - ], - "web": [ - { - "name": "http_client", - "description": "Make HTTP requests with custom headers", - "args": ["--url", "--method", "--headers", "--data"] - }, - { - "name": "html_parser", - "description": "Parse and extract from HTML", - "args": ["--html", "--selector", "--format"] - }, - { - "name": "url_validator", - "description": "Validate and analyze URLs", - "args": ["--url", "--check-accessibility"] - } - ], - "data": [ - { - "name": "json_processor", - "description": "Process and transform JSON data", - "args": ["--input", "--query", "--transform"] - }, - { - "name": "csv_handler", - "description": "Read, write, and 
transform CSV", - "args": ["--file", "--columns", "--filter"] - }, - { - "name": "data_validator", - "description": "Validate data against schemas", - "args": ["--data", "--schema", "--format"] - } - ], - "cli": [ - { - "name": "command_runner", - "description": "Execute shell commands safely", - "args": ["--command", "--timeout", "--capture"] - }, - { - "name": "interactive_prompt", - "description": "Interactive user prompts", - "args": ["--prompt", "--type", "--default"] - } - ] -} - - -def list_templates(category: str = None): - """List available templates.""" - if category: - if category not in TEMPLATES: - return { - "error": f"Unknown category: {category}", - "available": list(TEMPLATES.keys()) - } - return { - "category": category, - "templates": TEMPLATES[category] - } - else: - return { - "categories": list(TEMPLATES.keys()), - "total_templates": sum(len(t) for t in TEMPLATES.values()), - "templates": TEMPLATES - } - - -def main(): - parser = argparse.ArgumentParser(description='List tool templates') - parser.add_argument('--category', choices=['file', 'web', 'data', 'cli'], - help='Filter by category') - - args = parser.parse_args() - - result = list_templates(args.category) - print(json.dumps(result, indent=2)) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/web-scraper/Skills.md b/coderrr-skills/skills/web-scraper/Skills.md deleted file mode 100644 index 8a1362e..0000000 --- a/coderrr-skills/skills/web-scraper/Skills.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -name: web-scraper -description: Fetch, parse, and extract content from web pages. Use this skill when the user asks to scrape websites, extract text from URLs, parse HTML content, download web pages, or analyze website content. Handles HTTP requests, HTML parsing, CSS selector targeting, and clean text extraction. ---- - -This skill enables fetching and parsing web content with production-grade error handling. 
It handles the full pipeline from HTTP request to clean text output, with support for CSS selectors to target specific elements. - -The user provides a URL or HTML content to process. They may want the raw HTML, extracted text, or content from specific elements on the page. - -## Approach - -Before invoking tools, understand what the user needs: -- **Raw HTML**: Use `fetch_page` alone when they need the full page source -- **Clean Text**: Chain `fetch_page` with `extract_text` for readable content -- **Specific Elements**: Use `--selector` to target navigation, articles, headers, or any CSS-selectable content -- **Batch Processing**: For multiple URLs, invoke `fetch_page` sequentially and aggregate results - -## Tools - -### fetch_page - -Fetches raw HTML content from any URL. Includes proper User-Agent headers to avoid bot detection. - -```bash -python tools/fetch_page.py --url <url> [--timeout <seconds>] -``` - -**Arguments:** -- `--url` (required): The complete URL including http:// or https:// -- `--timeout` (optional): Request timeout in seconds (default: 30) - -**Output:** Raw HTML to stdout. Errors to stderr with appropriate exit codes. - -**When to use:** -- User wants to see the page source -- First step before text extraction -- Checking if a URL is accessible -- Downloading page content for later analysis - ---- - -### extract_text - -Parses HTML and extracts clean, readable text. Automatically removes scripts, styles, navigation, headers, and footers for cleaner output. - -```bash -python tools/extract_text.py [--html <html_string>] [--selector <css_selector>] -``` - -**Arguments:** -- `--html` (optional): HTML string to parse. If omitted, reads from stdin (for piping) -- `--selector` (optional): CSS selector to target specific elements (e.g., `.article`, `#main`, `h1, h2, h3`) - -**Output:** Clean text with normalized whitespace. 
- -**When to use:** -- User wants readable text, not HTML -- Extracting article content from news sites -- Getting text from specific page sections -- Processing HTML that was previously fetched or provided - -## Common Patterns - -### Full Page Text Extraction -```bash -python tools/fetch_page.py --url https://example.com | python tools/extract_text.py -``` - -### Extract Only Main Content -```bash -python tools/fetch_page.py --url https://blog.example.com/post | python tools/extract_text.py --selector "article, .post-content, main" -``` - -### Extract Headlines -```bash -python tools/fetch_page.py --url https://news.site.com | python tools/extract_text.py --selector "h1, h2, h3" -``` - -### Check Page Accessibility -```bash -python tools/fetch_page.py --url https://example.com --timeout 10 -``` - -## Best Practices - -1. **Always handle errors gracefully** - Network requests can fail. Check exit codes and stderr. -2. **Use specific selectors when possible** - `.article-body` gives cleaner results than extracting everything. -3. **Respect rate limits** - Add delays between requests when processing multiple URLs. -4. **Verify URLs** - Ensure URLs include the protocol (http:// or https://). -5. **Consider timeouts** - Long timeouts for slow servers, short for quick checks. - -## Error Handling - -| Exit Code | Meaning | Recovery | -|-----------|---------|----------| -| 0 | Success | - | -| 1 | Network error, invalid URL, or HTTP error | Check URL format, verify site is accessible | -| 2 | HTML parsing error | Verify HTML is valid, check selector syntax | -| 3 | Invalid CSS selector | Fix selector syntax | - -## Dependencies - -Requires `requests>=2.28.0` and `beautifulsoup4>=4.11.0`. These are automatically installed with the skill. 
diff --git a/coderrr-skills/skills/web-scraper/requirements.txt b/coderrr-skills/skills/web-scraper/requirements.txt deleted file mode 100644 index a057c2e..0000000 --- a/coderrr-skills/skills/web-scraper/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -requests>=2.28.0 -beautifulsoup4>=4.11.0 diff --git a/coderrr-skills/skills/web-scraper/tools/extract_text.py b/coderrr-skills/skills/web-scraper/tools/extract_text.py deleted file mode 100644 index 748238d..0000000 --- a/coderrr-skills/skills/web-scraper/tools/extract_text.py +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env python3 -""" -Extract text content from HTML. - -This tool parses HTML and extracts clean, readable text. It can read HTML -from a command-line argument or from stdin, making it easy to pipe from -other commands like fetch_page. - -Usage: - python extract_text.py --html "<div>Hello World</div>" - cat page.html | python extract_text.py - python extract_text.py --selector ".main-content" < page.html - -Exit Codes: - 0 - Success - 2 - HTML parsing error - 3 - Invalid CSS selector -""" - -import argparse -import sys -import re - -try: - from bs4 import BeautifulSoup -except ImportError: - print("Error: 'beautifulsoup4' package is required. Install with: pip install beautifulsoup4", file=sys.stderr) - sys.exit(1) - - -def extract_text(html: str, selector: str = None) -> str: - """ - Extract text content from HTML. 
- - Args: - html: The HTML content to parse - selector: Optional CSS selector to target specific elements - - Returns: - Clean text extracted from the HTML - - Raises: - ValueError: If the selector is invalid - """ - try: - soup = BeautifulSoup(html, 'html.parser') - except Exception as e: - raise ValueError(f"Failed to parse HTML: {e}") - - # Remove script and style elements - for element in soup(['script', 'style', 'noscript', 'header', 'footer', 'nav']): - element.decompose() - - if selector: - try: - elements = soup.select(selector) - if not elements: - return "" - text_parts = [elem.get_text(separator=' ', strip=True) for elem in elements] - text = '\n\n'.join(text_parts) - except Exception as e: - raise ValueError(f"Invalid CSS selector '{selector}': {e}") - else: - text = soup.get_text(separator=' ', strip=True) - - # Clean up whitespace - text = re.sub(r'\s+', ' ', text) - text = re.sub(r'\n\s*\n', '\n\n', text) - text = text.strip() - - return text - - -def main(): - parser = argparse.ArgumentParser( - description='Extract text content from HTML', - formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=''' -Examples: - python extract_text.py --html "<div>Hello World</div>" - cat page.html | python extract_text.py - python extract_text.py --selector "article" < page.html - echo "<p>Test</p>" | python extract_text.py --selector "p" - ''' - ) - parser.add_argument( - '--html', - help='HTML content to parse (if not provided, reads from stdin)' - ) - parser.add_argument( - '--selector', - help='CSS selector to target specific elements (e.g., ".content", "article", "h1")' - ) - - args = parser.parse_args() - - # Get HTML from argument or stdin - if args.html: - html = args.html - else: - if sys.stdin.isatty(): - print("Error: No HTML provided. 
Use --html argument or pipe HTML to stdin.", file=sys.stderr) - sys.exit(2) - html = sys.stdin.read() - - if not html.strip(): - print("Error: Empty HTML content", file=sys.stderr) - sys.exit(2) - - try: - text = extract_text(html, args.selector) - if text: - print(text) - else: - if args.selector: - print(f"No content found matching selector: {args.selector}", file=sys.stderr) - except ValueError as e: - if "selector" in str(e).lower(): - print(f"Error: {e}", file=sys.stderr) - sys.exit(3) - else: - print(f"Error: {e}", file=sys.stderr) - sys.exit(2) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/web-scraper/tools/fetch_page.py b/coderrr-skills/skills/web-scraper/tools/fetch_page.py deleted file mode 100644 index 0915771..0000000 --- a/coderrr-skills/skills/web-scraper/tools/fetch_page.py +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env python3 -""" -Fetch HTML content from a URL. - -This tool makes an HTTP GET request to the specified URL and outputs -the HTML content to stdout. - -Usage: - python fetch_page.py --url https://example.com - -Exit Codes: - 0 - Success - 1 - Network error or invalid URL -""" - -import argparse -import sys - -try: - import requests -except ImportError: - print("Error: 'requests' package is required. Install with: pip install requests", file=sys.stderr) - sys.exit(1) - - -def fetch_page(url: str, timeout: int = 30) -> str: - """ - Fetch the HTML content from a URL. 
- - Args: - url: The URL to fetch - timeout: Request timeout in seconds - - Returns: - The HTML content as a string - - Raises: - requests.RequestException: If the request fails - """ - headers = { - 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36' - } - - response = requests.get(url, headers=headers, timeout=timeout) - response.raise_for_status() - - return response.text - - -def main(): - parser = argparse.ArgumentParser( - description='Fetch HTML content from a URL', - formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=''' -Examples: - python fetch_page.py --url https://example.com - python fetch_page.py --url https://example.com --timeout 60 - ''' - ) - parser.add_argument( - '--url', - required=True, - help='The URL to fetch' - ) - parser.add_argument( - '--timeout', - type=int, - default=30, - help='Request timeout in seconds (default: 30)' - ) - - args = parser.parse_args() - - try: - html = fetch_page(args.url, args.timeout) - print(html) - except requests.exceptions.MissingSchema: - print(f"Error: Invalid URL format. 
Make sure to include http:// or https://", file=sys.stderr) - sys.exit(1) - except requests.exceptions.ConnectionError: - print(f"Error: Failed to connect to {args.url}", file=sys.stderr) - sys.exit(1) - except requests.exceptions.Timeout: - print(f"Error: Request timed out after {args.timeout} seconds", file=sys.stderr) - sys.exit(1) - except requests.exceptions.HTTPError as e: - print(f"Error: HTTP {e.response.status_code} - {e.response.reason}", file=sys.stderr) - sys.exit(1) - except requests.exceptions.RequestException as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/webapp-testing/Skills.md b/coderrr-skills/skills/webapp-testing/Skills.md deleted file mode 100644 index 25c9e8c..0000000 --- a/coderrr-skills/skills/webapp-testing/Skills.md +++ /dev/null @@ -1,211 +0,0 @@ ---- -name: webapp-testing -description: Test local web applications using Playwright browser automation. Use this skill when the user wants to test web applications, automate browser interactions, take screenshots, verify UI elements, or run end-to-end tests on web pages. ---- - -This skill provides browser automation and testing capabilities using Playwright. It handles page navigation, element interaction, screenshot capture, and test verification. - -The user wants to test a web application. They may want to verify UI elements, test user flows, check responsiveness, or capture screenshots. - -## Approach - -When testing web applications: -1. **Start**: Use `start_browser` to launch browser session -2. **Navigate**: Use `navigate` to open pages -3. **Interact**: Use `interact` to click, type, or scroll -4. **Verify**: Use `verify` to check elements and content -5. **Capture**: Use `screenshot` to save visual state -6. **Report**: Use `generate_report` to summarize results - -## Tools - -### start_browser - -Launches a browser session for testing. 
- -```bash -python tools/start_browser.py [--browser <chromium|firefox|webkit>] [--headless] [--viewport <WxH>] -``` - -**Arguments:** -- `--browser` (optional): Browser engine (default: chromium) -- `--headless` (optional): Run without visible window -- `--viewport` (optional): Viewport size (e.g., "1920x1080") - -**Output:** Session ID for subsequent commands. - -**When to use:** -- Starting a test session -- Configuring browser options -- Setting viewport for responsive testing - ---- - -### navigate - -Navigates to a URL. - -```bash -python tools/navigate.py --session <id> --url <url> [--wait-until <event>] -``` - -**Arguments:** -- `--session` (required): Session ID from start_browser -- `--url` (required): URL to navigate to -- `--wait-until` (optional): Wait condition - `load`, `domcontentloaded`, `networkidle` - -**When to use:** -- Opening test pages -- Navigating between routes -- Starting user flows - ---- - -### interact - -Interacts with page elements. - -```bash -python tools/interact.py --session <id> --action <action> --selector <selector> [--value <value>] -``` - -**Arguments:** -- `--session` (required): Session ID -- `--action` (required): Action - `click`, `type`, `fill`, `hover`, `scroll`, `select` -- `--selector` (required): CSS selector or text selector -- `--value` (optional): Value for type/fill/select actions - -**Selector formats:** -- CSS: `#id`, `.class`, `button[type="submit"]` -- Text: `text=Login`, `text="Sign Up"` -- Role: `role=button[name="Submit"]` - -**When to use:** -- Clicking buttons -- Filling forms -- Hovering for tooltips -- Scrolling pages - ---- - -### verify - -Verifies page state and elements. 
- -```bash -python tools/verify.py --session <id> --check <type> [--selector <selector>] [--expected <value>] -``` - -**Arguments:** -- `--session` (required): Session ID -- `--check` (required): Check type - `visible`, `hidden`, `text`, `value`, `title`, `url` -- `--selector` (optional): Element selector (for element checks) -- `--expected` (optional): Expected value for comparison - -**When to use:** -- Verifying element visibility -- Checking text content -- Validating form values -- Confirming navigation - ---- - -### screenshot - -Captures page screenshot. - -```bash -python tools/screenshot.py --session <id> --output <path> [--selector <selector>] [--full-page] -``` - -**Arguments:** -- `--session` (required): Session ID -- `--output` (required): Output file path -- `--selector` (optional): Capture specific element only -- `--full-page` (optional): Capture entire scrollable page - -**When to use:** -- Visual regression testing -- Documenting test results -- Bug reporting -- Before/after comparisons - ---- - -### generate_report - -Generates test report from session. 
- -```bash -python tools/generate_report.py --session <id> --output <path> [--format <html|json|markdown>] -``` - -**Arguments:** -- `--session` (required): Session ID -- `--output` (required): Report output path -- `--format` (optional): Report format (default: html) - -**When to use:** -- Summarizing test results -- Creating documentation -- Sharing results - -## Common Patterns - -### Test Login Flow -```bash -# Start browser -python tools/start_browser.py --headless -# Navigate to login page -python tools/navigate.py --session $SESSION --url http://localhost:3000/login -# Fill credentials -python tools/interact.py --session $SESSION --action fill --selector "#email" --value "test@example.com" -python tools/interact.py --session $SESSION --action fill --selector "#password" --value "password123" -# Click login -python tools/interact.py --session $SESSION --action click --selector "button[type=submit]" -# Verify success -python tools/verify.py --session $SESSION --check url --expected "/dashboard" -``` - -### Responsive Testing -```bash -# Mobile viewport -python tools/start_browser.py --viewport 375x667 -python tools/navigate.py --session $SESSION --url http://localhost:3000 -python tools/screenshot.py --session $SESSION --output mobile.png - -# Desktop viewport -python tools/start_browser.py --viewport 1920x1080 -python tools/navigate.py --session $SESSION --url http://localhost:3000 -python tools/screenshot.py --session $SESSION --output desktop.png -``` - -### Visual Regression -```bash -python tools/start_browser.py --headless -python tools/navigate.py --session $SESSION --url http://localhost:3000 -python tools/screenshot.py --session $SESSION --output current.png --full-page -``` - -## Best Practices - -1. **Use headless for CI** - No display needed in pipelines -2. **Wait for network idle** - Ensure page fully loaded -3. **Prefer role selectors** - More resilient than CSS -4. **Take screenshots on failure** - Helps debugging -5. 
**Clean up sessions** - Don't leave browsers running - -## Viewport Presets - -| Device | Viewport | -|--------|----------| -| Mobile S | 320x568 | -| Mobile M | 375x667 | -| Mobile L | 425x812 | -| Tablet | 768x1024 | -| Laptop | 1366x768 | -| Desktop | 1920x1080 | - -## Dependencies - -Requires `playwright>=1.40.0`. Run `playwright install` after pip install. diff --git a/coderrr-skills/skills/webapp-testing/requirements.txt b/coderrr-skills/skills/webapp-testing/requirements.txt deleted file mode 100644 index 4777061..0000000 --- a/coderrr-skills/skills/webapp-testing/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -playwright>=1.40.0 diff --git a/coderrr-skills/skills/webapp-testing/tools/interact.py b/coderrr-skills/skills/webapp-testing/tools/interact.py deleted file mode 100644 index 97fd90f..0000000 --- a/coderrr-skills/skills/webapp-testing/tools/interact.py +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env python3 -""" -Interact with page elements. - -Usage: - python interact.py --session abc123 --action click --selector "#submit-btn" -""" - -import argparse -import sys -import json -from pathlib import Path - -SESSIONS_DIR = Path.home() / '.coderrr' / 'playwright_sessions' - - -def interact(session_id: str, action: str, selector: str, value: str = None): - """Perform interaction on element.""" - session_file = SESSIONS_DIR / f"{session_id}.json" - - if not session_file.exists(): - raise ValueError(f"Session not found: {session_id}") - - session = json.loads(session_file.read_text()) - - # Record interaction - interaction = { - "type": "interact", - "action": action, - "selector": selector - } - if value: - interaction["value"] = value - - session["actions"].append(interaction) - session_file.write_text(json.dumps(session, indent=2)) - - return { - "status": "success", - "session_id": session_id, - "action": action, - "selector": selector, - "value": value - } - - -def main(): - parser = argparse.ArgumentParser(description='Interact with elements') - 
parser.add_argument('--session', required=True, help='Session ID') - parser.add_argument('--action', required=True, - choices=['click', 'type', 'fill', 'hover', 'scroll', 'select']) - parser.add_argument('--selector', required=True, help='Element selector') - parser.add_argument('--value', help='Value for type/fill/select') - - args = parser.parse_args() - - try: - result = interact(args.session, args.action, args.selector, args.value) - print(json.dumps(result, indent=2)) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/webapp-testing/tools/navigate.py b/coderrr-skills/skills/webapp-testing/tools/navigate.py deleted file mode 100644 index 1ee503b..0000000 --- a/coderrr-skills/skills/webapp-testing/tools/navigate.py +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env python3 -""" -Navigate to a URL in browser session. - -Usage: - python navigate.py --session abc123 --url http://localhost:3000 -""" - -import argparse -import sys -import json -from pathlib import Path - -SESSIONS_DIR = Path.home() / '.coderrr' / 'playwright_sessions' - - -def navigate(session_id: str, url: str, wait_until: str = 'load'): - """Navigate to URL.""" - session_file = SESSIONS_DIR / f"{session_id}.json" - - if not session_file.exists(): - raise ValueError(f"Session not found: {session_id}") - - session = json.loads(session_file.read_text()) - - # Record navigation action - session["actions"].append({ - "type": "navigate", - "url": url, - "wait_until": wait_until - }) - session["current_url"] = url - - session_file.write_text(json.dumps(session, indent=2)) - - return { - "status": "success", - "session_id": session_id, - "url": url, - "wait_until": wait_until - } - - -def main(): - parser = argparse.ArgumentParser(description='Navigate to URL') - parser.add_argument('--session', required=True, help='Session ID') - parser.add_argument('--url', required=True, help='URL to navigate to') - 
parser.add_argument('--wait-until', default='load', - choices=['load', 'domcontentloaded', 'networkidle']) - - args = parser.parse_args() - - try: - result = navigate(args.session, args.url, args.wait_until) - print(json.dumps(result, indent=2)) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/webapp-testing/tools/screenshot.py b/coderrr-skills/skills/webapp-testing/tools/screenshot.py deleted file mode 100644 index 283e90e..0000000 --- a/coderrr-skills/skills/webapp-testing/tools/screenshot.py +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env python3 -""" -Capture page screenshot. - -Usage: - python screenshot.py --session abc123 --output ./screenshot.png -""" - -import argparse -import sys -import json -from pathlib import Path - -SESSIONS_DIR = Path.home() / '.coderrr' / 'playwright_sessions' - - -def screenshot(session_id: str, output: str, selector: str = None, full_page: bool = False): - """Capture screenshot.""" - session_file = SESSIONS_DIR / f"{session_id}.json" - - if not session_file.exists(): - raise ValueError(f"Session not found: {session_id}") - - session = json.loads(session_file.read_text()) - - # Record screenshot action - screenshot_action = { - "type": "screenshot", - "output": output, - "selector": selector, - "full_page": full_page - } - - session["actions"].append(screenshot_action) - session_file.write_text(json.dumps(session, indent=2)) - - # Create placeholder file (in real implementation, would capture actual screenshot) - output_path = Path(output) - output_path.parent.mkdir(parents=True, exist_ok=True) - output_path.write_text("Screenshot placeholder - use actual Playwright for real capture") - - return { - "status": "success", - "session_id": session_id, - "output": str(output_path.absolute()), - "full_page": full_page, - "selector": selector - } - - -def main(): - parser = argparse.ArgumentParser(description='Capture screenshot') - 
parser.add_argument('--session', required=True, help='Session ID') - parser.add_argument('--output', required=True, help='Output file path') - parser.add_argument('--selector', help='Capture specific element') - parser.add_argument('--full-page', action='store_true', help='Capture full page') - - args = parser.parse_args() - - try: - result = screenshot(args.session, args.output, args.selector, args.full_page) - print(json.dumps(result, indent=2)) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/webapp-testing/tools/start_browser.py b/coderrr-skills/skills/webapp-testing/tools/start_browser.py deleted file mode 100644 index 71c0f5d..0000000 --- a/coderrr-skills/skills/webapp-testing/tools/start_browser.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python3 -""" -Start a browser testing session. - -Usage: - python start_browser.py --browser chromium --headless -""" - -import argparse -import sys -import json -import uuid -from pathlib import Path - -try: - from playwright.sync_api import sync_playwright -except ImportError: - print("Error: 'playwright' package is required. 
Install with: pip install playwright && playwright install", file=sys.stderr) - sys.exit(1) - - -# Session storage (in real implementation, use proper session management) -SESSIONS_DIR = Path.home() / '.coderrr' / 'playwright_sessions' - - -def start_browser(browser: str = 'chromium', headless: bool = False, viewport: str = None): - """Start browser session.""" - SESSIONS_DIR.mkdir(parents=True, exist_ok=True) - - session_id = str(uuid.uuid4())[:8] - - # Parse viewport - width, height = 1280, 720 - if viewport: - parts = viewport.lower().split('x') - if len(parts) == 2: - width, height = int(parts[0]), int(parts[1]) - - # Store session config (actual browser managed separately) - session_config = { - "id": session_id, - "browser": browser, - "headless": headless, - "viewport": {"width": width, "height": height}, - "status": "ready", - "actions": [] - } - - session_file = SESSIONS_DIR / f"{session_id}.json" - session_file.write_text(json.dumps(session_config, indent=2)) - - return { - "status": "success", - "session_id": session_id, - "browser": browser, - "headless": headless, - "viewport": f"{width}x{height}" - } - - -def main(): - parser = argparse.ArgumentParser(description='Start browser session') - parser.add_argument('--browser', default='chromium', choices=['chromium', 'firefox', 'webkit']) - parser.add_argument('--headless', action='store_true') - parser.add_argument('--viewport', help='Viewport size (e.g., 1920x1080)') - - args = parser.parse_args() - - try: - result = start_browser(args.browser, args.headless, args.viewport) - print(json.dumps(result, indent=2)) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/webapp-testing/tools/verify.py b/coderrr-skills/skills/webapp-testing/tools/verify.py deleted file mode 100644 index b167143..0000000 --- a/coderrr-skills/skills/webapp-testing/tools/verify.py +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env 
python3 -""" -Verify page state and elements. - -Usage: - python verify.py --session abc123 --check visible --selector "#success-msg" -""" - -import argparse -import sys -import json -from pathlib import Path - -SESSIONS_DIR = Path.home() / '.coderrr' / 'playwright_sessions' - - -def verify(session_id: str, check: str, selector: str = None, expected: str = None): - """Verify page state.""" - session_file = SESSIONS_DIR / f"{session_id}.json" - - if not session_file.exists(): - raise ValueError(f"Session not found: {session_id}") - - session = json.loads(session_file.read_text()) - - # Record verification - verification = { - "type": "verify", - "check": check, - "selector": selector, - "expected": expected - } - - session["actions"].append(verification) - session_file.write_text(json.dumps(session, indent=2)) - - # In real implementation, would actually perform verification - return { - "status": "success", - "session_id": session_id, - "check": check, - "selector": selector, - "expected": expected, - "passed": True, - "message": f"Verification '{check}' passed" - } - - -def main(): - parser = argparse.ArgumentParser(description='Verify page state') - parser.add_argument('--session', required=True, help='Session ID') - parser.add_argument('--check', required=True, - choices=['visible', 'hidden', 'text', 'value', 'title', 'url']) - parser.add_argument('--selector', help='Element selector') - parser.add_argument('--expected', help='Expected value') - - args = parser.parse_args() - - try: - result = verify(args.session, args.check, args.selector, args.expected) - print(json.dumps(result, indent=2)) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/xlsx/Skills.md b/coderrr-skills/skills/xlsx/Skills.md deleted file mode 100644 index c89a197..0000000 --- a/coderrr-skills/skills/xlsx/Skills.md +++ /dev/null @@ -1,169 +0,0 @@ ---- -name: xlsx -description: Create and 
manipulate Excel spreadsheets with formulas and formatting. Use this skill when the user asks to create Excel files, read spreadsheet data, update cells, add formulas, format worksheets, or analyze Excel structure. Supports multiple sheets, cell formatting, and Excel formulas. ---- - -This skill provides comprehensive Excel manipulation using openpyxl. It handles spreadsheet creation, data reading/writing, formula insertion, and formatting. - -The user provides spreadsheet requirements or existing files to process. They may want to create reports, read data, update values, or apply formatting. - -## Approach - -Before invoking tools, understand the spreadsheet task: -- **Create new**: Use `create_xlsx` with data and structure -- **Read data**: Use `read_xlsx` to extract cell values -- **Modify cells**: Use `edit_xlsx` to update content -- **Analyze structure**: Use `analyze_xlsx` for workbook overview - -## Tools - -### create_xlsx - -Creates Excel workbooks with data, formulas, and formatting. - -```bash -python tools/create_xlsx.py --output <path> --sheets <json> -``` - -**Arguments:** -- `--output` (required): Output file path (.xlsx) -- `--sheets` (required): JSON specification of sheets and data - -**Sheets JSON Structure:** -```json -[ - { - "name": "Sheet1", - "headers": ["Name", "Value", "Total"], - "data": [ - ["Item A", 100, "=B2*1.1"], - ["Item B", 200, "=B3*1.1"] - ], - "column_widths": {"A": 20, "B": 15, "C": 15} - } -] -``` - -**Formula Support:** -- Start cell values with `=` for formulas -- Standard Excel formulas: `=SUM(A1:A10)`, `=AVERAGE(B:B)`, `=IF(A1>0,"Yes","No")` - -**When to use:** -- Generating data reports -- Creating templates -- Building formatted spreadsheets -- Automating Excel file creation - ---- - -### read_xlsx - -Reads data from Excel files. 
- -```bash -python tools/read_xlsx.py --file <path> [--sheet <name>] [--range <A1:Z100>] [--format <json|csv|text>] -``` - -**Arguments:** -- `--file` (required): Path to Excel file -- `--sheet` (optional): Sheet name (default: active sheet) -- `--range` (optional): Cell range to read (e.g., "A1:D10") -- `--format` (optional): Output format (default: json) - -**When to use:** -- Extracting spreadsheet data -- Reading specific ranges -- Converting Excel to other formats -- Processing uploaded files - ---- - -### edit_xlsx - -Modifies existing Excel files. - -```bash -python tools/edit_xlsx.py --file <path> --output <path> --operations <json> -``` - -**Arguments:** -- `--file` (required): Input Excel file -- `--output` (required): Output file path -- `--operations` (required): JSON array of operations - -**Operations:** -```json -[ - {"action": "set_cell", "sheet": "Sheet1", "cell": "A1", "value": "Updated"}, - {"action": "set_range", "sheet": "Sheet1", "start": "A2", "data": [["Row1"], ["Row2"]]}, - {"action": "add_formula", "sheet": "Sheet1", "cell": "C10", "formula": "=SUM(C1:C9)"}, - {"action": "add_sheet", "name": "NewSheet"}, - {"action": "format_cell", "sheet": "Sheet1", "cell": "A1", "bold": true, "bg_color": "FFFF00"} -] -``` - -**When to use:** -- Updating cell values -- Adding formulas -- Applying formatting -- Modifying structure - ---- - -### analyze_xlsx - -Analyzes workbook structure and statistics. - -```bash -python tools/analyze_xlsx.py --file <path> -``` - -**Output:** JSON with sheet names, dimensions, cell counts, and formula locations. 
- -**When to use:** -- Understanding workbook structure -- Getting sheet dimensions -- Finding formulas -- Auditing spreadsheets - -## Common Patterns - -### Create Simple Spreadsheet -```bash -python tools/create_xlsx.py --output data.xlsx --sheets '[{"name": "Data", "headers": ["ID", "Name", "Value"], "data": [[1, "Item A", 100], [2, "Item B", 200]]}]' -``` - -### Read Entire Sheet -```bash -python tools/read_xlsx.py --file data.xlsx --format json -``` - -### Read Specific Range -```bash -python tools/read_xlsx.py --file data.xlsx --sheet "Sheet1" --range "A1:C10" --format csv -``` - -### Update Cells -```bash -python tools/edit_xlsx.py --file data.xlsx --output updated.xlsx --operations '[{"action": "set_cell", "sheet": "Sheet1", "cell": "B2", "value": 150}]' -``` - -### Add Summary Formula -```bash -python tools/edit_xlsx.py --file data.xlsx --output updated.xlsx --operations '[{"action": "add_formula", "sheet": "Sheet1", "cell": "B10", "formula": "=SUM(B2:B9)"}]' -``` - -## Formula Examples - -| Formula | Description | -|---------|-------------| -| `=SUM(A1:A10)` | Sum of range | -| `=AVERAGE(B:B)` | Average of column | -| `=IF(A1>0,"Yes","No")` | Conditional | -| `=VLOOKUP(A1,Sheet2!A:B,2,FALSE)` | Lookup | -| `=CONCATENATE(A1," ",B1)` | Text join | -| `=TODAY()` | Current date | - -## Dependencies - -Requires `openpyxl>=3.1.0`. Automatically installed with the skill. diff --git a/coderrr-skills/skills/xlsx/requirements.txt b/coderrr-skills/skills/xlsx/requirements.txt deleted file mode 100644 index 9cc1e67..0000000 --- a/coderrr-skills/skills/xlsx/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -openpyxl>=3.1.0 diff --git a/coderrr-skills/skills/xlsx/tools/analyze_xlsx.py b/coderrr-skills/skills/xlsx/tools/analyze_xlsx.py deleted file mode 100644 index 5810d76..0000000 --- a/coderrr-skills/skills/xlsx/tools/analyze_xlsx.py +++ /dev/null @@ -1,90 +0,0 @@ -#!/usr/bin/env python3 -""" -Analyze Excel workbook structure. 
- -Usage: - python analyze_xlsx.py --file data.xlsx -""" - -import argparse -import sys -import json -from pathlib import Path - -try: - from openpyxl import load_workbook - from openpyxl.utils import get_column_letter -except ImportError: - print("Error: 'openpyxl' package is required. Install with: pip install openpyxl", file=sys.stderr) - sys.exit(1) - - -def analyze_xlsx(file_path: str) -> dict: - """Analyze workbook structure.""" - wb = load_workbook(file_path) - - sheets_info = [] - total_cells = 0 - total_formulas = 0 - - for sheet_name in wb.sheetnames: - ws = wb[sheet_name] - - cell_count = 0 - formula_count = 0 - formulas = [] - - for row in ws.iter_rows(): - for cell in row: - if cell.value is not None: - cell_count += 1 - if isinstance(cell.value, str) and cell.value.startswith('='): - formula_count += 1 - if len(formulas) < 5: # Limit examples - formulas.append({ - "cell": cell.coordinate, - "formula": cell.value - }) - - sheets_info.append({ - "name": sheet_name, - "dimensions": f"A1:{get_column_letter(ws.max_column)}{ws.max_row}", - "rows": ws.max_row, - "columns": ws.max_column, - "cell_count": cell_count, - "formula_count": formula_count, - "sample_formulas": formulas - }) - - total_cells += cell_count - total_formulas += formula_count - - return { - "file": str(file_path), - "sheet_count": len(wb.sheetnames), - "total_cells": total_cells, - "total_formulas": total_formulas, - "sheets": sheets_info - } - - -def main(): - parser = argparse.ArgumentParser(description='Analyze Excel workbooks') - parser.add_argument('--file', required=True, help='Path to Excel file') - - args = parser.parse_args() - - if not Path(args.file).exists(): - print(f"Error: File not found: {args.file}", file=sys.stderr) - sys.exit(1) - - try: - result = analyze_xlsx(args.file) - print(json.dumps(result, indent=2)) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git 
a/coderrr-skills/skills/xlsx/tools/create_xlsx.py b/coderrr-skills/skills/xlsx/tools/create_xlsx.py deleted file mode 100644 index 0304f67..0000000 --- a/coderrr-skills/skills/xlsx/tools/create_xlsx.py +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env python3 -""" -Create Excel workbooks. - -Usage: - python create_xlsx.py --output data.xlsx --sheets '[...]' -""" - -import argparse -import sys -import json -from pathlib import Path - -try: - from openpyxl import Workbook - from openpyxl.styles import Font, PatternFill, Alignment - from openpyxl.utils import get_column_letter -except ImportError: - print("Error: 'openpyxl' package is required. Install with: pip install openpyxl", file=sys.stderr) - sys.exit(1) - - -def create_xlsx(output_path: str, sheets_spec: list): - """Create an Excel workbook.""" - wb = Workbook() - - # Remove default sheet if we're creating new ones - if sheets_spec: - default_sheet = wb.active - wb.remove(default_sheet) - - for sheet_spec in sheets_spec: - sheet_name = sheet_spec.get('name', 'Sheet1') - ws = wb.create_sheet(title=sheet_name) - - row_num = 1 - - # Add headers - headers = sheet_spec.get('headers', []) - if headers: - for col, header in enumerate(headers, 1): - cell = ws.cell(row=row_num, column=col, value=header) - cell.font = Font(bold=True) - row_num += 1 - - # Add data - data = sheet_spec.get('data', []) - for row_data in data: - for col, value in enumerate(row_data, 1): - cell = ws.cell(row=row_num, column=col) - if isinstance(value, str) and value.startswith('='): - cell.value = value # Formula - else: - cell.value = value - row_num += 1 - - # Set column widths - col_widths = sheet_spec.get('column_widths', {}) - for col_letter, width in col_widths.items(): - ws.column_dimensions[col_letter].width = width - - wb.save(output_path) - return output_path - - -def main(): - parser = argparse.ArgumentParser(description='Create Excel workbooks') - parser.add_argument('--output', required=True, help='Output file path (.xlsx)') - 
parser.add_argument('--sheets', required=True, help='JSON specification of sheets') - - args = parser.parse_args() - - try: - sheets = json.loads(args.sheets) - except json.JSONDecodeError as e: - print(f"Error: Invalid sheets JSON - {e}", file=sys.stderr) - sys.exit(1) - - try: - result = create_xlsx(args.output, sheets) - print(json.dumps({"status": "success", "file": result, "sheets": len(sheets)})) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/xlsx/tools/edit_xlsx.py b/coderrr-skills/skills/xlsx/tools/edit_xlsx.py deleted file mode 100644 index edf0d65..0000000 --- a/coderrr-skills/skills/xlsx/tools/edit_xlsx.py +++ /dev/null @@ -1,107 +0,0 @@ -#!/usr/bin/env python3 -""" -Edit Excel files. - -Usage: - python edit_xlsx.py --file input.xlsx --output output.xlsx --operations '[...]' -""" - -import argparse -import sys -import json -from pathlib import Path - -try: - from openpyxl import load_workbook - from openpyxl.styles import Font, PatternFill -except ImportError: - print("Error: 'openpyxl' package is required. 
Install with: pip install openpyxl", file=sys.stderr) - sys.exit(1) - - -def apply_operation(wb, operation): - """Apply a single edit operation.""" - action = operation.get('action') - sheet_name = operation.get('sheet', wb.active.title) - - if action == 'set_cell': - ws = wb[sheet_name] - cell = operation.get('cell', 'A1') - value = operation.get('value') - ws[cell] = value - - elif action == 'set_range': - ws = wb[sheet_name] - start = operation.get('start', 'A1') - data = operation.get('data', []) - - # Parse start cell - col_letter = ''.join(filter(str.isalpha, start)) - row_num = int(''.join(filter(str.isdigit, start))) - - for row_idx, row_data in enumerate(data): - for col_idx, value in enumerate(row_data): - ws.cell(row=row_num + row_idx, column=ord(col_letter) - ord('A') + 1 + col_idx, value=value) - - elif action == 'add_formula': - ws = wb[sheet_name] - cell = operation.get('cell', 'A1') - formula = operation.get('formula', '') - ws[cell] = formula - - elif action == 'add_sheet': - name = operation.get('name', 'NewSheet') - wb.create_sheet(title=name) - - elif action == 'format_cell': - ws = wb[sheet_name] - cell = operation.get('cell', 'A1') - cell_obj = ws[cell] - - if operation.get('bold'): - cell_obj.font = Font(bold=True) - if operation.get('bg_color'): - cell_obj.fill = PatternFill(start_color=operation['bg_color'], - end_color=operation['bg_color'], - fill_type='solid') - - -def edit_xlsx(input_path: str, output_path: str, operations: list): - """Edit an Excel file.""" - wb = load_workbook(input_path) - - for operation in operations: - apply_operation(wb, operation) - - wb.save(output_path) - return output_path - - -def main(): - parser = argparse.ArgumentParser(description='Edit Excel files') - parser.add_argument('--file', required=True, help='Input Excel file') - parser.add_argument('--output', required=True, help='Output file path') - parser.add_argument('--operations', required=True, help='JSON array of operations') - - args = 
parser.parse_args() - - if not Path(args.file).exists(): - print(f"Error: File not found: {args.file}", file=sys.stderr) - sys.exit(1) - - try: - operations = json.loads(args.operations) - except json.JSONDecodeError as e: - print(f"Error: Invalid operations JSON - {e}", file=sys.stderr) - sys.exit(1) - - try: - result = edit_xlsx(args.file, args.output, operations) - print(json.dumps({"status": "success", "file": result})) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/coderrr-skills/skills/xlsx/tools/read_xlsx.py b/coderrr-skills/skills/xlsx/tools/read_xlsx.py deleted file mode 100644 index fd3e966..0000000 --- a/coderrr-skills/skills/xlsx/tools/read_xlsx.py +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/env python3 -""" -Read data from Excel files. - -Usage: - python read_xlsx.py --file data.xlsx --format json -""" - -import argparse -import sys -import json -from pathlib import Path - -try: - from openpyxl import load_workbook - from openpyxl.utils import get_column_letter -except ImportError: - print("Error: 'openpyxl' package is required. Install with: pip install openpyxl", file=sys.stderr) - sys.exit(1) - - -def parse_range(range_str: str): - """Parse range like A1:C10 into start and end.""" - if ':' in range_str: - start, end = range_str.split(':') - return start, end - return range_str, range_str - - -def read_xlsx(file_path: str, sheet_name: str = None, cell_range: str = None, output_format: str = 'json'): - """Read Excel file data.""" - wb = load_workbook(file_path, data_only=True) - - # Get sheet - if sheet_name: - if sheet_name not in wb.sheetnames: - raise ValueError(f"Sheet '{sheet_name}' not found. 
Available: {wb.sheetnames}") - ws = wb[sheet_name] - else: - ws = wb.active - - # Determine range - if cell_range: - data = [] - for row in ws[cell_range]: - row_data = [cell.value for cell in row] - data.append(row_data) - else: - data = [] - for row in ws.iter_rows(values_only=True): - if any(cell is not None for cell in row): - data.append(list(row)) - - # Format output - if output_format == 'json': - return json.dumps({ - "sheet": ws.title, - "range": cell_range or f"A1:{get_column_letter(ws.max_column)}{ws.max_row}", - "data": data - }, indent=2, default=str) - - elif output_format == 'csv': - lines = [] - for row in data: - line = ','.join(str(cell) if cell is not None else '' for cell in row) - lines.append(line) - return '\n'.join(lines) - - else: # text - lines = [] - for row in data: - line = '\t'.join(str(cell) if cell is not None else '' for cell in row) - lines.append(line) - return '\n'.join(lines) - - -def main(): - parser = argparse.ArgumentParser(description='Read Excel files') - parser.add_argument('--file', required=True, help='Path to Excel file') - parser.add_argument('--sheet', help='Sheet name') - parser.add_argument('--range', help='Cell range (e.g., A1:D10)') - parser.add_argument('--format', choices=['json', 'csv', 'text'], default='json') - - args = parser.parse_args() - - if not Path(args.file).exists(): - print(f"Error: File not found: {args.file}", file=sys.stderr) - sys.exit(1) - - try: - result = read_xlsx(args.file, args.sheet, args.range, args.format) - print(result) - except Exception as e: - print(f"Error: {e}", file=sys.stderr) - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/src/agent.js b/src/agent.js index 03c4d78..39e35ee 100644 --- a/src/agent.js +++ b/src/agent.js @@ -10,6 +10,8 @@ const GitOperations = require('./gitOps'); const { sanitizeAxiosError, formatUserError, createSafeError, isNetworkError } = require('./errorHandler'); const configManager = require('./configManager'); const { getProvider } = 
require('./providers'); +const skillRegistry = require('./skillRegistry'); +const skillRunner = require('./skillRunner'); /** * Core AI Agent that communicates with backend and executes plans @@ -59,6 +61,10 @@ class Agent { // Track running processes spawned in separate terminals this.runningProcesses = []; + // Load installed agent skills for tool invocation + this.installedSkills = skillRegistry.loadAllSkills(); + this.toolManifest = skillRegistry.generateToolManifest(); + // Register cleanup handler for when Coderrr exits this.registerExitCleanup(); } @@ -251,6 +257,16 @@ When editing existing files, use EXACT filenames from the list above. When creat For command execution on ${osType}, use appropriate command separators (${osType === 'Windows' ? 'semicolon (;)' : 'ampersand (&&)'}).`; } + // Inject available skill tools into context (if any are installed) + if (this.toolManifest) { + enhancedPrompt = `${enhancedPrompt} + +${this.toolManifest} + +To invoke a skill tool, use the action: "invoke_skill" with "skill", "tool", and "args" properties. 
+Example: {"action": "invoke_skill", "skill": "web-scraper", "tool": "fetch_page", "args": {"url": "..."}, "summary": "Fetching page"}`; + } + const spinner = ui.spinner('Thinking...'); spinner.start(); @@ -451,6 +467,10 @@ For command execution on ${osType}, use appropriate command separators (${osType // Store the process handle for potential cleanup later if (!this.runningProcesses) { this.runningProcesses = []; + + // Load installed agent skills for tool invocation + this.installedSkills = skillRegistry.loadAllSkills(); + this.toolManifest = skillRegistry.generateToolManifest(); } this.runningProcesses.push(result); @@ -487,6 +507,24 @@ For command execution on ${osType}, use appropriate command separators (${osType break; } } + } else if (step.action === 'invoke_skill') { + // Execute a skill tool + ui.info(`Invoking skill tool: ${step.skill}/${step.tool}`); + + const result = await skillRunner.executeTool( + step.skill, + step.tool, + step.args || {}, + { cwd: this.workingDir } + ); + + if (result.success) { + stepResult = `Skill ${step.skill}/${step.tool} executed successfully`; + stepSuccess = true; + ui.success(`Tool output:\n${result.output}`); + } else { + throw new Error(result.error || 'Skill tool execution failed'); + } } else { // File operation const result = await this.fileOps.execute(step); diff --git a/src/executor.js b/src/executor.js index 98a4f0a..9a77601 100644 --- a/src/executor.js +++ b/src/executor.js @@ -9,6 +9,7 @@ const fsSync = require('fs'); const path = require('path'); const os = require('os'); const ui = require('./ui'); +const skillRunner = require('./skillRunner'); class CommandExecutor { constructor() { diff --git a/src/skillMarketplace.js b/src/skillMarketplace.js new file mode 100644 index 0000000..3f15030 --- /dev/null +++ b/src/skillMarketplace.js @@ -0,0 +1,249 @@ +/** + * Skill Marketplace - Remote Registry Client + * + * Connects Coderrr to the remote skill marketplace hosted on GitHub. 
+ * Handles fetching registry, downloading skills, and caching. + */ + +const axios = require('axios'); +const fs = require('fs'); +const path = require('path'); +const os = require('os'); +const skillRegistry = require('./skillRegistry'); +const { installSkillDependencies } = require('./skillRunner'); + +// Registry configuration +const REGISTRY_URL = 'https://raw.githubusercontent.com/Akash-nath29/coderrr-skills/main/registry.json'; +const CACHE_DIR = path.join(os.homedir(), '.coderrr'); +const CACHE_FILE = path.join(CACHE_DIR, 'registry-cache.json'); +const CACHE_TTL = 5 * 60 * 1000; // 5 minutes + +/** + * Ensure cache directory exists + */ +function ensureCacheDir() { + if (!fs.existsSync(CACHE_DIR)) { + fs.mkdirSync(CACHE_DIR, { recursive: true }); + } +} + +/** + * Load cached registry if valid + * @returns {Object|null} Cached registry or null if expired/missing + */ +function loadCache() { + try { + if (!fs.existsSync(CACHE_FILE)) return null; + + const cache = JSON.parse(fs.readFileSync(CACHE_FILE, 'utf8')); + const age = Date.now() - cache.timestamp; + + if (age < CACHE_TTL) { + return cache.data; + } + return null; + } catch (e) { + return null; + } +} + +/** + * Save registry to cache + * @param {Object} data - Registry data + */ +function saveCache(data) { + ensureCacheDir(); + const cache = { + timestamp: Date.now(), + data: data + }; + fs.writeFileSync(CACHE_FILE, JSON.stringify(cache, null, 2), 'utf8'); +} + +/** + * Fetch the remote registry with caching + * @returns {Promise<Object>} Registry object + */ +async function fetchRegistry() { + // Check cache first + const cached = loadCache(); + if (cached) { + return cached; + } + + try { + const response = await axios.get(REGISTRY_URL, { timeout: 10000 }); + const registry = response.data; + + // Cache the result + saveCache(registry); + + return registry; + } catch (error) { + if (error.code === 'ENOTFOUND' || error.code === 'ETIMEDOUT') { + throw new Error('Could not connect to skill registry. 
Check your internet connection.'); + } + throw new Error(`Failed to fetch registry: ${error.message}`); + } +} + +/** + * Search skills by query + * @param {string} query - Search query + * @returns {Promise<Array>} Matching skills + */ +async function searchSkills(query) { + const registry = await fetchRegistry(); + const q = query.toLowerCase(); + + return Object.values(registry.skills).filter(skill => + skill.name.toLowerCase().includes(q) || + skill.description.toLowerCase().includes(q) || + (skill.tags && skill.tags.some(tag => tag.toLowerCase().includes(q))) + ); +} + +/** + * Get skill info by name + * @param {string} name - Skill name + * @returns {Promise<Object|null>} Skill info or null + */ +async function getSkillInfo(name) { + const registry = await fetchRegistry(); + return registry.skills[name] || null; +} + +/** + * List all available skills in the marketplace + * @returns {Promise<Array>} All skills + */ +async function listAvailableSkills() { + const registry = await fetchRegistry(); + return Object.values(registry.skills); +} + +/** + * Download a file from URL + * @param {string} url - File URL + * @returns {Promise<string>} File content + */ +async function downloadFile(url) { + const response = await axios.get(url, { + timeout: 30000, + responseType: 'text' + }); + return response.data; +} + +/** + * Download and install a skill from the marketplace + * @param {string} skillName - Name of the skill to install + * @returns {Promise<Object>} Installation result + */ +async function downloadSkill(skillName) { + const skillInfo = await getSkillInfo(skillName); + + if (!skillInfo) { + return { success: false, error: `Skill not found: ${skillName}` }; + } + + // Check if already installed + if (skillRegistry.isSkillInstalled(skillName)) { + return { success: false, error: `Skill "${skillName}" is already installed.` }; + } + + const skillDir = path.join(skillRegistry.SKILLS_DIR, skillName); + const toolsDir = path.join(skillDir, 'tools'); + const 
baseUrl = skillInfo.download_url; + + try { + // Create directories + skillRegistry.ensureSkillsDir(); + fs.mkdirSync(toolsDir, { recursive: true }); + + // Download Skills.md + console.log(` Downloading Skills.md...`); + const skillsMd = await downloadFile(`${baseUrl}/Skills.md`); + fs.writeFileSync(path.join(skillDir, 'Skills.md'), skillsMd, 'utf8'); + + // Try to download requirements.txt (optional) + try { + const requirements = await downloadFile(`${baseUrl}/requirements.txt`); + fs.writeFileSync(path.join(skillDir, 'requirements.txt'), requirements, 'utf8'); + console.log(` Found requirements.txt`); + } catch (e) { + // requirements.txt is optional, ignore + } + + // Download each tool + for (const tool of skillInfo.tools) { + console.log(` Downloading ${tool}.py...`); + try { + const toolContent = await downloadFile(`${baseUrl}/tools/${tool}.py`); + fs.writeFileSync(path.join(toolsDir, `${tool}.py`), toolContent, 'utf8'); + } catch (e) { + console.warn(` Warning: Could not download ${tool}.py`); + } + } + + // Install Python dependencies if requirements.txt exists + const depsResult = await installSkillDependencies(skillName); + if (!depsResult.success && depsResult.error) { + console.log(` Note: ${depsResult.error}`); + } + + return { + success: true, + skill: skillInfo, + message: `Skill "${skillName}" installed successfully` + }; + + } catch (error) { + // Cleanup on failure + try { + if (fs.existsSync(skillDir)) { + fs.rmSync(skillDir, { recursive: true }); + } + } catch (e) { + // Ignore cleanup errors + } + return { success: false, error: `Installation failed: ${error.message}` }; + } +} + +/** + * Check if a source is a local path or remote skill name + * @param {string} source - Source string + * @returns {boolean} True if local path + */ +function isLocalPath(source) { + // Local if starts with ./, ../, /, or contains drive letter (Windows) + return source.startsWith('./') || + source.startsWith('../') || + source.startsWith('/') || + 
/^[a-zA-Z]:/.test(source); +} + +/** + * Clear the registry cache + */ +function clearCache() { + try { + if (fs.existsSync(CACHE_FILE)) { + fs.unlinkSync(CACHE_FILE); + } + } catch (e) { + // Ignore + } +} + +module.exports = { + REGISTRY_URL, + fetchRegistry, + searchSkills, + getSkillInfo, + listAvailableSkills, + downloadSkill, + isLocalPath, + clearCache +}; diff --git a/src/skillRegistry.js b/src/skillRegistry.js new file mode 100644 index 0000000..4148042 --- /dev/null +++ b/src/skillRegistry.js @@ -0,0 +1,308 @@ +/** + * Skill Registry for Coderrr + * + * Discovers, loads, and manages installed agent skills from ~/.coderrr/skills/ + * Each skill contains a Skills.md description and Python tools in tools/ directory. + */ + +const fs = require('fs'); +const path = require('path'); +const os = require('os'); + +const CODERRR_DIR = path.join(os.homedir(), '.coderrr'); +const SKILLS_DIR = path.join(CODERRR_DIR, 'skills'); + +/** + * Ensure the skills directory exists + */ +function ensureSkillsDir() { + if (!fs.existsSync(SKILLS_DIR)) { + fs.mkdirSync(SKILLS_DIR, { recursive: true }); + } + return SKILLS_DIR; +} + +/** + * Get the skills directory path + * @returns {string} Path to ~/.coderrr/skills/ + */ +function getSkillsDir() { + return SKILLS_DIR; +} + +/** + * Validate that a skill folder has the required structure + * @param {string} skillPath - Full path to the skill folder + * @returns {Object} { valid: boolean, error?: string } + */ +function validateSkillStructure(skillPath) { + const skillsMarkdown = path.join(skillPath, 'Skills.md'); + const toolsDir = path.join(skillPath, 'tools'); + + if (!fs.existsSync(skillsMarkdown)) { + return { valid: false, error: 'Missing Skills.md file' }; + } + + if (!fs.existsSync(toolsDir)) { + return { valid: false, error: 'Missing tools/ directory' }; + } + + const stats = fs.statSync(toolsDir); + if (!stats.isDirectory()) { + return { valid: false, error: 'tools/ is not a directory' }; + } + + // Check for at least 
one .py file in tools/ + const toolFiles = fs.readdirSync(toolsDir).filter(f => f.endsWith('.py')); + if (toolFiles.length === 0) { + return { valid: false, error: 'No Python tools found in tools/' }; + } + + return { valid: true }; +} + +/** + * Parse the Skills.md file to extract skill metadata + * @param {string} skillsMarkdownPath - Path to Skills.md + * @returns {Object} { name: string, description: string, rawContent: string } + */ +function parseSkillsMarkdown(skillsMarkdownPath) { + const content = fs.readFileSync(skillsMarkdownPath, 'utf-8'); + const lines = content.split('\n'); + + let name = ''; + let description = ''; + + // Extract name from first # header + for (const line of lines) { + const headerMatch = line.match(/^#\s+(.+)/); + if (headerMatch) { + name = headerMatch[1].trim(); + break; + } + } + + // Extract description - first non-empty line after header + let foundHeader = false; + for (const line of lines) { + if (line.startsWith('#')) { + foundHeader = true; + continue; + } + if (foundHeader && line.trim()) { + description = line.trim(); + break; + } + } + + return { + name: name || path.basename(path.dirname(skillsMarkdownPath)), + description: description || 'No description provided', + rawContent: content + }; +} + +/** + * Extract tool metadata from a Python file by parsing docstrings + * @param {string} toolPath - Path to the .py file + * @returns {Object} { name: string, description: string, parameters: string[] } + */ +function parseToolMetadata(toolPath) { + const content = fs.readFileSync(toolPath, 'utf-8'); + const toolName = path.basename(toolPath, '.py'); + + let description = ''; + let parameters = []; + + // Try to extract docstring (simple pattern for triple-quoted strings) + const docstringMatch = content.match(/^"""([\s\S]*?)"""|^'''([\s\S]*?)'''/m); + if (docstringMatch) { + description = (docstringMatch[1] || docstringMatch[2] || '').trim().split('\n')[0]; + } + + // Try to extract function parameters from main() or first 
def + const funcMatch = content.match(/def\s+(?:main|run|\w+)\s*\(([^)]*)\)/); + if (funcMatch && funcMatch[1]) { + parameters = funcMatch[1] + .split(',') + .map(p => p.trim().split('=')[0].split(':')[0].trim()) + .filter(p => p && p !== 'self'); + } + + return { + name: toolName, + description: description || `Tool: ${toolName}`, + parameters + }; +} + +/** + * Load a single skill from its directory + * @param {string} skillName - Name of the skill (folder name) + * @returns {Object|null} Skill object or null if invalid + */ +function loadSkill(skillName) { + const skillPath = path.join(SKILLS_DIR, skillName); + + if (!fs.existsSync(skillPath)) { + return null; + } + + const validation = validateSkillStructure(skillPath); + if (!validation.valid) { + console.warn(`Skill "${skillName}" is invalid: ${validation.error}`); + return null; + } + + // Parse Skills.md + const skillsMarkdownPath = path.join(skillPath, 'Skills.md'); + const metadata = parseSkillsMarkdown(skillsMarkdownPath); + + // Load all tools + const toolsDir = path.join(skillPath, 'tools'); + const toolFiles = fs.readdirSync(toolsDir).filter(f => f.endsWith('.py')); + + const tools = toolFiles.map(toolFile => { + const toolPath = path.join(toolsDir, toolFile); + return parseToolMetadata(toolPath); + }); + + return { + name: skillName, + displayName: metadata.name, + description: metadata.description, + path: skillPath, + tools, + rawSkillsContent: metadata.rawContent + }; +} + +/** + * List all installed skills (folder names in ~/.coderrr/skills/) + * @returns {string[]} Array of skill names + */ +function listInstalledSkills() { + ensureSkillsDir(); + + if (!fs.existsSync(SKILLS_DIR)) { + return []; + } + + return fs.readdirSync(SKILLS_DIR).filter(name => { + const skillPath = path.join(SKILLS_DIR, name); + return fs.statSync(skillPath).isDirectory(); + }); +} + +/** + * Load all valid installed skills + * @returns {Object[]} Array of skill objects + */ +function loadAllSkills() { + const skillNames 
= listInstalledSkills(); + const skills = []; + + for (const name of skillNames) { + const skill = loadSkill(name); + if (skill) { + skills.push(skill); + } + } + + return skills; +} + +/** + * Get all available tools across all installed skills + * @returns {Object[]} Array of { skill, tool } objects + */ +function getAvailableTools() { + const skills = loadAllSkills(); + const tools = []; + + for (const skill of skills) { + for (const tool of skill.tools) { + tools.push({ + skillName: skill.name, + skillDescription: skill.description, + toolName: tool.name, + toolDescription: tool.description, + toolParameters: tool.parameters + }); + } + } + + return tools; +} + +/** + * Generate a tool manifest string for injection into LLM context + * @returns {string} Formatted manifest of all available skills and tools + */ +function generateToolManifest() { + const skills = loadAllSkills(); + + if (skills.length === 0) { + return ''; + } + + let manifest = 'AVAILABLE SKILLS & TOOLS:\n\n'; + + for (const skill of skills) { + manifest += `[${skill.name}] - ${skill.description}\n`; + + for (const tool of skill.tools) { + const params = tool.parameters.length > 0 + ? 
`(${tool.parameters.join(', ')})` + : '()'; + manifest += ` • ${tool.name}${params}: ${tool.description}\n`; + } + manifest += '\n'; + } + + return manifest.trim(); +} + +/** + * Check if a specific skill is installed + * @param {string} skillName - Name of the skill + * @returns {boolean} + */ +function isSkillInstalled(skillName) { + const skillPath = path.join(SKILLS_DIR, skillName); + return fs.existsSync(skillPath) && validateSkillStructure(skillPath).valid; +} + +/** + * Remove an installed skill + * @param {string} skillName - Name of the skill to remove + * @returns {boolean} True if removed successfully + */ +function removeSkill(skillName) { + const skillPath = path.join(SKILLS_DIR, skillName); + + if (!fs.existsSync(skillPath)) { + return false; + } + + // Recursively delete the skill folder + fs.rmSync(skillPath, { recursive: true, force: true }); + return true; +} + +module.exports = { + CODERRR_DIR, + SKILLS_DIR, + ensureSkillsDir, + getSkillsDir, + validateSkillStructure, + parseSkillsMarkdown, + parseToolMetadata, + loadSkill, + listInstalledSkills, + loadAllSkills, + getAvailableTools, + generateToolManifest, + isSkillInstalled, + removeSkill +}; diff --git a/src/skillRunner.js b/src/skillRunner.js new file mode 100644 index 0000000..2cb6bb8 --- /dev/null +++ b/src/skillRunner.js @@ -0,0 +1,250 @@ +/** + * Skill Runner for Coderrr + * + * Executes Python tools from installed skills in isolated subprocess. + * Captures output and returns structured results to the agent. 
+ */ + +const { spawn } = require('child_process'); +const path = require('path'); +const fs = require('fs'); +const { SKILLS_DIR } = require('./skillRegistry'); + +/** + * Execute a Python tool from a skill + * @param {string} skillName - Name of the skill + * @param {string} toolName - Name of the tool (without .py extension) + * @param {Object} args - Arguments to pass to the tool + * @param {Object} options - Execution options + * @param {string} options.cwd - Working directory for the tool + * @param {number} options.timeout - Timeout in milliseconds (default: 30000) + * @returns {Promise<Object>} { success, output, error, exitCode } + */ +async function executeTool(skillName, toolName, args = {}, options = {}) { + const { cwd = process.cwd(), timeout = 30000 } = options; + + const toolPath = path.join(SKILLS_DIR, skillName, 'tools', `${toolName}.py`); + + // Validate tool exists + if (!fs.existsSync(toolPath)) { + return { + success: false, + output: '', + error: `Tool not found: ${skillName}/${toolName}`, + exitCode: 1 + }; + } + + // Convert args object to command line arguments + const argsList = buildArgsList(args); + + return new Promise((resolve) => { + let stdout = ''; + let stderr = ''; + let resolved = false; + + const pythonCommand = process.platform === 'win32' ? 
'python' : 'python3'; + + const proc = spawn(pythonCommand, [toolPath, ...argsList], { + cwd, + env: { + ...process.env, + CODERRR_SKILL: skillName, + CODERRR_TOOL: toolName, + CODERRR_CWD: cwd + }, + shell: true, + timeout + }); + + proc.stdout.on('data', (data) => { + stdout += data.toString(); + }); + + proc.stderr.on('data', (data) => { + stderr += data.toString(); + }); + + proc.on('close', (code) => { + if (resolved) return; + resolved = true; + + resolve({ + success: code === 0, + output: stdout.trim(), + error: stderr.trim(), + exitCode: code + }); + }); + + proc.on('error', (err) => { + if (resolved) return; + resolved = true; + + resolve({ + success: false, + output: '', + error: `Failed to execute tool: ${err.message}`, + exitCode: 1 + }); + }); + + // Timeout handler + setTimeout(() => { + if (resolved) return; + resolved = true; + + proc.kill('SIGTERM'); + resolve({ + success: false, + output: stdout.trim(), + error: `Tool execution timed out after ${timeout}ms`, + exitCode: 124 + }); + }, timeout); + }); +} + +/** + * Convert an arguments object to a list of command line arguments + * @param {Object} args - Arguments object + * @returns {string[]} List of arguments + */ +function buildArgsList(args) { + const argsList = []; + + for (const [key, value] of Object.entries(args)) { + if (value === true) { + // Boolean flag: --flag + argsList.push(`--${key}`); + } else if (value === false) { + // Skip false booleans + continue; + } else if (Array.isArray(value)) { + // Array: --key value1 --key value2 + for (const v of value) { + argsList.push(`--${key}`, String(v)); + } + } else if (value !== null && value !== undefined) { + // Key-value: --key value + argsList.push(`--${key}`, String(value)); + } + } + + return argsList; +} + +/** + * Parse tool output as JSON if possible + * @param {string} output - Raw output string + * @returns {Object|string} Parsed JSON or original string + */ +function parseToolOutput(output) { + try { + return JSON.parse(output); 
+ } catch { + return output; + } +} + +/** + * Check if Python is available on the system + * @returns {Promise<Object>} { available, version, command } + */ +async function checkPythonAvailable() { + return new Promise((resolve) => { + const pythonCommand = process.platform === 'win32' ? 'python' : 'python3'; + + const proc = spawn(pythonCommand, ['--version'], { shell: true }); + + let output = ''; + proc.stdout.on('data', (data) => { + output += data.toString(); + }); + proc.stderr.on('data', (data) => { + output += data.toString(); + }); + + proc.on('close', (code) => { + if (code === 0) { + const versionMatch = output.match(/Python\s+(\d+\.\d+\.\d+)/); + resolve({ + available: true, + version: versionMatch ? versionMatch[1] : 'unknown', + command: pythonCommand + }); + } else { + resolve({ + available: false, + version: null, + command: null + }); + } + }); + + proc.on('error', () => { + resolve({ + available: false, + version: null, + command: null + }); + }); + }); +} + +/** + * Install Python dependencies for a skill (if requirements.txt exists) + * @param {string} skillName - Name of the skill + * @returns {Promise<Object>} { success, output, error } + */ +async function installSkillDependencies(skillName) { + const requirementsPath = path.join(SKILLS_DIR, skillName, 'requirements.txt'); + + if (!fs.existsSync(requirementsPath)) { + return { success: true, output: 'No requirements.txt found', error: '' }; + } + + return new Promise((resolve) => { + const pythonCommand = process.platform === 'win32' ? 
'python' : 'python3'; + + const proc = spawn(pythonCommand, ['-m', 'pip', 'install', '-r', requirementsPath], { + shell: true, + timeout: 120000 // 2 minute timeout for pip install + }); + + let stdout = ''; + let stderr = ''; + + proc.stdout.on('data', (data) => { + stdout += data.toString(); + }); + + proc.stderr.on('data', (data) => { + stderr += data.toString(); + }); + + proc.on('close', (code) => { + resolve({ + success: code === 0, + output: stdout.trim(), + error: stderr.trim() + }); + }); + + proc.on('error', (err) => { + resolve({ + success: false, + output: '', + error: err.message + }); + }); + }); +} + +module.exports = { + executeTool, + buildArgsList, + parseToolOutput, + checkPythonAvailable, + installSkillDependencies +}; diff --git a/src/skillsUI.js b/src/skillsUI.js new file mode 100644 index 0000000..909cdcc --- /dev/null +++ b/src/skillsUI.js @@ -0,0 +1,315 @@ +/** + * Skills UI for Coderrr CLI + * + * Provides CLI commands for managing agent skills. + * Supports both local and remote (marketplace) installation. 
+ */ + +const path = require('path'); +const fs = require('fs'); +const chalk = require('chalk'); +const inquirer = require('inquirer'); +const skillRegistry = require('./skillRegistry'); +const marketplace = require('./skillMarketplace'); +const { checkPythonAvailable, installSkillDependencies } = require('./skillRunner'); + +/** + * Display list of installed skills + */ +function displaySkillsList() { + const skills = skillRegistry.loadAllSkills(); + + if (skills.length === 0) { + console.log(chalk.yellow('\nā–² No skills installed.')); + console.log(chalk.gray(' Install skills with: coderrr install <skill-name>')); + console.log(chalk.gray(' Browse marketplace with: coderrr market\n')); + return; + } + + console.log(chalk.cyan.bold('\nā”œā”€ Installed Skills\n')); + + for (const skill of skills) { + console.log(` ${chalk.white.bold(skill.name)} - ${chalk.gray(skill.description)}`); + for (const tool of skill.tools) { + const params = tool.parameters.length > 0 ? `(${tool.parameters.join(', ')})` : '()'; + console.log(` ${chalk.green('•')} ${tool.name}${chalk.gray(params)}`); + } + console.log(); + } +} + +/** + * Install a skill from local path + * @param {string} sourcePath - Resolved path to skill folder + */ +async function installLocalSkill(sourcePath) { + if (!fs.existsSync(sourcePath)) { + console.log(chalk.red(`\nāœ— Source not found: ${sourcePath}\n`)); + return false; + } + + const validation = skillRegistry.validateSkillStructure(sourcePath); + if (!validation.valid) { + console.log(chalk.red(`\nāœ— Invalid skill: ${validation.error}`)); + console.log(chalk.gray('\n Required structure:')); + console.log(chalk.gray(' <skill>/')); + console.log(chalk.gray(' ā”œā”€ā”€ Skills.md')); + console.log(chalk.gray(' └── tools/')); + console.log(chalk.gray(' └── *.py\n')); + return false; + } + + const skillName = path.basename(sourcePath); + const targetPath = path.join(skillRegistry.SKILLS_DIR, skillName); + + if (fs.existsSync(targetPath)) { + 
console.log(chalk.yellow(`\nā–² Skill "${skillName}" already installed.`)); + console.log(chalk.gray(` Use: coderrr uninstall ${skillName}\n`)); + return false; + } + + skillRegistry.ensureSkillsDir(); + fs.cpSync(sourcePath, targetPath, { recursive: true }); + + const depsResult = await installSkillDependencies(skillName); + if (!depsResult.success && depsResult.error) { + console.log(chalk.yellow(`\nā–² Warning: ${depsResult.error}`)); + } + + const skill = skillRegistry.loadSkill(skillName); + console.log(chalk.green(`\nā–  Skill "${skillName}" installed!`)); + console.log(` Tools: ${skill.tools.map(t => t.name).join(', ')}\n`); + return true; +} + +/** + * Install a skill from marketplace + * @param {string} skillName - Name of skill in registry + */ +async function installRemoteSkill(skillName) { + console.log(` Fetching "${skillName}" from marketplace...`); + + const result = await marketplace.downloadSkill(skillName); + + if (!result.success) { + console.log(chalk.red(`\nāœ— ${result.error}\n`)); + return false; + } + + console.log(chalk.green(`\nā–  Skill "${skillName}" installed!`)); + console.log(` Tools: ${result.skill.tools.join(', ')}\n`); + return true; +} + +/** + * Install a skill (auto-detect local vs remote) + * @param {string} source - Local path or skill name + */ +async function installSkill(source) { + console.log(chalk.cyan.bold('\nā”œā”€ Installing Skill\n')); + + const python = await checkPythonAvailable(); + if (!python.available) { + console.log(chalk.red('āœ— Python not available. 
Skills require Python 3.8+.\n')); + return false; + } + console.log(chalk.green(` ā–  Python ${python.version} found`)); + + if (marketplace.isLocalPath(source)) { + return await installLocalSkill(path.resolve(source)); + } else { + return await installRemoteSkill(source); + } +} + +/** + * Uninstall a skill + * @param {string} skillName - Name of the skill + */ +async function uninstallSkill(skillName) { + if (!skillRegistry.isSkillInstalled(skillName)) { + console.log(chalk.yellow(`\nā–² Skill "${skillName}" not installed.\n`)); + return false; + } + + const { confirm } = await inquirer.prompt([{ + type: 'confirm', + name: 'confirm', + message: `Remove skill "${skillName}"?`, + default: false + }]); + + if (!confirm) { + console.log(chalk.yellow('\nā–² Cancelled.\n')); + return false; + } + + if (skillRegistry.removeSkill(skillName)) { + console.log(chalk.green(`\nā–  Skill "${skillName}" uninstalled.\n`)); + return true; + } else { + console.log(chalk.red(`\nāœ— Failed to uninstall.\n`)); + return false; + } +} + +/** + * Search marketplace for skills + * @param {string} query - Search query + */ +async function searchMarketplace(query) { + console.log(chalk.cyan.bold('\nā”œā”€ Searching Marketplace\n')); + + try { + const results = await marketplace.searchSkills(query); + + if (results.length === 0) { + console.log(chalk.yellow(` No skills found for: "${query}"\n`)); + return; + } + + console.log(` Found ${results.length} skill(s):\n`); + + for (const skill of results) { + const installed = skillRegistry.isSkillInstalled(skill.name); + const status = installed ? 
chalk.green(' [installed]') : ''; + + console.log(` ${chalk.cyan.bold(skill.name)}${status}`); + console.log(` ${skill.description}`); + if (skill.tags && skill.tags.length > 0) { + console.log(` ${chalk.gray('Tags: ' + skill.tags.join(', '))}`); + } + console.log(); + } + } catch (error) { + console.log(chalk.red(` āœ— ${error.message}\n`)); + } +} + +/** + * List all available skills in marketplace + */ +async function listMarketplace() { + console.log(chalk.cyan.bold('\nā”œā”€ Available Skills (Marketplace)\n')); + + try { + const skills = await marketplace.listAvailableSkills(); + + if (skills.length === 0) { + console.log(chalk.yellow(' No skills available in marketplace.\n')); + return; + } + + for (const skill of skills) { + const installed = skillRegistry.isSkillInstalled(skill.name); + const status = installed ? chalk.green(' āœ“') : ''; + + console.log(` ${chalk.cyan(skill.name)}${status} - ${skill.description}`); + } + console.log(); + console.log(chalk.gray(` Install with: coderrr install <skill-name>\n`)); + } catch (error) { + console.log(chalk.red(` āœ— ${error.message}\n`)); + } +} + +/** + * Show detailed info about a skill + * @param {string} skillName - Name of skill + */ +async function showSkillInfo(skillName) { + console.log(chalk.cyan.bold('\nā”œā”€ Skill Info\n')); + + try { + const skill = await marketplace.getSkillInfo(skillName); + + if (!skill) { + console.log(chalk.red(` Skill not found: ${skillName}\n`)); + return; + } + + const installed = skillRegistry.isSkillInstalled(skillName); + + console.log(` ${chalk.white.bold(skill.displayName || skill.name)}\n`); + console.log(` Name: ${skill.name}`); + console.log(` Status: ${installed ? 
chalk.green('Installed') : chalk.yellow('Not installed')}`); + console.log(` Version: ${skill.version}`); + console.log(` Author: ${skill.author}`); + console.log(` Description: ${skill.description}`); + console.log(` Tools: ${skill.tools.join(', ')}`); + if (skill.tags && skill.tags.length > 0) { + console.log(` Tags: ${skill.tags.join(', ')}`); + } + console.log(); + + if (!installed) { + console.log(chalk.gray(` Install with: coderrr install ${skillName}\n`)); + } + } catch (error) { + console.log(chalk.red(` āœ— ${error.message}\n`)); + } +} + +/** + * Register skill commands with commander program + * @param {Command} program - Commander program instance + */ +function registerSkillCommands(program) { + // List installed skills + program + .command('skills') + .description('List all installed agent skills') + .action(() => { + displaySkillsList(); + }); + + // Install a skill (local or from marketplace) + program + .command('install <source>') + .description('Install a skill (name from marketplace or local path)') + .action(async (source) => { + await installSkill(source); + }); + + // Uninstall a skill + program + .command('uninstall <skill-name>') + .description('Uninstall an installed skill') + .action(async (skillName) => { + await uninstallSkill(skillName); + }); + + // Search marketplace + program + .command('search <query>') + .description('Search for skills in the marketplace') + .action(async (query) => { + await searchMarketplace(query); + }); + + // List all available skills in marketplace + program + .command('market') + .description('Browse all available skills in the marketplace') + .action(async () => { + await listMarketplace(); + }); + + // Show skill info + program + .command('info <skill-name>') + .description('Show detailed information about a skill') + .action(async (skillName) => { + await showSkillInfo(skillName); + }); +} + +module.exports = { + displaySkillsList, + installSkill, + uninstallSkill, + searchMarketplace, + listMarketplace, 
+ showSkillInfo, + registerSkillCommands +};